From da9d912d2804fdd2d37648acf590d2edc9583fc7 Mon Sep 17 00:00:00 2001
From: Diana Arroyo
Date: Sat, 26 Jun 2021 22:33:31 -0500
Subject: [PATCH 1/2] Partial 3 cleanup of vendor folder 1st set (mostly .go files).

Signed-off-by: Diana Arroyo
---
 vendor/github.com/docker/docker/api/common.go | 11 -
 .../docker/docker/api/common_unix.go | 6 -
 .../docker/docker/api/common_windows.go | 8 -
 .../api/server/backend/build/backend.go | 90 -
 .../docker/api/server/backend/build/tag.go | 77 -
 .../docker/api/server/httputils/decoder.go | 16 -
 .../docker/api/server/httputils/errors.go | 131 -
 .../docker/api/server/httputils/form.go | 76 -
 .../docker/api/server/httputils/httputils.go | 100 -
 .../server/httputils/httputils_write_json.go | 15 -
 .../api/server/httputils/write_log_stream.go | 84 -
 .../docker/docker/api/server/middleware.go | 24 -
 .../docker/api/server/middleware/cors.go | 37 -
 .../docker/api/server/middleware/debug.go | 94 -
 .../api/server/middleware/experimental.go | 28 -
 .../api/server/middleware/middleware.go | 12 -
 .../docker/api/server/middleware/version.go | 65 -
 .../docker/api/server/router/build/backend.go | 22 -
 .../docker/api/server/router/build/build.go | 29 -
 .../api/server/router/build/build_routes.go | 269 --
 .../api/server/router/checkpoint/backend.go | 10 -
 .../server/router/checkpoint/checkpoint.go | 36 -
 .../router/checkpoint/checkpoint_routes.go | 65 -
 .../api/server/router/container/backend.go | 83 -
 .../api/server/router/container/container.go | 70 -
 .../router/container/container_routes.go | 661 ----
 .../api/server/router/container/copy.go | 140 -
 .../api/server/router/container/exec.go | 149 -
 .../api/server/router/container/inspect.go | 21 -
 .../docker/api/server/router/debug/debug.go | 53 -
 .../api/server/router/debug/debug_routes.go | 12 -
 .../api/server/router/distribution/backend.go | 15 -
 .../router/distribution/distribution.go | 31 -
 .../distribution/distribution_routes.go | 138 -
 .../docker/api/server/router/experimental.go | 68 -
 .../docker/api/server/router/image/backend.go | 40 -
 .../docker/api/server/router/image/image.go | 44 -
 .../api/server/router/image/image_routes.go | 314 --
 .../docker/docker/api/server/router/local.go | 104 -
 .../api/server/router/network/backend.go | 32 -
 .../api/server/router/network/filter.go | 93 -
 .../api/server/router/network/network.go | 43 -
 .../server/router/network/network_routes.go | 597 ----
 .../api/server/router/plugin/backend.go | 27 -
 .../docker/api/server/router/plugin/plugin.go | 39 -
 .../api/server/router/plugin/plugin_routes.go | 310 --
 .../docker/docker/api/server/router/router.go | 19 -
 .../api/server/router/session/backend.go | 11 -
 .../api/server/router/session/session.go | 29 -
 .../server/router/session/session_routes.go | 16 -
 .../docker/api/server/router/swarm/backend.go | 48 -
 .../docker/api/server/router/swarm/cluster.go | 63 -
 .../api/server/router/swarm/cluster_routes.go | 494 ---
 .../docker/api/server/router/swarm/helpers.go | 66 -
 .../api/server/router/system/backend.go | 28 -
 .../docker/api/server/router/system/system.go | 41 -
 .../api/server/router/system/system_routes.go | 199 --
 .../api/server/router/volume/backend.go | 20 -
 .../docker/api/server/router/volume/volume.go | 36 -
 .../api/server/router/volume/volume_routes.go | 96 -
 .../docker/api/server/router_swapper.go | 30 -
 .../docker/docker/api/server/server.go | 209 --
 .../api/templates/server/operation.gotmpl | 26 -
 .../docker/docker/api/types/auth.go | 22 -
 .../docker/api/types/backend/backend.go | 128 -
.../docker/docker/api/types/backend/build.go | 44 - .../docker/docker/api/types/blkiodev/blkio.go | 23 - .../docker/docker/api/types/client.go | 390 --- .../docker/docker/api/types/configs.go | 57 - .../docker/api/types/container/config.go | 69 - .../api/types/container/container_changes.go | 21 - .../api/types/container/container_create.go | 21 - .../api/types/container/container_top.go | 21 - .../api/types/container/container_update.go | 17 - .../api/types/container/container_wait.go | 29 - .../docker/api/types/container/host_config.go | 412 --- .../api/types/container/hostconfig_unix.go | 41 - .../api/types/container/hostconfig_windows.go | 40 - .../api/types/container/waitcondition.go | 22 - .../docker/docker/api/types/error_response.go | 13 - .../docker/docker/api/types/events/events.go | 52 - .../docker/docker/api/types/filters/parse.go | 350 -- .../docker/api/types/graph_driver_data.go | 17 - .../docker/docker/api/types/id_response.go | 13 - .../docker/api/types/image/image_history.go | 37 - .../api/types/image_delete_response_item.go | 15 - .../docker/docker/api/types/image_summary.go | 49 - .../docker/docker/api/types/mount/mount.go | 130 - .../docker/api/types/network/network.go | 108 - .../docker/docker/api/types/plugin.go | 203 -- .../docker/docker/api/types/plugin_device.go | 25 - .../docker/docker/api/types/plugin_env.go | 25 - .../docker/api/types/plugin_interface_type.go | 21 - .../docker/docker/api/types/plugin_mount.go | 37 - .../docker/api/types/plugin_responses.go | 71 - .../api/types/plugins/logdriver/entry.pb.go | 449 --- .../docker/api/types/plugins/logdriver/gen.go | 3 - .../docker/api/types/plugins/logdriver/io.go | 87 - .../docker/docker/api/types/port.go | 23 - .../docker/api/types/registry/authenticate.go | 21 - .../docker/api/types/registry/registry.go | 119 - .../docker/docker/api/types/seccomp.go | 93 - .../api/types/service_update_response.go | 12 - .../docker/docker/api/types/stats.go | 181 -- .../docker/api/types/strslice/strslice.go | 30 - .../docker/docker/api/types/swarm/common.go | 40 - .../docker/docker/api/types/swarm/config.go | 35 - .../docker/api/types/swarm/container.go | 74 - .../docker/docker/api/types/swarm/network.go | 121 - .../docker/docker/api/types/swarm/node.go | 115 - .../docker/docker/api/types/swarm/runtime.go | 27 - .../docker/api/types/swarm/runtime/gen.go | 3 - .../api/types/swarm/runtime/plugin.pb.go | 712 ----- .../docker/docker/api/types/swarm/secret.go | 36 - .../docker/docker/api/types/swarm/service.go | 124 - .../docker/docker/api/types/swarm/swarm.go | 217 -- .../docker/docker/api/types/swarm/task.go | 191 -- .../docker/api/types/time/duration_convert.go | 12 - .../docker/docker/api/types/time/timestamp.go | 129 - .../docker/docker/api/types/types.go | 587 ---- .../docker/api/types/versions/compare.go | 62 - .../docker/api/types/versions/v1p19/types.go | 35 - .../docker/api/types/versions/v1p20/types.go | 40 - .../docker/docker/api/types/volume.go | 69 - .../docker/api/types/volume/volume_create.go | 29 - .../docker/api/types/volume/volume_list.go | 23 - .../docker/docker/builder/builder.go | 115 - .../docker/builder/dockerfile/buildargs.go | 172 - .../docker/builder/dockerfile/builder.go | 421 --- .../docker/builder/dockerfile/builder_unix.go | 7 - .../builder/dockerfile/builder_windows.go | 8 - .../builder/dockerfile/clientsession.go | 76 - .../builder/dockerfile/containerbackend.go | 146 - .../docker/docker/builder/dockerfile/copy.go | 560 ---- .../docker/builder/dockerfile/copy_unix.go | 48 - 
.../docker/builder/dockerfile/copy_windows.go | 43 - .../docker/builder/dockerfile/dispatchers.go | 571 ---- .../builder/dockerfile/dispatchers_unix.go | 23 - .../builder/dockerfile/dispatchers_windows.go | 95 - .../docker/builder/dockerfile/evaluator.go | 250 -- .../docker/builder/dockerfile/imagecontext.go | 121 - .../docker/builder/dockerfile/imageprobe.go | 63 - .../docker/builder/dockerfile/internals.go | 481 --- .../builder/dockerfile/internals_linux.go | 88 - .../builder/dockerfile/internals_windows.go | 7 - .../docker/builder/dockerfile/metrics.go | 44 - .../builder/dockerignore/dockerignore.go | 64 - .../docker/docker/builder/fscache/fscache.go | 652 ---- .../docker/builder/fscache/naivedriver.go | 28 - .../docker/builder/remotecontext/archive.go | 125 - .../docker/builder/remotecontext/detect.go | 180 -- .../docker/builder/remotecontext/filehash.go | 45 - .../docker/builder/remotecontext/generate.go | 3 - .../docker/builder/remotecontext/git.go | 35 - .../builder/remotecontext/git/gitutils.go | 204 -- .../builder/remotecontext/lazycontext.go | 102 - .../docker/builder/remotecontext/mimetype.go | 27 - .../docker/builder/remotecontext/remote.go | 127 - .../docker/builder/remotecontext/tarsum.go | 157 - .../docker/builder/remotecontext/tarsum.pb.go | 525 --- vendor/github.com/docker/docker/cli/cobra.go | 131 - .../docker/docker/cli/config/configdir.go | 25 - .../docker/docker/cli/debug/debug.go | 26 - vendor/github.com/docker/docker/cli/error.go | 33 - .../github.com/docker/docker/cli/required.go | 27 - .../docker/docker/client/build_prune.go | 30 - .../docker/docker/client/checkpoint_create.go | 14 - .../docker/docker/client/checkpoint_delete.go | 20 - .../docker/docker/client/checkpoint_list.go | 28 - .../github.com/docker/docker/client/client.go | 402 --- .../docker/docker/client/client_unix.go | 9 - .../docker/docker/client/client_windows.go | 7 - .../docker/docker/client/config_create.go | 25 - .../docker/docker/client/config_inspect.go | 36 - .../docker/docker/client/config_list.go | 38 - .../docker/docker/client/config_remove.go | 13 - .../docker/docker/client/config_update.go | 21 - .../docker/docker/client/container_attach.go | 57 - .../docker/docker/client/container_commit.go | 55 - .../docker/docker/client/container_copy.go | 101 - .../docker/docker/client/container_create.go | 56 - .../docker/docker/client/container_diff.go | 23 - .../docker/docker/client/container_exec.go | 54 - .../docker/docker/client/container_export.go | 19 - .../docker/docker/client/container_inspect.go | 53 - .../docker/docker/client/container_kill.go | 16 - .../docker/docker/client/container_list.go | 56 - .../docker/docker/client/container_logs.go | 80 - .../docker/docker/client/container_pause.go | 10 - .../docker/docker/client/container_prune.go | 36 - .../docker/docker/client/container_remove.go | 27 - .../docker/docker/client/container_rename.go | 15 - .../docker/docker/client/container_resize.go | 29 - .../docker/docker/client/container_restart.go | 22 - .../docker/docker/client/container_start.go | 23 - .../docker/docker/client/container_stats.go | 26 - .../docker/docker/client/container_stop.go | 26 - .../docker/docker/client/container_top.go | 28 - .../docker/docker/client/container_unpause.go | 10 - .../docker/docker/client/container_update.go | 22 - .../docker/docker/client/container_wait.go | 83 - .../docker/docker/client/disk_usage.go | 26 - .../docker/client/distribution_inspect.go | 38 - .../github.com/docker/docker/client/errors.go | 132 - 
.../github.com/docker/docker/client/events.go | 101 - .../github.com/docker/docker/client/hijack.go | 129 - .../docker/docker/client/image_build.go | 137 - .../docker/docker/client/image_create.go | 37 - .../docker/docker/client/image_history.go | 22 - .../docker/docker/client/image_import.go | 40 - .../docker/docker/client/image_inspect.go | 32 - .../docker/docker/client/image_list.go | 45 - .../docker/docker/client/image_load.go | 29 - .../docker/docker/client/image_prune.go | 36 - .../docker/docker/client/image_pull.go | 64 - .../docker/docker/client/image_push.go | 55 - .../docker/docker/client/image_remove.go | 31 - .../docker/docker/client/image_save.go | 21 - .../docker/docker/client/image_search.go | 51 - .../docker/docker/client/image_tag.go | 37 - .../github.com/docker/docker/client/info.go | 26 - .../docker/docker/client/interface.go | 197 -- .../docker/client/interface_experimental.go | 18 - .../docker/docker/client/interface_stable.go | 10 - .../github.com/docker/docker/client/login.go | 29 - .../docker/docker/client/network_connect.go | 19 - .../docker/docker/client/network_create.go | 25 - .../docker/client/network_disconnect.go | 15 - .../docker/docker/client/network_inspect.go | 49 - .../docker/docker/client/network_list.go | 31 - .../docker/docker/client/network_prune.go | 36 - .../docker/docker/client/network_remove.go | 10 - .../docker/docker/client/node_inspect.go | 32 - .../docker/docker/client/node_list.go | 36 - .../docker/docker/client/node_remove.go | 20 - .../docker/docker/client/node_update.go | 18 - .../github.com/docker/docker/client/ping.go | 32 - .../docker/docker/client/plugin_create.go | 26 - .../docker/docker/client/plugin_disable.go | 19 - .../docker/docker/client/plugin_enable.go | 19 - .../docker/docker/client/plugin_inspect.go | 31 - .../docker/docker/client/plugin_install.go | 113 - .../docker/docker/client/plugin_list.go | 32 - .../docker/docker/client/plugin_push.go | 16 - .../docker/docker/client/plugin_remove.go | 20 - .../docker/docker/client/plugin_set.go | 12 - .../docker/docker/client/plugin_upgrade.go | 39 - .../docker/docker/client/request.go | 259 -- .../docker/docker/client/secret_create.go | 25 - .../docker/docker/client/secret_inspect.go | 36 - .../docker/docker/client/secret_list.go | 38 - .../docker/docker/client/secret_remove.go | 13 - .../docker/docker/client/secret_update.go | 21 - .../docker/docker/client/service_create.go | 166 - .../docker/docker/client/service_inspect.go | 37 - .../docker/docker/client/service_list.go | 35 - .../docker/docker/client/service_logs.go | 52 - .../docker/docker/client/service_remove.go | 10 - .../docker/docker/client/service_update.go | 92 - .../docker/docker/client/session.go | 18 - .../docker/client/swarm_get_unlock_key.go | 21 - .../docker/docker/client/swarm_init.go | 21 - .../docker/docker/client/swarm_inspect.go | 21 - .../docker/docker/client/swarm_join.go | 14 - .../docker/docker/client/swarm_leave.go | 17 - .../docker/docker/client/swarm_unlock.go | 14 - .../docker/docker/client/swarm_update.go | 22 - .../docker/docker/client/task_inspect.go | 32 - .../docker/docker/client/task_list.go | 35 - .../docker/docker/client/task_logs.go | 51 - .../docker/docker/client/transport.go | 17 - .../github.com/docker/docker/client/utils.go | 34 - .../docker/docker/client/version.go | 21 - .../docker/docker/client/volume_create.go | 21 - .../docker/docker/client/volume_inspect.go | 38 - .../docker/docker/client/volume_list.go | 32 - .../docker/docker/client/volume_prune.go | 36 - 
.../docker/docker/client/volume_remove.go | 21 - .../docker/docker/cmd/dockerd/config.go | 99 - .../docker/cmd/dockerd/config_common_unix.go | 34 - .../docker/docker/cmd/dockerd/config_unix.go | 50 - .../docker/cmd/dockerd/config_windows.go | 26 - .../docker/docker/cmd/dockerd/daemon.go | 626 ---- .../docker/cmd/dockerd/daemon_freebsd.go | 9 - .../docker/docker/cmd/dockerd/daemon_linux.go | 13 - .../docker/docker/cmd/dockerd/daemon_unix.go | 117 - .../docker/cmd/dockerd/daemon_windows.go | 85 - .../docker/docker/cmd/dockerd/docker.go | 67 - .../docker/docker/cmd/dockerd/docker_unix.go | 8 - .../docker/cmd/dockerd/docker_windows.go | 38 - .../dockerd/hack/malformed_host_override.go | 121 - .../docker/docker/cmd/dockerd/metrics.go | 27 - .../docker/docker/cmd/dockerd/options.go | 122 - .../docker/cmd/dockerd/service_unsupported.go | 10 - .../docker/cmd/dockerd/service_windows.go | 430 --- .../docker/docker/container/archive.go | 86 - .../docker/docker/container/container.go | 720 ----- .../docker/docker/container/container_unix.go | 463 --- .../docker/container/container_windows.go | 213 -- .../github.com/docker/docker/container/env.go | 43 - .../docker/docker/container/health.go | 82 - .../docker/docker/container/history.go | 30 - .../docker/docker/container/memory_store.go | 95 - .../docker/docker/container/monitor.go | 46 - .../docker/docker/container/mounts_unix.go | 12 - .../docker/docker/container/mounts_windows.go | 8 - .../docker/docker/container/state.go | 409 --- .../docker/docker/container/store.go | 28 - .../docker/docker/container/stream/attach.go | 175 - .../docker/docker/container/stream/streams.go | 146 - .../docker/docker/container/view.go | 494 --- .../docker/docker/contrib/apparmor/main.go | 56 - .../docker/contrib/apparmor/template.go | 268 -- .../contrib/docker-device-tool/device_tool.go | 167 - .../docker-device-tool/device_tool_windows.go | 4 - .../docker/contrib/httpserver/server.go | 12 - .../docker/docker/contrib/nnp-test/nnp-test.c | 10 - .../docker/docker/contrib/syscall-test/acct.c | 16 - .../docker/contrib/syscall-test/exit32.s | 7 - .../docker/docker/contrib/syscall-test/ns.c | 63 - .../docker/docker/contrib/syscall-test/raw.c | 14 - .../docker/contrib/syscall-test/setgid.c | 11 - .../docker/contrib/syscall-test/setuid.c | 11 - .../docker/contrib/syscall-test/socket.c | 30 - .../docker/contrib/syscall-test/userns.c | 63 - .../docker/docker/daemon/apparmor_default.go | 36 - .../daemon/apparmor_default_unsupported.go | 7 - .../docker/docker/daemon/archive.go | 449 --- .../docker/daemon/archive_tarcopyoptions.go | 15 - .../daemon/archive_tarcopyoptions_unix.go | 25 - .../daemon/archive_tarcopyoptions_windows.go | 10 - .../docker/docker/daemon/archive_unix.go | 31 - .../docker/docker/daemon/archive_windows.go | 39 - .../github.com/docker/docker/daemon/attach.go | 187 -- .../github.com/docker/docker/daemon/auth.go | 13 - .../docker/docker/daemon/bindmount_unix.go | 5 - .../docker/docker/daemon/caps/utils_unix.go | 141 - .../docker/docker/daemon/changes.go | 34 - .../docker/docker/daemon/checkpoint.go | 143 - .../docker/docker/daemon/cluster.go | 26 - .../docker/docker/daemon/cluster/cluster.go | 450 --- .../docker/docker/daemon/cluster/configs.go | 118 - .../cluster/controllers/plugin/controller.go | 261 -- .../docker/daemon/cluster/convert/config.go | 78 - .../daemon/cluster/convert/container.go | 398 --- .../docker/daemon/cluster/convert/network.go | 240 -- .../docker/daemon/cluster/convert/node.go | 94 - .../docker/daemon/cluster/convert/secret.go | 80 - 
.../docker/daemon/cluster/convert/service.go | 639 ---- .../docker/daemon/cluster/convert/swarm.go | 147 - .../docker/daemon/cluster/convert/task.go | 69 - .../docker/docker/daemon/cluster/errors.go | 61 - .../docker/daemon/cluster/executor/backend.go | 75 - .../cluster/executor/container/adapter.go | 477 --- .../cluster/executor/container/attachment.go | 74 - .../cluster/executor/container/container.go | 680 ---- .../cluster/executor/container/controller.go | 692 ---- .../cluster/executor/container/errors.go | 17 - .../cluster/executor/container/executor.go | 293 -- .../cluster/executor/container/validate.go | 40 - .../docker/docker/daemon/cluster/filters.go | 123 - .../docker/docker/daemon/cluster/helpers.go | 246 -- .../docker/daemon/cluster/listen_addr.go | 301 -- .../daemon/cluster/listen_addr_linux.go | 89 - .../daemon/cluster/listen_addr_others.go | 9 - .../docker/docker/daemon/cluster/networks.go | 316 -- .../docker/daemon/cluster/noderunner.go | 388 --- .../docker/docker/daemon/cluster/nodes.go | 105 - .../docker/daemon/cluster/provider/network.go | 37 - .../docker/docker/daemon/cluster/secrets.go | 118 - .../docker/docker/daemon/cluster/services.go | 602 ---- .../docker/docker/daemon/cluster/swarm.go | 569 ---- .../docker/docker/daemon/cluster/tasks.go | 87 - .../docker/docker/daemon/cluster/utils.go | 63 - .../github.com/docker/docker/daemon/commit.go | 186 -- .../docker/docker/daemon/config/config.go | 567 ---- .../daemon/config/config_common_unix.go | 71 - .../docker/daemon/config/config_unix.go | 87 - .../docker/daemon/config/config_windows.go | 57 - .../docker/docker/daemon/config/opts.go | 22 - .../docker/docker/daemon/configs.go | 21 - .../docker/docker/daemon/configs_linux.go | 5 - .../docker/daemon/configs_unsupported.go | 7 - .../docker/docker/daemon/configs_windows.go | 5 - .../docker/docker/daemon/container.go | 358 --- .../docker/docker/daemon/container_linux.go | 30 - .../docker/daemon/container_operations.go | 1150 ------- .../daemon/container_operations_unix.go | 403 --- .../daemon/container_operations_windows.go | 201 -- .../docker/docker/daemon/container_windows.go | 9 - .../github.com/docker/docker/daemon/create.go | 304 -- .../docker/docker/daemon/create_unix.go | 94 - .../docker/docker/daemon/create_windows.go | 93 - .../github.com/docker/docker/daemon/daemon.go | 1315 -------- .../docker/docker/daemon/daemon_linux.go | 133 - .../docker/docker/daemon/daemon_unix.go | 1523 --------- .../docker/daemon/daemon_unsupported.go | 5 - .../docker/docker/daemon/daemon_windows.go | 655 ---- .../docker/docker/daemon/debugtrap_unix.go | 27 - .../docker/daemon/debugtrap_unsupported.go | 7 - .../docker/docker/daemon/debugtrap_windows.go | 46 - .../github.com/docker/docker/daemon/delete.go | 152 - .../docker/docker/daemon/dependency.go | 17 - .../docker/daemon/discovery/discovery.go | 202 -- .../docker/docker/daemon/disk_usage.go | 50 - .../github.com/docker/docker/daemon/errors.go | 155 - .../github.com/docker/docker/daemon/events.go | 308 -- .../docker/docker/daemon/events/events.go | 165 - .../docker/docker/daemon/events/filter.go | 138 - .../docker/docker/daemon/events/metrics.go | 15 - .../daemon/events/testutils/testutils.go | 76 - .../github.com/docker/docker/daemon/exec.go | 324 -- .../docker/docker/daemon/exec/exec.go | 146 - .../docker/docker/daemon/exec_linux.go | 59 - .../docker/docker/daemon/exec_windows.go | 16 - .../github.com/docker/docker/daemon/export.go | 86 - .../docker/daemon/graphdriver/aufs/aufs.go | 678 ---- .../docker/daemon/graphdriver/aufs/dirs.go 
| 64 - .../docker/daemon/graphdriver/aufs/mount.go | 17 - .../daemon/graphdriver/aufs/mount_linux.go | 7 - .../graphdriver/aufs/mount_unsupported.go | 12 - .../docker/daemon/graphdriver/btrfs/btrfs.go | 663 ---- .../graphdriver/btrfs/dummy_unsupported.go | 3 - .../daemon/graphdriver/btrfs/version.go | 26 - .../daemon/graphdriver/btrfs/version_none.go | 14 - .../docker/daemon/graphdriver/copy/copy.go | 277 -- .../docker/daemon/graphdriver/counter.go | 62 - .../graphdriver/devmapper/device_setup.go | 231 -- .../daemon/graphdriver/devmapper/deviceset.go | 2824 ----------------- .../graphdriver/devmapper/devmapper_doc.go | 106 - .../daemon/graphdriver/devmapper/driver.go | 258 -- .../daemon/graphdriver/devmapper/mount.go | 66 - .../docker/daemon/graphdriver/driver.go | 307 -- .../daemon/graphdriver/driver_freebsd.go | 21 - .../docker/daemon/graphdriver/driver_linux.go | 124 - .../daemon/graphdriver/driver_unsupported.go | 13 - .../daemon/graphdriver/driver_windows.go | 12 - .../docker/daemon/graphdriver/errors.go | 36 - .../docker/daemon/graphdriver/fsdiff.go | 175 - .../graphdriver/graphtest/graphbench_unix.go | 257 -- .../graphdriver/graphtest/graphtest_unix.go | 352 -- .../graphtest/graphtest_windows.go | 1 - .../daemon/graphdriver/graphtest/testutil.go | 337 -- .../graphdriver/graphtest/testutil_unix.go | 69 - .../docker/daemon/graphdriver/lcow/lcow.go | 1052 ------ .../daemon/graphdriver/lcow/lcow_svm.go | 378 --- .../daemon/graphdriver/lcow/remotefs.go | 139 - .../daemon/graphdriver/lcow/remotefs_file.go | 211 -- .../graphdriver/lcow/remotefs_filedriver.go | 123 - .../graphdriver/lcow/remotefs_pathdriver.go | 212 -- .../daemon/graphdriver/overlay/overlay.go | 524 --- .../overlay/overlay_unsupported.go | 3 - .../daemon/graphdriver/overlay2/check.go | 134 - .../daemon/graphdriver/overlay2/mount.go | 89 - .../daemon/graphdriver/overlay2/overlay.go | 769 ----- .../overlay2/overlay_unsupported.go | 3 - .../daemon/graphdriver/overlay2/randomid.go | 81 - .../graphdriver/overlayutils/overlayutils.go | 25 - .../docker/daemon/graphdriver/plugin.go | 55 - .../docker/docker/daemon/graphdriver/proxy.go | 264 -- .../docker/daemon/graphdriver/quota/errors.go | 19 - .../daemon/graphdriver/quota/projectquota.go | 384 --- .../graphdriver/register/register_aufs.go | 8 - .../graphdriver/register/register_btrfs.go | 8 - .../register/register_devicemapper.go | 8 - .../graphdriver/register/register_overlay.go | 8 - .../graphdriver/register/register_overlay2.go | 8 - .../graphdriver/register/register_vfs.go | 6 - .../graphdriver/register/register_windows.go | 7 - .../graphdriver/register/register_zfs.go | 8 - .../daemon/graphdriver/vfs/copy_linux.go | 7 - .../graphdriver/vfs/copy_unsupported.go | 9 - .../docker/daemon/graphdriver/vfs/driver.go | 167 - .../daemon/graphdriver/vfs/quota_linux.go | 26 - .../graphdriver/vfs/quota_unsupported.go | 20 - .../daemon/graphdriver/windows/windows.go | 942 ------ .../docker/daemon/graphdriver/zfs/zfs.go | 431 --- .../daemon/graphdriver/zfs/zfs_freebsd.go | 38 - .../daemon/graphdriver/zfs/zfs_linux.go | 28 - .../daemon/graphdriver/zfs/zfs_unsupported.go | 11 - .../github.com/docker/docker/daemon/health.go | 381 --- .../docker/docker/daemon/images/cache.go | 27 - .../docker/docker/daemon/images/image.go | 64 - .../docker/daemon/images/image_builder.go | 219 -- .../docker/daemon/images/image_commit.go | 127 - .../docker/daemon/images/image_delete.go | 414 --- .../docker/daemon/images/image_events.go | 39 - .../docker/daemon/images/image_exporter.go | 25 - 
.../docker/daemon/images/image_history.go | 87 - .../docker/daemon/images/image_import.go | 138 - .../docker/daemon/images/image_inspect.go | 104 - .../docker/daemon/images/image_prune.go | 211 -- .../docker/docker/daemon/images/image_pull.go | 131 - .../docker/docker/daemon/images/image_push.go | 66 - .../docker/daemon/images/image_search.go | 95 - .../docker/docker/daemon/images/image_tag.go | 41 - .../docker/docker/daemon/images/image_unix.go | 45 - .../docker/daemon/images/image_windows.go | 41 - .../docker/docker/daemon/images/images.go | 348 -- .../docker/docker/daemon/images/locals.go | 32 - .../docker/docker/daemon/images/service.go | 229 -- .../github.com/docker/docker/daemon/info.go | 206 -- .../docker/docker/daemon/info_unix.go | 93 - .../docker/docker/daemon/info_windows.go | 10 - .../docker/daemon/initlayer/setup_unix.go | 73 - .../docker/daemon/initlayer/setup_windows.go | 16 - .../docker/docker/daemon/inspect.go | 273 -- .../docker/docker/daemon/inspect_linux.go | 73 - .../docker/docker/daemon/inspect_windows.go | 26 - .../github.com/docker/docker/daemon/keys.go | 59 - .../docker/docker/daemon/keys_unsupported.go | 8 - .../github.com/docker/docker/daemon/kill.go | 180 -- .../github.com/docker/docker/daemon/links.go | 91 - .../docker/docker/daemon/links/links.go | 141 - .../github.com/docker/docker/daemon/list.go | 607 ---- .../docker/docker/daemon/list_unix.go | 11 - .../docker/docker/daemon/list_windows.go | 20 - .../docker/daemon/listeners/group_unix.go | 34 - .../daemon/listeners/listeners_linux.go | 102 - .../daemon/listeners/listeners_windows.go | 54 - .../docker/docker/daemon/logdrivers_linux.go | 15 - .../docker/daemon/logdrivers_windows.go | 14 - .../docker/docker/daemon/logger/adapter.go | 139 - .../daemon/logger/awslogs/cloudwatchlogs.go | 744 ----- .../docker/docker/daemon/logger/copier.go | 186 -- .../daemon/logger/etwlogs/etwlogs_windows.go | 168 - .../docker/docker/daemon/logger/factory.go | 162 - .../docker/daemon/logger/fluentd/fluentd.go | 263 -- .../daemon/logger/gcplogs/gcplogging.go | 244 -- .../daemon/logger/gcplogs/gcplogging_linux.go | 29 - .../logger/gcplogs/gcplogging_others.go | 7 - .../docker/docker/daemon/logger/gelf/gelf.go | 268 -- .../docker/daemon/logger/journald/journald.go | 127 - .../logger/journald/journald_unsupported.go | 6 - .../docker/daemon/logger/journald/read.go | 441 --- .../daemon/logger/journald/read_native.go | 6 - .../logger/journald/read_native_compat.go | 6 - .../logger/journald/read_unsupported.go | 7 - .../daemon/logger/jsonfilelog/jsonfilelog.go | 185 -- .../logger/jsonfilelog/jsonlog/jsonlog.go | 25 - .../jsonfilelog/jsonlog/jsonlogbytes.go | 125 - .../jsonfilelog/jsonlog/time_marshalling.go | 20 - .../docker/daemon/logger/jsonfilelog/read.go | 89 - .../daemon/logger/logentries/logentries.go | 115 - .../docker/docker/daemon/logger/logger.go | 145 - .../daemon/logger/loggerutils/log_tag.go | 31 - .../daemon/logger/loggerutils/logfile.go | 666 ---- .../loggerutils/multireader/multireader.go | 212 -- .../docker/docker/daemon/logger/loginfo.go | 129 - .../docker/docker/daemon/logger/metrics.go | 21 - .../docker/docker/daemon/logger/plugin.go | 116 - .../docker/daemon/logger/plugin_unix.go | 23 - .../daemon/logger/plugin_unsupported.go | 12 - .../docker/docker/daemon/logger/proxy.go | 107 - .../docker/docker/daemon/logger/ring.go | 223 -- .../docker/daemon/logger/splunk/splunk.go | 649 ---- .../docker/daemon/logger/syslog/syslog.go | 266 -- .../daemon/logger/templates/templates.go | 50 - 
.../github.com/docker/docker/daemon/logs.go | 209 -- .../docker/docker/daemon/metrics.go | 192 -- .../docker/docker/daemon/metrics_unix.go | 60 - .../docker/daemon/metrics_unsupported.go | 12 - .../docker/docker/daemon/monitor.go | 212 -- .../github.com/docker/docker/daemon/mounts.go | 55 - .../github.com/docker/docker/daemon/names.go | 113 - .../docker/docker/daemon/names/names.go | 9 - .../docker/docker/daemon/network.go | 918 ------ .../docker/docker/daemon/network/settings.go | 69 - .../docker/docker/daemon/oci_linux.go | 941 ------ .../docker/docker/daemon/oci_windows.go | 408 --- .../github.com/docker/docker/daemon/pause.go | 55 - .../github.com/docker/docker/daemon/prune.go | 250 -- .../github.com/docker/docker/daemon/reload.go | 324 -- .../docker/docker/daemon/reload_unix.go | 56 - .../docker/docker/daemon/reload_windows.go | 9 - .../github.com/docker/docker/daemon/rename.go | 123 - .../github.com/docker/docker/daemon/resize.go | 50 - .../docker/docker/daemon/restart.go | 70 - .../docker/docker/daemon/seccomp_disabled.go | 19 - .../docker/docker/daemon/seccomp_linux.go | 55 - .../docker/daemon/seccomp_unsupported.go | 5 - .../docker/docker/daemon/secrets.go | 23 - .../docker/docker/daemon/secrets_linux.go | 5 - .../docker/daemon/secrets_unsupported.go | 7 - .../docker/docker/daemon/secrets_windows.go | 5 - .../docker/docker/daemon/selinux_linux.go | 15 - .../docker/daemon/selinux_unsupported.go | 13 - .../github.com/docker/docker/daemon/start.go | 254 -- .../docker/docker/daemon/start_unix.go | 57 - .../docker/docker/daemon/start_windows.go | 38 - .../github.com/docker/docker/daemon/stats.go | 155 - .../docker/docker/daemon/stats/collector.go | 159 - .../docker/daemon/stats/collector_unix.go | 83 - .../docker/daemon/stats/collector_windows.go | 17 - .../docker/docker/daemon/stats_collector.go | 26 - .../docker/docker/daemon/stats_unix.go | 57 - .../docker/docker/daemon/stats_windows.go | 11 - .../github.com/docker/docker/daemon/stop.go | 89 - .../docker/docker/daemon/top_unix.go | 189 -- .../docker/docker/daemon/top_windows.go | 63 - .../docker/docker/daemon/trustkey.go | 57 - .../docker/docker/daemon/unpause.go | 44 - .../github.com/docker/docker/daemon/update.go | 95 - .../docker/docker/daemon/update_linux.go | 54 - .../docker/docker/daemon/update_windows.go | 11 - .../docker/docker/daemon/volumes.go | 417 --- .../docker/docker/daemon/volumes_linux.go | 36 - .../docker/docker/daemon/volumes_unix.go | 156 - .../docker/docker/daemon/volumes_windows.go | 51 - .../github.com/docker/docker/daemon/wait.go | 23 - .../docker/docker/daemon/workdir.go | 20 - .../docker/docker/distribution/config.go | 267 -- .../docker/docker/distribution/errors.go | 206 -- .../docker/distribution/metadata/metadata.go | 75 - .../distribution/metadata/v1_id_service.go | 51 - .../metadata/v2_metadata_service.go | 241 -- .../docker/docker/distribution/pull.go | 206 -- .../docker/docker/distribution/pull_v1.go | 367 --- .../docker/docker/distribution/pull_v2.go | 941 ------ .../docker/distribution/pull_v2_unix.go | 34 - .../docker/distribution/pull_v2_windows.go | 130 - .../docker/docker/distribution/push.go | 186 -- .../docker/docker/distribution/push_v1.go | 457 --- .../docker/docker/distribution/push_v2.go | 709 ----- .../docker/docker/distribution/registry.go | 156 - .../docker/distribution/utils/progress.go | 44 - .../docker/distribution/xfer/download.go | 474 --- .../docker/distribution/xfer/transfer.go | 401 --- .../docker/docker/distribution/xfer/upload.go | 174 - 
.../docker/docker/dockerversion/useragent.go | 76 - .../docker/dockerversion/version_lib.go | 17 - .../github.com/docker/docker/errdefs/defs.go | 74 - .../github.com/docker/docker/errdefs/doc.go | 8 - .../docker/docker/errdefs/helpers.go | 240 -- vendor/github.com/docker/docker/errdefs/is.go | 114 - .../agent/master/call.go | 132 - .../agent/master/master.go | 65 - .../agent/master/set.go | 28 - .../agent/types/types.go | 18 - .../agent/worker/executor.go | 118 - .../agent/worker/worker.go | 69 - .../integration-cli-on-swarm/host/compose.go | 122 - .../host/dockercmd.go | 64 - .../host/enumerate.go | 55 - .../integration-cli-on-swarm/host/host.go | 198 -- .../integration-cli-on-swarm/host/volume.go | 88 - .../docker/docker/hack/make/.go-autogen | 89 - .../docker/docker/hack/make/.go-autogen.ps1 | 93 - .../hack/make/.resources-windows/common.rc | 38 - .../hack/make/.resources-windows/docker.rc | 3 - .../hack/make/.resources-windows/dockerd.rc | 4 - .../make/.resources-windows/event_messages.mc | 39 - .../hack/make/.resources-windows/resources.go | 18 - .../docker/docker/image/cache/cache.go | 253 -- .../docker/docker/image/cache/compare.go | 63 - vendor/github.com/docker/docker/image/fs.go | 175 - .../github.com/docker/docker/image/image.go | 232 -- .../github.com/docker/docker/image/rootfs.go | 52 - .../github.com/docker/docker/image/store.go | 345 -- .../docker/docker/image/tarexport/load.go | 429 --- .../docker/docker/image/tarexport/save.go | 431 --- .../docker/image/tarexport/tarexport.go | 47 - .../docker/docker/image/v1/imagev1.go | 150 - .../docker/integration-cli/checker/checker.go | 46 - .../docker/integration-cli/cli/build/build.go | 82 - .../docker/docker/integration-cli/cli/cli.go | 226 -- .../docker/integration-cli/daemon/daemon.go | 143 - .../integration-cli/daemon/daemon_swarm.go | 197 -- .../environment/environment.go | 49 - .../requirement/requirement.go | 34 - .../docker/docker/integration/doc.go | 3 - .../internal/container/container.go | 54 - .../integration/internal/container/exec.go | 86 - .../integration/internal/container/ops.go | 136 - .../integration/internal/container/states.go | 41 - .../integration/internal/network/network.go | 35 - .../integration/internal/network/ops.go | 57 - .../internal/requirement/requirement.go | 53 - .../integration/internal/swarm/service.go | 200 -- .../docker/integration/network/helpers.go | 85 - .../plugin/logging/cmd/close_on_start/main.go | 48 - .../plugin/logging/cmd/dummy/main.go | 19 - .../plugin/volumes/cmd/dummy/main.go | 19 - .../docker/internal/test/daemon/config.go | 82 - .../docker/internal/test/daemon/container.go | 40 - .../docker/internal/test/daemon/daemon.go | 681 ---- .../internal/test/daemon/daemon_unix.go | 39 - .../internal/test/daemon/daemon_windows.go | 25 - .../docker/internal/test/daemon/node.go | 82 - .../docker/docker/internal/test/daemon/ops.go | 44 - .../docker/internal/test/daemon/plugin.go | 77 - .../docker/internal/test/daemon/secret.go | 84 - .../docker/internal/test/daemon/service.go | 131 - .../docker/internal/test/daemon/swarm.go | 194 -- .../docker/internal/test/environment/clean.go | 217 -- .../internal/test/environment/environment.go | 158 - .../internal/test/environment/protect.go | 254 -- .../internal/test/fakecontext/context.go | 131 - .../docker/internal/test/fakegit/fakegit.go | 136 - .../internal/test/fakestorage/fixtures.go | 92 - .../internal/test/fakestorage/storage.go | 200 -- .../internal/test/fixtures/load/frozen.go | 196 -- .../test/fixtures/plugin/basic/basic.go | 34 - 
.../internal/test/fixtures/plugin/plugin.go | 216 -- .../docker/docker/internal/test/helper.go | 6 - .../docker/internal/test/registry/ops.go | 26 - .../docker/internal/test/registry/registry.go | 255 -- .../internal/test/registry/registry_mock.go | 71 - .../docker/internal/test/request/npipe.go | 12 - .../internal/test/request/npipe_windows.go | 12 - .../docker/internal/test/request/ops.go | 78 - .../docker/internal/test/request/request.go | 218 -- .../docker/internal/testutil/helpers.go | 17 - .../docker/internal/testutil/stringutils.go | 14 - .../github.com/docker/docker/layer/empty.go | 61 - .../docker/docker/layer/filestore.go | 355 --- .../docker/docker/layer/filestore_unix.go | 15 - .../docker/docker/layer/filestore_windows.go | 35 - .../github.com/docker/docker/layer/layer.go | 237 -- .../docker/docker/layer/layer_store.go | 750 ----- .../docker/layer/layer_store_windows.go | 11 - .../docker/docker/layer/layer_unix.go | 9 - .../docker/docker/layer/layer_windows.go | 46 - .../docker/docker/layer/migration.go | 252 -- .../docker/docker/layer/mounted_layer.go | 100 - .../docker/docker/layer/ro_layer.go | 178 -- .../docker/docker/layer/ro_layer_windows.go | 9 - .../docker/libcontainerd/client_daemon.go | 894 ------ .../libcontainerd/client_daemon_linux.go | 108 - .../libcontainerd/client_daemon_windows.go | 55 - .../libcontainerd/client_local_windows.go | 1319 -------- .../docker/docker/libcontainerd/errors.go | 13 - .../docker/libcontainerd/process_windows.go | 44 - .../docker/docker/libcontainerd/queue.go | 35 - .../docker/libcontainerd/remote_daemon.go | 344 -- .../libcontainerd/remote_daemon_linux.go | 61 - .../libcontainerd/remote_daemon_options.go | 141 - .../remote_daemon_options_linux.go | 18 - .../libcontainerd/remote_daemon_windows.go | 50 - .../docker/libcontainerd/remote_local.go | 59 - .../docker/docker/libcontainerd/types.go | 108 - .../docker/libcontainerd/types_linux.go | 30 - .../docker/libcontainerd/types_windows.go | 42 - .../docker/libcontainerd/utils_linux.go | 12 - .../docker/libcontainerd/utils_windows.go | 46 - .../docker/docker/migrate/v1/migratev1.go | 501 --- .../github.com/docker/docker/oci/defaults.go | 211 -- .../docker/docker/oci/devices_linux.go | 86 - .../docker/docker/oci/devices_unsupported.go | 20 - .../docker/docker/oci/namespaces.go | 13 - .../docker/docker/opts/address_pools.go | 84 - vendor/github.com/docker/docker/opts/env.go | 48 - vendor/github.com/docker/docker/opts/hosts.go | 165 - .../docker/docker/opts/hosts_unix.go | 8 - .../docker/docker/opts/hosts_windows.go | 4 - vendor/github.com/docker/docker/opts/ip.go | 47 - vendor/github.com/docker/docker/opts/opts.go | 337 -- .../docker/docker/opts/opts_unix.go | 6 - .../docker/docker/opts/opts_windows.go | 56 - .../docker/docker/opts/quotedstring.go | 37 - .../github.com/docker/docker/opts/runtime.go | 79 - .../github.com/docker/docker/opts/ulimit.go | 81 - .../docker/docker/pkg/aaparser/aaparser.go | 89 - .../docker/docker/pkg/archive/archive.go | 1291 -------- .../docker/pkg/archive/archive_linux.go | 92 - .../docker/pkg/archive/archive_other.go | 7 - .../docker/docker/pkg/archive/archive_unix.go | 114 - .../docker/pkg/archive/archive_windows.go | 77 - .../docker/docker/pkg/archive/changes.go | 441 --- .../docker/pkg/archive/changes_linux.go | 313 -- .../docker/pkg/archive/changes_other.go | 97 - .../docker/docker/pkg/archive/changes_unix.go | 37 - .../docker/pkg/archive/changes_windows.go | 30 - .../docker/docker/pkg/archive/copy.go | 472 --- .../docker/docker/pkg/archive/copy_unix.go | 11 - 
.../docker/docker/pkg/archive/copy_windows.go | 9 - .../docker/docker/pkg/archive/diff.go | 258 -- .../docker/pkg/archive/example_changes.go | 97 - .../docker/docker/pkg/archive/time_linux.go | 16 - .../docker/pkg/archive/time_unsupported.go | 16 - .../docker/docker/pkg/archive/whiteouts.go | 23 - .../docker/docker/pkg/archive/wrap.go | 59 - .../docker/docker/pkg/authorization/api.go | 88 - .../docker/docker/pkg/authorization/authz.go | 189 -- .../docker/pkg/authorization/middleware.go | 110 - .../docker/docker/pkg/authorization/plugin.go | 118 - .../docker/pkg/authorization/response.go | 210 -- .../docker/pkg/broadcaster/unbuffered.go | 49 - .../docker/pkg/chrootarchive/archive.go | 73 - .../docker/pkg/chrootarchive/archive_unix.go | 88 - .../pkg/chrootarchive/archive_windows.go | 22 - .../docker/pkg/chrootarchive/chroot_linux.go | 113 - .../docker/pkg/chrootarchive/chroot_unix.go | 12 - .../docker/docker/pkg/chrootarchive/diff.go | 23 - .../docker/pkg/chrootarchive/diff_unix.go | 130 - .../docker/pkg/chrootarchive/diff_windows.go | 45 - .../docker/pkg/chrootarchive/init_unix.go | 28 - .../docker/pkg/chrootarchive/init_windows.go | 4 - .../docker/docker/pkg/containerfs/archiver.go | 203 -- .../docker/pkg/containerfs/containerfs.go | 87 - .../pkg/containerfs/containerfs_unix.go | 10 - .../pkg/containerfs/containerfs_windows.go | 15 - .../docker/pkg/devicemapper/devmapper.go | 826 ----- .../docker/pkg/devicemapper/devmapper_log.go | 124 - .../pkg/devicemapper/devmapper_wrapper.go | 252 -- .../devicemapper/devmapper_wrapper_dynamic.go | 6 - ...vmapper_wrapper_dynamic_deferred_remove.go | 35 - ...r_wrapper_dynamic_dlsym_deferred_remove.go | 128 - .../devmapper_wrapper_no_deferred_remove.go | 17 - .../docker/docker/pkg/devicemapper/ioctl.go | 28 - .../docker/docker/pkg/devicemapper/log.go | 11 - .../docker/docker/pkg/directory/directory.go | 26 - .../docker/pkg/directory/directory_unix.go | 54 - .../docker/pkg/directory/directory_windows.go | 42 - .../docker/docker/pkg/discovery/backends.go | 107 - .../docker/docker/pkg/discovery/discovery.go | 35 - .../docker/docker/pkg/discovery/entry.go | 94 - .../docker/docker/pkg/discovery/file/file.go | 107 - .../docker/docker/pkg/discovery/generator.go | 35 - .../docker/docker/pkg/discovery/kv/kv.go | 192 -- .../docker/pkg/discovery/memory/memory.go | 93 - .../docker/pkg/discovery/nodes/nodes.go | 54 - .../docker/docker/pkg/dmesg/dmesg_linux.go | 18 - .../docker/pkg/filenotify/filenotify.go | 40 - .../docker/docker/pkg/filenotify/fsnotify.go | 18 - .../docker/docker/pkg/filenotify/poller.go | 204 -- .../docker/docker/pkg/fileutils/fileutils.go | 298 -- .../docker/pkg/fileutils/fileutils_darwin.go | 27 - .../docker/pkg/fileutils/fileutils_unix.go | 22 - .../docker/pkg/fileutils/fileutils_windows.go | 7 - .../docker/pkg/fsutils/fsutils_linux.go | 86 - .../docker/pkg/homedir/homedir_linux.go | 21 - .../docker/pkg/homedir/homedir_others.go | 13 - .../docker/docker/pkg/homedir/homedir_unix.go | 34 - .../docker/pkg/homedir/homedir_windows.go | 24 - .../docker/docker/pkg/idtools/idtools.go | 266 -- .../docker/docker/pkg/idtools/idtools_unix.go | 230 -- .../docker/pkg/idtools/idtools_windows.go | 23 - .../docker/pkg/idtools/usergroupadd_linux.go | 164 - .../pkg/idtools/usergroupadd_unsupported.go | 12 - .../docker/docker/pkg/idtools/utils_unix.go | 32 - .../docker/docker/pkg/ioutils/buffer.go | 51 - .../docker/docker/pkg/ioutils/bytespipe.go | 186 -- .../docker/docker/pkg/ioutils/fswriters.go | 162 - .../docker/docker/pkg/ioutils/readers.go | 157 - 
.../docker/docker/pkg/ioutils/temp_unix.go | 10 - .../docker/docker/pkg/ioutils/temp_windows.go | 16 - .../docker/docker/pkg/ioutils/writeflusher.go | 92 - .../docker/docker/pkg/ioutils/writers.go | 66 - .../docker/pkg/jsonmessage/jsonmessage.go | 335 -- .../docker/docker/pkg/locker/locker.go | 112 - .../docker/docker/pkg/longpath/longpath.go | 26 - .../docker/pkg/loopback/attach_loopback.go | 137 - .../docker/docker/pkg/loopback/ioctl.go | 48 - .../docker/pkg/loopback/loop_wrapper.go | 52 - .../docker/docker/pkg/loopback/loopback.go | 64 - .../docker/docker/pkg/mount/flags.go | 149 - .../docker/docker/pkg/mount/flags_freebsd.go | 49 - .../docker/docker/pkg/mount/flags_linux.go | 87 - .../docker/pkg/mount/flags_unsupported.go | 31 - .../docker/docker/pkg/mount/mount.go | 141 - .../docker/pkg/mount/mounter_freebsd.go | 60 - .../docker/docker/pkg/mount/mounter_linux.go | 57 - .../docker/pkg/mount/mounter_unsupported.go | 11 - .../docker/docker/pkg/mount/mountinfo.go | 40 - .../docker/pkg/mount/mountinfo_freebsd.go | 55 - .../docker/pkg/mount/mountinfo_linux.go | 132 - .../docker/pkg/mount/mountinfo_unsupported.go | 12 - .../docker/pkg/mount/mountinfo_windows.go | 6 - .../docker/pkg/mount/sharedsubtree_linux.go | 67 - .../cmd/names-generator/main.go | 14 - .../pkg/namesgenerator/names-generator.go | 645 ---- .../docker/pkg/parsers/kernel/kernel.go | 74 - .../pkg/parsers/kernel/kernel_darwin.go | 56 - .../docker/pkg/parsers/kernel/kernel_unix.go | 35 - .../pkg/parsers/kernel/kernel_windows.go | 51 - .../docker/pkg/parsers/kernel/uname_linux.go | 17 - .../pkg/parsers/kernel/uname_solaris.go | 14 - .../pkg/parsers/kernel/uname_unsupported.go | 18 - .../operatingsystem/operatingsystem_linux.go | 77 - .../operatingsystem/operatingsystem_unix.go | 25 - .../operatingsystem_windows.go | 51 - .../docker/docker/pkg/parsers/parsers.go | 69 - .../docker/docker/pkg/pidfile/pidfile.go | 53 - .../docker/pkg/pidfile/pidfile_darwin.go | 14 - .../docker/docker/pkg/pidfile/pidfile_unix.go | 16 - .../docker/pkg/pidfile/pidfile_windows.go | 25 - .../docker/pkg/platform/architecture_linux.go | 18 - .../docker/pkg/platform/architecture_unix.go | 20 - .../pkg/platform/architecture_windows.go | 60 - .../docker/docker/pkg/platform/platform.go | 23 - .../docker/docker/pkg/plugingetter/getter.go | 52 - .../docker/docker/pkg/plugins/client.go | 242 -- .../docker/docker/pkg/plugins/discovery.go | 154 - .../docker/pkg/plugins/discovery_unix.go | 5 - .../docker/pkg/plugins/discovery_windows.go | 8 - .../docker/docker/pkg/plugins/errors.go | 33 - .../pkg/plugins/pluginrpc-gen/fixtures/foo.go | 83 - .../fixtures/otherfixture/spaceship.go | 4 - .../docker/pkg/plugins/pluginrpc-gen/main.go | 91 - .../pkg/plugins/pluginrpc-gen/parser.go | 263 -- .../pkg/plugins/pluginrpc-gen/template.go | 118 - .../docker/docker/pkg/plugins/plugins.go | 337 -- .../docker/docker/pkg/plugins/plugins_unix.go | 9 - .../docker/pkg/plugins/plugins_windows.go | 7 - .../docker/pkg/plugins/transport/http.go | 36 - .../docker/pkg/plugins/transport/transport.go | 36 - .../docker/docker/pkg/pools/pools.go | 137 - .../docker/docker/pkg/progress/progress.go | 89 - .../docker/pkg/progress/progressreader.go | 66 - .../docker/docker/pkg/pubsub/publisher.go | 121 - .../docker/docker/pkg/reexec/command_linux.go | 28 - .../docker/docker/pkg/reexec/command_unix.go | 23 - .../docker/pkg/reexec/command_unsupported.go | 12 - .../docker/pkg/reexec/command_windows.go | 21 - .../docker/docker/pkg/reexec/reexec.go | 47 - .../docker/docker/pkg/signal/signal.go | 54 - 
.../docker/docker/pkg/signal/signal_darwin.go | 41 - .../docker/pkg/signal/signal_freebsd.go | 43 - .../docker/docker/pkg/signal/signal_linux.go | 81 - .../docker/docker/pkg/signal/signal_unix.go | 21 - .../docker/pkg/signal/signal_unsupported.go | 10 - .../docker/pkg/signal/signal_windows.go | 26 - .../docker/pkg/signal/testfiles/main.go | 43 - .../docker/docker/pkg/signal/trap.go | 104 - .../docker/docker/pkg/stdcopy/stdcopy.go | 190 -- .../pkg/streamformatter/streamformatter.go | 159 - .../pkg/streamformatter/streamwriter.go | 47 - .../docker/docker/pkg/stringid/stringid.go | 99 - .../docker/docker/pkg/symlink/fs.go | 144 - .../docker/docker/pkg/symlink/fs_unix.go | 15 - .../docker/docker/pkg/symlink/fs_windows.go | 169 - .../docker/docker/pkg/sysinfo/numcpu.go | 12 - .../docker/docker/pkg/sysinfo/numcpu_linux.go | 42 - .../docker/pkg/sysinfo/numcpu_windows.go | 35 - .../docker/docker/pkg/sysinfo/sysinfo.go | 144 - .../docker/pkg/sysinfo/sysinfo_linux.go | 254 -- .../docker/docker/pkg/sysinfo/sysinfo_unix.go | 9 - .../docker/pkg/sysinfo/sysinfo_windows.go | 7 - .../docker/docker/pkg/system/chtimes.go | 31 - .../docker/docker/pkg/system/chtimes_unix.go | 14 - .../docker/pkg/system/chtimes_windows.go | 26 - .../docker/docker/pkg/system/errors.go | 13 - .../docker/docker/pkg/system/exitcode.go | 19 - .../docker/docker/pkg/system/filesys.go | 67 - .../docker/pkg/system/filesys_windows.go | 296 -- .../docker/docker/pkg/system/init.go | 22 - .../docker/docker/pkg/system/init_unix.go | 7 - .../docker/docker/pkg/system/init_windows.go | 12 - .../docker/docker/pkg/system/lcow.go | 69 - .../docker/docker/pkg/system/lcow_unix.go | 8 - .../docker/docker/pkg/system/lcow_windows.go | 6 - .../docker/docker/pkg/system/lstat_unix.go | 19 - .../docker/docker/pkg/system/lstat_windows.go | 14 - .../docker/docker/pkg/system/meminfo.go | 17 - .../docker/docker/pkg/system/meminfo_linux.go | 65 - .../docker/pkg/system/meminfo_unsupported.go | 8 - .../docker/pkg/system/meminfo_windows.go | 45 - .../docker/docker/pkg/system/mknod.go | 22 - .../docker/docker/pkg/system/mknod_windows.go | 11 - .../docker/docker/pkg/system/path.go | 60 - .../docker/docker/pkg/system/process_unix.go | 24 - .../docker/pkg/system/process_windows.go | 18 - .../github.com/docker/docker/pkg/system/rm.go | 80 - .../docker/docker/pkg/system/stat_darwin.go | 13 - .../docker/docker/pkg/system/stat_freebsd.go | 13 - .../docker/docker/pkg/system/stat_linux.go | 19 - .../docker/docker/pkg/system/stat_openbsd.go | 13 - .../docker/docker/pkg/system/stat_solaris.go | 13 - .../docker/docker/pkg/system/stat_unix.go | 65 - .../docker/docker/pkg/system/stat_windows.go | 49 - .../docker/docker/pkg/system/syscall_unix.go | 17 - .../docker/pkg/system/syscall_windows.go | 127 - .../docker/docker/pkg/system/umask.go | 13 - .../docker/docker/pkg/system/umask_windows.go | 7 - .../docker/pkg/system/utimes_freebsd.go | 24 - .../docker/docker/pkg/system/utimes_linux.go | 25 - .../docker/pkg/system/utimes_unsupported.go | 10 - .../docker/docker/pkg/system/xattrs_linux.go | 29 - .../docker/pkg/system/xattrs_unsupported.go | 13 - .../docker/docker/pkg/tailfile/tailfile.go | 66 - .../docker/pkg/tarsum/builder_context.go | 21 - .../docker/docker/pkg/tarsum/fileinfosums.go | 133 - .../docker/docker/pkg/tarsum/tarsum.go | 301 -- .../docker/docker/pkg/tarsum/versioning.go | 158 - .../docker/docker/pkg/tarsum/writercloser.go | 22 - .../docker/pkg/truncindex/truncindex.go | 139 - .../docker/docker/pkg/urlutil/urlutil.go | 52 - .../docker/docker/pkg/useragent/useragent.go 
| 55 - .../docker/docker/plugin/backend_linux.go | 876 ----- .../docker/plugin/backend_unsupported.go | 72 - .../docker/docker/plugin/blobstore.go | 190 -- .../github.com/docker/docker/plugin/defs.go | 50 - .../github.com/docker/docker/plugin/errors.go | 66 - .../github.com/docker/docker/plugin/events.go | 111 - .../plugin/executor/containerd/containerd.go | 175 - .../docker/docker/plugin/manager.go | 384 --- .../docker/docker/plugin/manager_linux.go | 335 -- .../docker/docker/plugin/manager_windows.go | 28 - .../github.com/docker/docker/plugin/store.go | 291 -- .../docker/docker/plugin/v2/plugin.go | 311 -- .../docker/docker/plugin/v2/plugin_linux.go | 141 - .../docker/plugin/v2/plugin_unsupported.go | 14 - .../docker/docker/plugin/v2/settable.go | 102 - .../docker/profiles/apparmor/apparmor.go | 114 - .../docker/profiles/apparmor/template.go | 44 - .../docker/profiles/seccomp/generate.go | 32 - .../docker/docker/profiles/seccomp/seccomp.go | 160 - .../profiles/seccomp/seccomp_default.go | 640 ---- .../profiles/seccomp/seccomp_unsupported.go | 12 - .../docker/docker/reference/errors.go | 25 - .../docker/docker/reference/store.go | 343 -- .../github.com/docker/docker/registry/auth.go | 296 -- .../docker/docker/registry/config.go | 442 --- .../docker/docker/registry/config_unix.go | 16 - .../docker/docker/registry/config_windows.go | 18 - .../docker/docker/registry/endpoint_v1.go | 198 -- .../docker/docker/registry/errors.go | 31 - .../docker/docker/registry/registry.go | 191 -- .../resumable/resumablerequestreader.go | 96 - .../docker/docker/registry/service.go | 328 -- .../docker/docker/registry/service_v1.go | 40 - .../docker/docker/registry/service_v2.go | 82 - .../docker/docker/registry/session.go | 779 ----- .../docker/docker/registry/types.go | 70 - .../docker/restartmanager/restartmanager.go | 133 - .../docker/docker/runconfig/config.go | 81 - .../docker/docker/runconfig/config_unix.go | 59 - .../docker/docker/runconfig/config_windows.go | 19 - .../docker/docker/runconfig/errors.go | 42 - .../docker/docker/runconfig/hostconfig.go | 79 - .../docker/runconfig/hostconfig_unix.go | 110 - .../docker/runconfig/hostconfig_windows.go | 96 - .../docker/docker/runconfig/opts/parse.go | 20 - .../docker/docker/volume/drivers/adapter.go | 176 - .../docker/docker/volume/drivers/extpoint.go | 235 -- .../docker/docker/volume/drivers/proxy.go | 255 -- .../docker/docker/volume/local/local.go | 378 --- .../docker/docker/volume/local/local_unix.go | 99 - .../docker/volume/local/local_windows.go | 46 - .../docker/volume/mounts/lcow_parser.go | 34 - .../docker/volume/mounts/linux_parser.go | 417 --- .../docker/docker/volume/mounts/mounts.go | 170 - .../docker/docker/volume/mounts/parser.go | 47 - .../docker/docker/volume/mounts/validate.go | 28 - .../docker/volume/mounts/volume_copy.go | 23 - .../docker/volume/mounts/volume_unix.go | 18 - .../docker/volume/mounts/volume_windows.go | 8 - .../docker/volume/mounts/windows_parser.go | 456 --- .../docker/docker/volume/service/by.go | 89 - .../docker/docker/volume/service/convert.go | 132 - .../docker/docker/volume/service/db.go | 95 - .../docker/volume/service/default_driver.go | 21 - .../volume/service/default_driver_stubs.go | 10 - .../docker/docker/volume/service/errors.go | 111 - .../docker/docker/volume/service/opts/opts.go | 89 - .../docker/docker/volume/service/restore.go | 85 - .../docker/docker/volume/service/service.go | 243 -- .../docker/docker/volume/service/store.go | 858 ----- .../docker/volume/service/store_unix.go | 9 - 
.../docker/volume/service/store_windows.go | 12 - .../docker/volume/testutils/testutils.go | 227 -- .../github.com/docker/docker/volume/volume.go | 69 - .../emicklei/go-restful/examples/.goconvey | 1 - .../examples/google_app_engine/.goconvey | 1 - .../google_app_engine/datastore/.goconvey | 1 - .../google_app_engine/datastore/main.go | 267 -- .../restful-appstats-integration.go | 12 - .../google_app_engine/restful-user-service.go | 162 - .../examples/msgpack/msgpack_entity.go | 34 - .../examples/restful-CORS-filter.go | 68 - .../examples/restful-NCSA-logging.go | 54 - .../examples/restful-basic-authentication.go | 35 - .../examples/restful-cpuprofiler-service.go | 65 - .../examples/restful-curly-router.go | 107 - .../examples/restful-encoding-filter.go | 61 - .../go-restful/examples/restful-filters.go | 114 - .../examples/restful-form-handling.go | 63 - .../examples/restful-hello-world.go | 23 - .../examples/restful-html-template.go | 35 - .../examples/restful-multi-containers.go | 43 - .../examples/restful-no-cache-filter.go | 25 - .../examples/restful-options-filter.go | 51 - .../go-restful/examples/restful-path-tail.go | 27 - .../examples/restful-pre-post-filters.go | 98 - .../examples/restful-resource-functions.go | 63 - .../examples/restful-serve-static.go | 47 - .../go-restful/examples/restful-swagger.go | 61 - .../examples/restful-user-resource.go | 152 - .../examples/restful-user-service.go | 143 - .../examples/testproto/test.pb.go | 329 -- .../stretchr/testify/.travis.gofmt.sh | 7 - .../stretchr/testify/.travis.gogenerate.sh | 13 - .../stretchr/testify/.travis.govet.sh | 10 - vendor/google.golang.org/grpc/.travis.yml | 20 - .../grpc/benchmark/benchmain/main.go | 499 --- .../grpc/benchmark/benchmark.go | 364 --- .../grpc/benchmark/benchresult/main.go | 133 - .../grpc/benchmark/client/main.go | 180 -- .../grpc/benchmark/grpc_testing/control.pb.go | 1194 ------- .../grpc/benchmark/grpc_testing/control.proto | 186 -- .../benchmark/grpc_testing/messages.pb.go | 479 --- .../benchmark/grpc_testing/messages.proto | 157 - .../benchmark/grpc_testing/payloads.pb.go | 250 -- .../benchmark/grpc_testing/payloads.proto | 40 - .../benchmark/grpc_testing/services.pb.go | 442 --- .../benchmark/grpc_testing/services.proto | 56 - .../grpc/benchmark/grpc_testing/stats.pb.go | 208 -- .../grpc/benchmark/grpc_testing/stats.proto | 55 - .../grpc/benchmark/latency/latency.go | 316 -- .../grpc/benchmark/server/main.go | 53 - .../grpc/benchmark/stats/histogram.go | 222 -- .../grpc/benchmark/stats/stats.go | 291 -- .../grpc/benchmark/stats/util.go | 208 -- .../grpc/benchmark/worker/benchmark_client.go | 392 --- .../grpc/benchmark/worker/benchmark_server.go | 184 -- .../grpc/benchmark/worker/main.go | 229 -- .../grpc/benchmark/worker/util.go | 35 - vendor/google.golang.org/grpc/codegen.sh | 17 - .../helloworld/greeter_client/main.go | 54 - .../helloworld/greeter_server/main.go | 57 - .../helloworld/helloworld/helloworld.pb.go | 164 - .../helloworld/helloworld/helloworld.proto | 37 - .../helloworld/mock_helloworld/hw_mock.go | 48 - .../examples/route_guide/client/client.go | 184 -- .../route_guide/mock_routeguide/rg_mock.go | 200 -- .../route_guide/routeguide/route_guide.pb.go | 543 ---- .../route_guide/routeguide/route_guide.proto | 110 - .../examples/route_guide/server/server.go | 233 -- .../grpc/stress/client/main.go | 336 -- .../grpc/stress/grpc_testing/metrics.pb.go | 374 --- .../grpc/stress/grpc_testing/metrics.proto | 49 - .../grpc/stress/metrics_client/main.go | 82 - .../grpc/test/bufconn/bufconn.go | 
229 -- .../grpc/test/grpc_testing/test.pb.go | 788 ----- .../grpc/test/grpc_testing/test.proto | 154 - .../grpc/test/leakcheck/leakcheck.go | 118 - vendor/google.golang.org/grpc/test/race.go | 24 - .../grpc/test/servertester.go | 280 -- vendor/google.golang.org/grpc/vet.sh | 78 - .../client-go/pkg/apis/cr/register.go | 21 - .../examples/client-go/pkg/apis/cr/v1/doc.go | 21 - .../client-go/pkg/apis/cr/v1/register.go | 53 - .../client-go/pkg/apis/cr/v1/types.go | 63 - .../pkg/apis/cr/v1/zz_generated.deepcopy.go | 118 - .../client/clientset/versioned/clientset.go | 98 - .../pkg/client/clientset/versioned/doc.go | 20 - .../versioned/fake/clientset_generated.go | 82 - .../client/clientset/versioned/fake/doc.go | 20 - .../clientset/versioned/fake/register.go | 56 - .../client/clientset/versioned/scheme/doc.go | 20 - .../clientset/versioned/scheme/register.go | 56 - .../versioned/typed/cr/v1/cr_client.go | 90 - .../clientset/versioned/typed/cr/v1/doc.go | 20 - .../versioned/typed/cr/v1/example.go | 157 - .../versioned/typed/cr/v1/fake/doc.go | 20 - .../typed/cr/v1/fake/fake_cr_client.go | 40 - .../typed/cr/v1/fake/fake_example.go | 128 - .../typed/cr/v1/generated_expansion.go | 21 - .../externalversions/cr/interface.go | 46 - .../externalversions/cr/v1/example.go | 89 - .../externalversions/cr/v1/interface.go | 45 - .../informers/externalversions/factory.go | 180 -- .../informers/externalversions/generic.go | 62 - .../internalinterfaces/factory_interfaces.go | 38 - .../pkg/client/listers/cr/v1/example.go | 94 - .../listers/cr/v1/expansion_generated.go | 27 - .../k8s.io/apiserver/pkg/apis/example/doc.go | 22 - .../pkg/apis/example/fuzzer/fuzzer.go | 99 - .../pkg/apis/example/install/install.go | 33 - .../apiserver/pkg/apis/example/register.go | 52 - .../apiserver/pkg/apis/example/types.go | 170 - .../pkg/apis/example/v1/conversion.go | 26 - .../apiserver/pkg/apis/example/v1/defaults.go | 26 - .../apiserver/pkg/apis/example/v1/doc.go | 23 - .../pkg/apis/example/v1/generated.pb.go | 2008 ------------ .../apiserver/pkg/apis/example/v1/register.go | 63 - .../apiserver/pkg/apis/example/v1/types.go | 196 -- .../example/v1/zz_generated.conversion.go | 266 -- .../apis/example/v1/zz_generated.deepcopy.go | 164 - .../apis/example/v1/zz_generated.defaults.go | 32 - .../pkg/apis/example/zz_generated.deepcopy.go | 224 -- .../k8s.io/apiserver/pkg/apis/example2/doc.go | 24 - .../pkg/apis/example2/install/install.go | 33 - .../apiserver/pkg/apis/example2/register.go | 52 - .../pkg/apis/example2/v1/conversion.go | 51 - .../pkg/apis/example2/v1/defaults.go | 26 - .../apiserver/pkg/apis/example2/v1/doc.go | 24 - .../pkg/apis/example2/v1/generated.pb.go | 680 ---- .../pkg/apis/example2/v1/register.go | 63 - .../apiserver/pkg/apis/example2/v1/types.go | 64 - .../example2/v1/zz_generated.conversion.go | 144 - .../apis/example2/v1/zz_generated.deepcopy.go | 90 - .../apis/example2/v1/zz_generated.defaults.go | 32 - .../apis/example2/zz_generated.deepcopy.go | 21 - .../client-go/deprecated-dynamic/bad_debt.go | 79 - .../client-go/deprecated-dynamic/client.go | 131 - .../deprecated-dynamic/client_pool.go | 122 - .../create-update-delete-deployment/main.go | 171 - .../client-go/examples/fake-client/doc.go | 20 - .../in-cluster-client-configuration/main.go | 64 - .../main.go | 88 - .../client-go/examples/workqueue/main.go | 217 -- .../plugin/pkg/client/auth/azure/azure.go | 359 --- .../pkg/client/auth/openstack/openstack.go | 193 -- .../plugin/pkg/client/auth/plugins.go | 25 - vendor/k8s.io/client-go/scale/client.go | 221 -- 
vendor/k8s.io/client-go/scale/doc.go | 21 - vendor/k8s.io/client-go/scale/fake/client.go | 67 - vendor/k8s.io/client-go/scale/interfaces.go | 39 - .../client-go/scale/scheme/appsint/doc.go | 22 - .../scale/scheme/appsint/register.go | 53 - .../scale/scheme/appsv1beta1/conversion.go | 87 - .../client-go/scale/scheme/appsv1beta1/doc.go | 20 - .../scale/scheme/appsv1beta1/register.go | 45 - .../appsv1beta1/zz_generated.conversion.go | 143 - .../scale/scheme/appsv1beta2/conversion.go | 87 - .../client-go/scale/scheme/appsv1beta2/doc.go | 20 - .../scale/scheme/appsv1beta2/register.go | 45 - .../appsv1beta2/zz_generated.conversion.go | 143 - .../scale/scheme/autoscalingv1/conversion.go | 69 - .../scale/scheme/autoscalingv1/doc.go | 20 - .../scale/scheme/autoscalingv1/register.go | 45 - .../autoscalingv1/zz_generated.conversion.go | 142 - vendor/k8s.io/client-go/scale/scheme/doc.go | 22 - .../scale/scheme/extensionsint/doc.go | 22 - .../scale/scheme/extensionsint/register.go | 53 - .../scheme/extensionsv1beta1/conversion.go | 87 - .../scale/scheme/extensionsv1beta1/doc.go | 20 - .../scheme/extensionsv1beta1/register.go | 45 - .../zz_generated.conversion.go | 143 - .../k8s.io/client-go/scale/scheme/register.go | 52 - vendor/k8s.io/client-go/scale/scheme/types.go | 60 - .../scale/scheme/zz_generated.deepcopy.go | 91 - vendor/k8s.io/client-go/scale/util.go | 197 -- 1235 files changed, 152792 deletions(-) delete mode 100644 vendor/github.com/docker/docker/api/common.go delete mode 100644 vendor/github.com/docker/docker/api/common_unix.go delete mode 100644 vendor/github.com/docker/docker/api/common_windows.go delete mode 100644 vendor/github.com/docker/docker/api/server/backend/build/backend.go delete mode 100644 vendor/github.com/docker/docker/api/server/backend/build/tag.go delete mode 100644 vendor/github.com/docker/docker/api/server/httputils/decoder.go delete mode 100644 vendor/github.com/docker/docker/api/server/httputils/errors.go delete mode 100644 vendor/github.com/docker/docker/api/server/httputils/form.go delete mode 100644 vendor/github.com/docker/docker/api/server/httputils/httputils.go delete mode 100644 vendor/github.com/docker/docker/api/server/httputils/httputils_write_json.go delete mode 100644 vendor/github.com/docker/docker/api/server/httputils/write_log_stream.go delete mode 100644 vendor/github.com/docker/docker/api/server/middleware.go delete mode 100644 vendor/github.com/docker/docker/api/server/middleware/cors.go delete mode 100644 vendor/github.com/docker/docker/api/server/middleware/debug.go delete mode 100644 vendor/github.com/docker/docker/api/server/middleware/experimental.go delete mode 100644 vendor/github.com/docker/docker/api/server/middleware/middleware.go delete mode 100644 vendor/github.com/docker/docker/api/server/middleware/version.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/build/backend.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/build/build.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/build/build_routes.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/checkpoint/backend.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/checkpoint/checkpoint.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/checkpoint/checkpoint_routes.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/container/backend.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/container/container.go delete mode 
100644 vendor/github.com/docker/docker/api/server/router/container/container_routes.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/container/copy.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/container/exec.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/container/inspect.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/debug/debug.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/debug/debug_routes.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/distribution/backend.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/distribution/distribution.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/distribution/distribution_routes.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/experimental.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/image/backend.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/image/image.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/image/image_routes.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/local.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/network/backend.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/network/filter.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/network/network.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/network/network_routes.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/plugin/backend.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/plugin/plugin.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/plugin/plugin_routes.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/router.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/session/backend.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/session/session.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/session/session_routes.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/swarm/backend.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/swarm/cluster.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/swarm/cluster_routes.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/swarm/helpers.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/system/backend.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/system/system.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/system/system_routes.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/volume/backend.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/volume/volume.go delete mode 100644 vendor/github.com/docker/docker/api/server/router/volume/volume_routes.go delete mode 100644 vendor/github.com/docker/docker/api/server/router_swapper.go delete mode 100644 vendor/github.com/docker/docker/api/server/server.go delete mode 100644 vendor/github.com/docker/docker/api/templates/server/operation.gotmpl delete mode 100644 vendor/github.com/docker/docker/api/types/auth.go delete mode 100644 vendor/github.com/docker/docker/api/types/backend/backend.go delete mode 100644 
vendor/github.com/docker/docker/api/types/backend/build.go delete mode 100644 vendor/github.com/docker/docker/api/types/blkiodev/blkio.go delete mode 100644 vendor/github.com/docker/docker/api/types/client.go delete mode 100644 vendor/github.com/docker/docker/api/types/configs.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/config.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/container_changes.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/container_create.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/container_top.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/container_update.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/container_wait.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/host_config.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go delete mode 100644 vendor/github.com/docker/docker/api/types/container/waitcondition.go delete mode 100644 vendor/github.com/docker/docker/api/types/error_response.go delete mode 100644 vendor/github.com/docker/docker/api/types/events/events.go delete mode 100644 vendor/github.com/docker/docker/api/types/filters/parse.go delete mode 100644 vendor/github.com/docker/docker/api/types/graph_driver_data.go delete mode 100644 vendor/github.com/docker/docker/api/types/id_response.go delete mode 100644 vendor/github.com/docker/docker/api/types/image/image_history.go delete mode 100644 vendor/github.com/docker/docker/api/types/image_delete_response_item.go delete mode 100644 vendor/github.com/docker/docker/api/types/image_summary.go delete mode 100644 vendor/github.com/docker/docker/api/types/mount/mount.go delete mode 100644 vendor/github.com/docker/docker/api/types/network/network.go delete mode 100644 vendor/github.com/docker/docker/api/types/plugin.go delete mode 100644 vendor/github.com/docker/docker/api/types/plugin_device.go delete mode 100644 vendor/github.com/docker/docker/api/types/plugin_env.go delete mode 100644 vendor/github.com/docker/docker/api/types/plugin_interface_type.go delete mode 100644 vendor/github.com/docker/docker/api/types/plugin_mount.go delete mode 100644 vendor/github.com/docker/docker/api/types/plugin_responses.go delete mode 100644 vendor/github.com/docker/docker/api/types/plugins/logdriver/entry.pb.go delete mode 100644 vendor/github.com/docker/docker/api/types/plugins/logdriver/gen.go delete mode 100644 vendor/github.com/docker/docker/api/types/plugins/logdriver/io.go delete mode 100644 vendor/github.com/docker/docker/api/types/port.go delete mode 100644 vendor/github.com/docker/docker/api/types/registry/authenticate.go delete mode 100644 vendor/github.com/docker/docker/api/types/registry/registry.go delete mode 100644 vendor/github.com/docker/docker/api/types/seccomp.go delete mode 100644 vendor/github.com/docker/docker/api/types/service_update_response.go delete mode 100644 vendor/github.com/docker/docker/api/types/stats.go delete mode 100644 vendor/github.com/docker/docker/api/types/strslice/strslice.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/common.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/config.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/container.go delete mode 100644 
vendor/github.com/docker/docker/api/types/swarm/network.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/node.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/secret.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/service.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/swarm.go delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/task.go delete mode 100644 vendor/github.com/docker/docker/api/types/time/duration_convert.go delete mode 100644 vendor/github.com/docker/docker/api/types/time/timestamp.go delete mode 100644 vendor/github.com/docker/docker/api/types/types.go delete mode 100644 vendor/github.com/docker/docker/api/types/versions/compare.go delete mode 100644 vendor/github.com/docker/docker/api/types/versions/v1p19/types.go delete mode 100644 vendor/github.com/docker/docker/api/types/versions/v1p20/types.go delete mode 100644 vendor/github.com/docker/docker/api/types/volume.go delete mode 100644 vendor/github.com/docker/docker/api/types/volume/volume_create.go delete mode 100644 vendor/github.com/docker/docker/api/types/volume/volume_list.go delete mode 100644 vendor/github.com/docker/docker/builder/builder.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/buildargs.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/builder.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/builder_unix.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/builder_windows.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/clientsession.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/containerbackend.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/copy.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/copy_unix.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/copy_windows.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/evaluator.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/imagecontext.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/imageprobe.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/internals.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/internals_linux.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerfile/metrics.go delete mode 100644 vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go delete mode 100644 vendor/github.com/docker/docker/builder/fscache/fscache.go delete mode 100644 vendor/github.com/docker/docker/builder/fscache/naivedriver.go delete mode 100644 vendor/github.com/docker/docker/builder/remotecontext/archive.go delete mode 100644 vendor/github.com/docker/docker/builder/remotecontext/detect.go delete mode 100644 
vendor/github.com/docker/docker/builder/remotecontext/filehash.go delete mode 100644 vendor/github.com/docker/docker/builder/remotecontext/generate.go delete mode 100644 vendor/github.com/docker/docker/builder/remotecontext/git.go delete mode 100644 vendor/github.com/docker/docker/builder/remotecontext/git/gitutils.go delete mode 100644 vendor/github.com/docker/docker/builder/remotecontext/lazycontext.go delete mode 100644 vendor/github.com/docker/docker/builder/remotecontext/mimetype.go delete mode 100644 vendor/github.com/docker/docker/builder/remotecontext/remote.go delete mode 100644 vendor/github.com/docker/docker/builder/remotecontext/tarsum.go delete mode 100644 vendor/github.com/docker/docker/builder/remotecontext/tarsum.pb.go delete mode 100644 vendor/github.com/docker/docker/cli/cobra.go delete mode 100644 vendor/github.com/docker/docker/cli/config/configdir.go delete mode 100644 vendor/github.com/docker/docker/cli/debug/debug.go delete mode 100644 vendor/github.com/docker/docker/cli/error.go delete mode 100644 vendor/github.com/docker/docker/cli/required.go delete mode 100644 vendor/github.com/docker/docker/client/build_prune.go delete mode 100644 vendor/github.com/docker/docker/client/checkpoint_create.go delete mode 100644 vendor/github.com/docker/docker/client/checkpoint_delete.go delete mode 100644 vendor/github.com/docker/docker/client/checkpoint_list.go delete mode 100644 vendor/github.com/docker/docker/client/client.go delete mode 100644 vendor/github.com/docker/docker/client/client_unix.go delete mode 100644 vendor/github.com/docker/docker/client/client_windows.go delete mode 100644 vendor/github.com/docker/docker/client/config_create.go delete mode 100644 vendor/github.com/docker/docker/client/config_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/config_list.go delete mode 100644 vendor/github.com/docker/docker/client/config_remove.go delete mode 100644 vendor/github.com/docker/docker/client/config_update.go delete mode 100644 vendor/github.com/docker/docker/client/container_attach.go delete mode 100644 vendor/github.com/docker/docker/client/container_commit.go delete mode 100644 vendor/github.com/docker/docker/client/container_copy.go delete mode 100644 vendor/github.com/docker/docker/client/container_create.go delete mode 100644 vendor/github.com/docker/docker/client/container_diff.go delete mode 100644 vendor/github.com/docker/docker/client/container_exec.go delete mode 100644 vendor/github.com/docker/docker/client/container_export.go delete mode 100644 vendor/github.com/docker/docker/client/container_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/container_kill.go delete mode 100644 vendor/github.com/docker/docker/client/container_list.go delete mode 100644 vendor/github.com/docker/docker/client/container_logs.go delete mode 100644 vendor/github.com/docker/docker/client/container_pause.go delete mode 100644 vendor/github.com/docker/docker/client/container_prune.go delete mode 100644 vendor/github.com/docker/docker/client/container_remove.go delete mode 100644 vendor/github.com/docker/docker/client/container_rename.go delete mode 100644 vendor/github.com/docker/docker/client/container_resize.go delete mode 100644 vendor/github.com/docker/docker/client/container_restart.go delete mode 100644 vendor/github.com/docker/docker/client/container_start.go delete mode 100644 vendor/github.com/docker/docker/client/container_stats.go delete mode 100644 vendor/github.com/docker/docker/client/container_stop.go delete mode 100644 
vendor/github.com/docker/docker/client/container_top.go delete mode 100644 vendor/github.com/docker/docker/client/container_unpause.go delete mode 100644 vendor/github.com/docker/docker/client/container_update.go delete mode 100644 vendor/github.com/docker/docker/client/container_wait.go delete mode 100644 vendor/github.com/docker/docker/client/disk_usage.go delete mode 100644 vendor/github.com/docker/docker/client/distribution_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/errors.go delete mode 100644 vendor/github.com/docker/docker/client/events.go delete mode 100644 vendor/github.com/docker/docker/client/hijack.go delete mode 100644 vendor/github.com/docker/docker/client/image_build.go delete mode 100644 vendor/github.com/docker/docker/client/image_create.go delete mode 100644 vendor/github.com/docker/docker/client/image_history.go delete mode 100644 vendor/github.com/docker/docker/client/image_import.go delete mode 100644 vendor/github.com/docker/docker/client/image_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/image_list.go delete mode 100644 vendor/github.com/docker/docker/client/image_load.go delete mode 100644 vendor/github.com/docker/docker/client/image_prune.go delete mode 100644 vendor/github.com/docker/docker/client/image_pull.go delete mode 100644 vendor/github.com/docker/docker/client/image_push.go delete mode 100644 vendor/github.com/docker/docker/client/image_remove.go delete mode 100644 vendor/github.com/docker/docker/client/image_save.go delete mode 100644 vendor/github.com/docker/docker/client/image_search.go delete mode 100644 vendor/github.com/docker/docker/client/image_tag.go delete mode 100644 vendor/github.com/docker/docker/client/info.go delete mode 100644 vendor/github.com/docker/docker/client/interface.go delete mode 100644 vendor/github.com/docker/docker/client/interface_experimental.go delete mode 100644 vendor/github.com/docker/docker/client/interface_stable.go delete mode 100644 vendor/github.com/docker/docker/client/login.go delete mode 100644 vendor/github.com/docker/docker/client/network_connect.go delete mode 100644 vendor/github.com/docker/docker/client/network_create.go delete mode 100644 vendor/github.com/docker/docker/client/network_disconnect.go delete mode 100644 vendor/github.com/docker/docker/client/network_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/network_list.go delete mode 100644 vendor/github.com/docker/docker/client/network_prune.go delete mode 100644 vendor/github.com/docker/docker/client/network_remove.go delete mode 100644 vendor/github.com/docker/docker/client/node_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/node_list.go delete mode 100644 vendor/github.com/docker/docker/client/node_remove.go delete mode 100644 vendor/github.com/docker/docker/client/node_update.go delete mode 100644 vendor/github.com/docker/docker/client/ping.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_create.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_disable.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_enable.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_install.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_list.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_push.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_remove.go delete mode 100644 
vendor/github.com/docker/docker/client/plugin_set.go delete mode 100644 vendor/github.com/docker/docker/client/plugin_upgrade.go delete mode 100644 vendor/github.com/docker/docker/client/request.go delete mode 100644 vendor/github.com/docker/docker/client/secret_create.go delete mode 100644 vendor/github.com/docker/docker/client/secret_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/secret_list.go delete mode 100644 vendor/github.com/docker/docker/client/secret_remove.go delete mode 100644 vendor/github.com/docker/docker/client/secret_update.go delete mode 100644 vendor/github.com/docker/docker/client/service_create.go delete mode 100644 vendor/github.com/docker/docker/client/service_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/service_list.go delete mode 100644 vendor/github.com/docker/docker/client/service_logs.go delete mode 100644 vendor/github.com/docker/docker/client/service_remove.go delete mode 100644 vendor/github.com/docker/docker/client/service_update.go delete mode 100644 vendor/github.com/docker/docker/client/session.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_get_unlock_key.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_init.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_join.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_leave.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_unlock.go delete mode 100644 vendor/github.com/docker/docker/client/swarm_update.go delete mode 100644 vendor/github.com/docker/docker/client/task_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/task_list.go delete mode 100644 vendor/github.com/docker/docker/client/task_logs.go delete mode 100644 vendor/github.com/docker/docker/client/transport.go delete mode 100644 vendor/github.com/docker/docker/client/utils.go delete mode 100644 vendor/github.com/docker/docker/client/version.go delete mode 100644 vendor/github.com/docker/docker/client/volume_create.go delete mode 100644 vendor/github.com/docker/docker/client/volume_inspect.go delete mode 100644 vendor/github.com/docker/docker/client/volume_list.go delete mode 100644 vendor/github.com/docker/docker/client/volume_prune.go delete mode 100644 vendor/github.com/docker/docker/client/volume_remove.go delete mode 100644 vendor/github.com/docker/docker/cmd/dockerd/config.go delete mode 100644 vendor/github.com/docker/docker/cmd/dockerd/config_common_unix.go delete mode 100644 vendor/github.com/docker/docker/cmd/dockerd/config_unix.go delete mode 100644 vendor/github.com/docker/docker/cmd/dockerd/config_windows.go delete mode 100644 vendor/github.com/docker/docker/cmd/dockerd/daemon.go delete mode 100644 vendor/github.com/docker/docker/cmd/dockerd/daemon_freebsd.go delete mode 100644 vendor/github.com/docker/docker/cmd/dockerd/daemon_linux.go delete mode 100644 vendor/github.com/docker/docker/cmd/dockerd/daemon_unix.go delete mode 100644 vendor/github.com/docker/docker/cmd/dockerd/daemon_windows.go delete mode 100644 vendor/github.com/docker/docker/cmd/dockerd/docker.go delete mode 100644 vendor/github.com/docker/docker/cmd/dockerd/docker_unix.go delete mode 100644 vendor/github.com/docker/docker/cmd/dockerd/docker_windows.go delete mode 100644 vendor/github.com/docker/docker/cmd/dockerd/hack/malformed_host_override.go delete mode 100644 vendor/github.com/docker/docker/cmd/dockerd/metrics.go delete mode 100644 
vendor/github.com/docker/docker/cmd/dockerd/options.go delete mode 100644 vendor/github.com/docker/docker/cmd/dockerd/service_unsupported.go delete mode 100644 vendor/github.com/docker/docker/cmd/dockerd/service_windows.go delete mode 100644 vendor/github.com/docker/docker/container/archive.go delete mode 100644 vendor/github.com/docker/docker/container/container.go delete mode 100644 vendor/github.com/docker/docker/container/container_unix.go delete mode 100644 vendor/github.com/docker/docker/container/container_windows.go delete mode 100644 vendor/github.com/docker/docker/container/env.go delete mode 100644 vendor/github.com/docker/docker/container/health.go delete mode 100644 vendor/github.com/docker/docker/container/history.go delete mode 100644 vendor/github.com/docker/docker/container/memory_store.go delete mode 100644 vendor/github.com/docker/docker/container/monitor.go delete mode 100644 vendor/github.com/docker/docker/container/mounts_unix.go delete mode 100644 vendor/github.com/docker/docker/container/mounts_windows.go delete mode 100644 vendor/github.com/docker/docker/container/state.go delete mode 100644 vendor/github.com/docker/docker/container/store.go delete mode 100644 vendor/github.com/docker/docker/container/stream/attach.go delete mode 100644 vendor/github.com/docker/docker/container/stream/streams.go delete mode 100644 vendor/github.com/docker/docker/container/view.go delete mode 100644 vendor/github.com/docker/docker/contrib/apparmor/main.go delete mode 100644 vendor/github.com/docker/docker/contrib/apparmor/template.go delete mode 100644 vendor/github.com/docker/docker/contrib/docker-device-tool/device_tool.go delete mode 100644 vendor/github.com/docker/docker/contrib/docker-device-tool/device_tool_windows.go delete mode 100644 vendor/github.com/docker/docker/contrib/httpserver/server.go delete mode 100644 vendor/github.com/docker/docker/contrib/nnp-test/nnp-test.c delete mode 100644 vendor/github.com/docker/docker/contrib/syscall-test/acct.c delete mode 100644 vendor/github.com/docker/docker/contrib/syscall-test/exit32.s delete mode 100644 vendor/github.com/docker/docker/contrib/syscall-test/ns.c delete mode 100644 vendor/github.com/docker/docker/contrib/syscall-test/raw.c delete mode 100644 vendor/github.com/docker/docker/contrib/syscall-test/setgid.c delete mode 100644 vendor/github.com/docker/docker/contrib/syscall-test/setuid.c delete mode 100644 vendor/github.com/docker/docker/contrib/syscall-test/socket.c delete mode 100644 vendor/github.com/docker/docker/contrib/syscall-test/userns.c delete mode 100644 vendor/github.com/docker/docker/daemon/apparmor_default.go delete mode 100644 vendor/github.com/docker/docker/daemon/apparmor_default_unsupported.go delete mode 100644 vendor/github.com/docker/docker/daemon/archive.go delete mode 100644 vendor/github.com/docker/docker/daemon/archive_tarcopyoptions.go delete mode 100644 vendor/github.com/docker/docker/daemon/archive_tarcopyoptions_unix.go delete mode 100644 vendor/github.com/docker/docker/daemon/archive_tarcopyoptions_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/archive_unix.go delete mode 100644 vendor/github.com/docker/docker/daemon/archive_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/attach.go delete mode 100644 vendor/github.com/docker/docker/daemon/auth.go delete mode 100644 vendor/github.com/docker/docker/daemon/bindmount_unix.go delete mode 100644 vendor/github.com/docker/docker/daemon/caps/utils_unix.go delete mode 100644 
vendor/github.com/docker/docker/daemon/changes.go delete mode 100644 vendor/github.com/docker/docker/daemon/checkpoint.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/cluster.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/configs.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/controllers/plugin/controller.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/convert/config.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/convert/container.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/convert/network.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/convert/node.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/convert/secret.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/convert/service.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/convert/swarm.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/convert/task.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/errors.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/backend.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/container/adapter.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/container/attachment.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/container/container.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/container/controller.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/container/errors.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/container/executor.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/executor/container/validate.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/filters.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/helpers.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/listen_addr.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/listen_addr_linux.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/listen_addr_others.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/networks.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/noderunner.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/nodes.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/provider/network.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/secrets.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/services.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/swarm.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/tasks.go delete mode 100644 vendor/github.com/docker/docker/daemon/cluster/utils.go delete mode 100644 vendor/github.com/docker/docker/daemon/commit.go delete mode 100644 vendor/github.com/docker/docker/daemon/config/config.go delete mode 100644 vendor/github.com/docker/docker/daemon/config/config_common_unix.go delete mode 100644 vendor/github.com/docker/docker/daemon/config/config_unix.go delete mode 100644 vendor/github.com/docker/docker/daemon/config/config_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/config/opts.go delete mode 100644 
vendor/github.com/docker/docker/daemon/configs.go delete mode 100644 vendor/github.com/docker/docker/daemon/configs_linux.go delete mode 100644 vendor/github.com/docker/docker/daemon/configs_unsupported.go delete mode 100644 vendor/github.com/docker/docker/daemon/configs_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/container.go delete mode 100644 vendor/github.com/docker/docker/daemon/container_linux.go delete mode 100644 vendor/github.com/docker/docker/daemon/container_operations.go delete mode 100644 vendor/github.com/docker/docker/daemon/container_operations_unix.go delete mode 100644 vendor/github.com/docker/docker/daemon/container_operations_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/container_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/create.go delete mode 100644 vendor/github.com/docker/docker/daemon/create_unix.go delete mode 100644 vendor/github.com/docker/docker/daemon/create_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/daemon.go delete mode 100644 vendor/github.com/docker/docker/daemon/daemon_linux.go delete mode 100644 vendor/github.com/docker/docker/daemon/daemon_unix.go delete mode 100644 vendor/github.com/docker/docker/daemon/daemon_unsupported.go delete mode 100644 vendor/github.com/docker/docker/daemon/daemon_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/debugtrap_unix.go delete mode 100644 vendor/github.com/docker/docker/daemon/debugtrap_unsupported.go delete mode 100644 vendor/github.com/docker/docker/daemon/debugtrap_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/delete.go delete mode 100644 vendor/github.com/docker/docker/daemon/dependency.go delete mode 100644 vendor/github.com/docker/docker/daemon/discovery/discovery.go delete mode 100644 vendor/github.com/docker/docker/daemon/disk_usage.go delete mode 100644 vendor/github.com/docker/docker/daemon/errors.go delete mode 100644 vendor/github.com/docker/docker/daemon/events.go delete mode 100644 vendor/github.com/docker/docker/daemon/events/events.go delete mode 100644 vendor/github.com/docker/docker/daemon/events/filter.go delete mode 100644 vendor/github.com/docker/docker/daemon/events/metrics.go delete mode 100644 vendor/github.com/docker/docker/daemon/events/testutils/testutils.go delete mode 100644 vendor/github.com/docker/docker/daemon/exec.go delete mode 100644 vendor/github.com/docker/docker/daemon/exec/exec.go delete mode 100644 vendor/github.com/docker/docker/daemon/exec_linux.go delete mode 100644 vendor/github.com/docker/docker/daemon/exec_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/export.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/aufs/dirs.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_linux.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_unsupported.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/btrfs/dummy_unsupported.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version_none.go delete mode 100644 
vendor/github.com/docker/docker/daemon/graphdriver/copy/copy.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/counter.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/devmapper/device_setup.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_doc.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/devmapper/mount.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/driver.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/errors.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphbench_unix.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_unix.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil_unix.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/lcow/lcow.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/lcow/lcow_svm.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs_file.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs_filedriver.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs_pathdriver.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay2/check.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay2/mount.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay_unsupported.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlay2/randomid.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/overlayutils/overlayutils.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/plugin.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/proxy.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/quota/errors.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/quota/projectquota.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/register/register_aufs.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/register/register_btrfs.go delete mode 100644 
vendor/github.com/docker/docker/daemon/graphdriver/register/register_devicemapper.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay2.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/register/register_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/register/register_zfs.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/vfs/copy_linux.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/vfs/copy_unsupported.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/vfs/driver.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/vfs/quota_linux.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/vfs/quota_unsupported.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/windows/windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_freebsd.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_linux.go delete mode 100644 vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_unsupported.go delete mode 100644 vendor/github.com/docker/docker/daemon/health.go delete mode 100644 vendor/github.com/docker/docker/daemon/images/cache.go delete mode 100644 vendor/github.com/docker/docker/daemon/images/image.go delete mode 100644 vendor/github.com/docker/docker/daemon/images/image_builder.go delete mode 100644 vendor/github.com/docker/docker/daemon/images/image_commit.go delete mode 100644 vendor/github.com/docker/docker/daemon/images/image_delete.go delete mode 100644 vendor/github.com/docker/docker/daemon/images/image_events.go delete mode 100644 vendor/github.com/docker/docker/daemon/images/image_exporter.go delete mode 100644 vendor/github.com/docker/docker/daemon/images/image_history.go delete mode 100644 vendor/github.com/docker/docker/daemon/images/image_import.go delete mode 100644 vendor/github.com/docker/docker/daemon/images/image_inspect.go delete mode 100644 vendor/github.com/docker/docker/daemon/images/image_prune.go delete mode 100644 vendor/github.com/docker/docker/daemon/images/image_pull.go delete mode 100644 vendor/github.com/docker/docker/daemon/images/image_push.go delete mode 100644 vendor/github.com/docker/docker/daemon/images/image_search.go delete mode 100644 vendor/github.com/docker/docker/daemon/images/image_tag.go delete mode 100644 vendor/github.com/docker/docker/daemon/images/image_unix.go delete mode 100644 vendor/github.com/docker/docker/daemon/images/image_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/images/images.go delete mode 100644 vendor/github.com/docker/docker/daemon/images/locals.go delete mode 100644 vendor/github.com/docker/docker/daemon/images/service.go delete mode 100644 vendor/github.com/docker/docker/daemon/info.go delete mode 100644 vendor/github.com/docker/docker/daemon/info_unix.go delete mode 100644 vendor/github.com/docker/docker/daemon/info_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/initlayer/setup_unix.go delete mode 100644 vendor/github.com/docker/docker/daemon/initlayer/setup_windows.go delete mode 100644 
vendor/github.com/docker/docker/daemon/inspect.go delete mode 100644 vendor/github.com/docker/docker/daemon/inspect_linux.go delete mode 100644 vendor/github.com/docker/docker/daemon/inspect_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/keys.go delete mode 100644 vendor/github.com/docker/docker/daemon/keys_unsupported.go delete mode 100644 vendor/github.com/docker/docker/daemon/kill.go delete mode 100644 vendor/github.com/docker/docker/daemon/links.go delete mode 100644 vendor/github.com/docker/docker/daemon/links/links.go delete mode 100644 vendor/github.com/docker/docker/daemon/list.go delete mode 100644 vendor/github.com/docker/docker/daemon/list_unix.go delete mode 100644 vendor/github.com/docker/docker/daemon/list_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/listeners/group_unix.go delete mode 100644 vendor/github.com/docker/docker/daemon/listeners/listeners_linux.go delete mode 100644 vendor/github.com/docker/docker/daemon/listeners/listeners_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/logdrivers_linux.go delete mode 100644 vendor/github.com/docker/docker/daemon/logdrivers_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/adapter.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/copier.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/etwlogs/etwlogs_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/factory.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/fluentd/fluentd.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging_linux.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging_others.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/gelf/gelf.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/journald/journald.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/journald/journald_unsupported.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/journald/read.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/journald/read_native.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/journald/read_native_compat.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/journald/read_unsupported.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlog.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/time_marshalling.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/logentries/logentries.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/logger.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/loggerutils/multireader/multireader.go delete mode 100644 
vendor/github.com/docker/docker/daemon/logger/loginfo.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/metrics.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/plugin.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/plugin_unix.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/plugin_unsupported.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/proxy.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/ring.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/splunk/splunk.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/syslog/syslog.go delete mode 100644 vendor/github.com/docker/docker/daemon/logger/templates/templates.go delete mode 100644 vendor/github.com/docker/docker/daemon/logs.go delete mode 100644 vendor/github.com/docker/docker/daemon/metrics.go delete mode 100644 vendor/github.com/docker/docker/daemon/metrics_unix.go delete mode 100644 vendor/github.com/docker/docker/daemon/metrics_unsupported.go delete mode 100644 vendor/github.com/docker/docker/daemon/monitor.go delete mode 100644 vendor/github.com/docker/docker/daemon/mounts.go delete mode 100644 vendor/github.com/docker/docker/daemon/names.go delete mode 100644 vendor/github.com/docker/docker/daemon/names/names.go delete mode 100644 vendor/github.com/docker/docker/daemon/network.go delete mode 100644 vendor/github.com/docker/docker/daemon/network/settings.go delete mode 100644 vendor/github.com/docker/docker/daemon/oci_linux.go delete mode 100644 vendor/github.com/docker/docker/daemon/oci_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/pause.go delete mode 100644 vendor/github.com/docker/docker/daemon/prune.go delete mode 100644 vendor/github.com/docker/docker/daemon/reload.go delete mode 100644 vendor/github.com/docker/docker/daemon/reload_unix.go delete mode 100644 vendor/github.com/docker/docker/daemon/reload_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/rename.go delete mode 100644 vendor/github.com/docker/docker/daemon/resize.go delete mode 100644 vendor/github.com/docker/docker/daemon/restart.go delete mode 100644 vendor/github.com/docker/docker/daemon/seccomp_disabled.go delete mode 100644 vendor/github.com/docker/docker/daemon/seccomp_linux.go delete mode 100644 vendor/github.com/docker/docker/daemon/seccomp_unsupported.go delete mode 100644 vendor/github.com/docker/docker/daemon/secrets.go delete mode 100644 vendor/github.com/docker/docker/daemon/secrets_linux.go delete mode 100644 vendor/github.com/docker/docker/daemon/secrets_unsupported.go delete mode 100644 vendor/github.com/docker/docker/daemon/secrets_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/selinux_linux.go delete mode 100644 vendor/github.com/docker/docker/daemon/selinux_unsupported.go delete mode 100644 vendor/github.com/docker/docker/daemon/start.go delete mode 100644 vendor/github.com/docker/docker/daemon/start_unix.go delete mode 100644 vendor/github.com/docker/docker/daemon/start_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/stats.go delete mode 100644 vendor/github.com/docker/docker/daemon/stats/collector.go delete mode 100644 vendor/github.com/docker/docker/daemon/stats/collector_unix.go delete mode 100644 vendor/github.com/docker/docker/daemon/stats/collector_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/stats_collector.go delete mode 100644 vendor/github.com/docker/docker/daemon/stats_unix.go 
delete mode 100644 vendor/github.com/docker/docker/daemon/stats_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/stop.go delete mode 100644 vendor/github.com/docker/docker/daemon/top_unix.go delete mode 100644 vendor/github.com/docker/docker/daemon/top_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/trustkey.go delete mode 100644 vendor/github.com/docker/docker/daemon/unpause.go delete mode 100644 vendor/github.com/docker/docker/daemon/update.go delete mode 100644 vendor/github.com/docker/docker/daemon/update_linux.go delete mode 100644 vendor/github.com/docker/docker/daemon/update_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/volumes.go delete mode 100644 vendor/github.com/docker/docker/daemon/volumes_linux.go delete mode 100644 vendor/github.com/docker/docker/daemon/volumes_unix.go delete mode 100644 vendor/github.com/docker/docker/daemon/volumes_windows.go delete mode 100644 vendor/github.com/docker/docker/daemon/wait.go delete mode 100644 vendor/github.com/docker/docker/daemon/workdir.go delete mode 100644 vendor/github.com/docker/docker/distribution/config.go delete mode 100644 vendor/github.com/docker/docker/distribution/errors.go delete mode 100644 vendor/github.com/docker/docker/distribution/metadata/metadata.go delete mode 100644 vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go delete mode 100644 vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go delete mode 100644 vendor/github.com/docker/docker/distribution/pull.go delete mode 100644 vendor/github.com/docker/docker/distribution/pull_v1.go delete mode 100644 vendor/github.com/docker/docker/distribution/pull_v2.go delete mode 100644 vendor/github.com/docker/docker/distribution/pull_v2_unix.go delete mode 100644 vendor/github.com/docker/docker/distribution/pull_v2_windows.go delete mode 100644 vendor/github.com/docker/docker/distribution/push.go delete mode 100644 vendor/github.com/docker/docker/distribution/push_v1.go delete mode 100644 vendor/github.com/docker/docker/distribution/push_v2.go delete mode 100644 vendor/github.com/docker/docker/distribution/registry.go delete mode 100644 vendor/github.com/docker/docker/distribution/utils/progress.go delete mode 100644 vendor/github.com/docker/docker/distribution/xfer/download.go delete mode 100644 vendor/github.com/docker/docker/distribution/xfer/transfer.go delete mode 100644 vendor/github.com/docker/docker/distribution/xfer/upload.go delete mode 100644 vendor/github.com/docker/docker/dockerversion/useragent.go delete mode 100644 vendor/github.com/docker/docker/dockerversion/version_lib.go delete mode 100644 vendor/github.com/docker/docker/errdefs/defs.go delete mode 100644 vendor/github.com/docker/docker/errdefs/doc.go delete mode 100644 vendor/github.com/docker/docker/errdefs/helpers.go delete mode 100644 vendor/github.com/docker/docker/errdefs/is.go delete mode 100644 vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/master/call.go delete mode 100644 vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/master/master.go delete mode 100644 vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/master/set.go delete mode 100644 vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/types/types.go delete mode 100644 vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker/executor.go delete mode 100644 vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker/worker.go delete mode 
100644 vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/compose.go delete mode 100644 vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/dockercmd.go delete mode 100644 vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/enumerate.go delete mode 100644 vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/host.go delete mode 100644 vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/volume.go delete mode 100644 vendor/github.com/docker/docker/hack/make/.go-autogen delete mode 100644 vendor/github.com/docker/docker/hack/make/.go-autogen.ps1 delete mode 100644 vendor/github.com/docker/docker/hack/make/.resources-windows/common.rc delete mode 100644 vendor/github.com/docker/docker/hack/make/.resources-windows/docker.rc delete mode 100644 vendor/github.com/docker/docker/hack/make/.resources-windows/dockerd.rc delete mode 100644 vendor/github.com/docker/docker/hack/make/.resources-windows/event_messages.mc delete mode 100644 vendor/github.com/docker/docker/hack/make/.resources-windows/resources.go delete mode 100644 vendor/github.com/docker/docker/image/cache/cache.go delete mode 100644 vendor/github.com/docker/docker/image/cache/compare.go delete mode 100644 vendor/github.com/docker/docker/image/fs.go delete mode 100644 vendor/github.com/docker/docker/image/image.go delete mode 100644 vendor/github.com/docker/docker/image/rootfs.go delete mode 100644 vendor/github.com/docker/docker/image/store.go delete mode 100644 vendor/github.com/docker/docker/image/tarexport/load.go delete mode 100644 vendor/github.com/docker/docker/image/tarexport/save.go delete mode 100644 vendor/github.com/docker/docker/image/tarexport/tarexport.go delete mode 100644 vendor/github.com/docker/docker/image/v1/imagev1.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/checker/checker.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/cli/build/build.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/cli/cli.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/daemon/daemon.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/daemon/daemon_swarm.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/environment/environment.go delete mode 100644 vendor/github.com/docker/docker/integration-cli/requirement/requirement.go delete mode 100644 vendor/github.com/docker/docker/integration/doc.go delete mode 100644 vendor/github.com/docker/docker/integration/internal/container/container.go delete mode 100644 vendor/github.com/docker/docker/integration/internal/container/exec.go delete mode 100644 vendor/github.com/docker/docker/integration/internal/container/ops.go delete mode 100644 vendor/github.com/docker/docker/integration/internal/container/states.go delete mode 100644 vendor/github.com/docker/docker/integration/internal/network/network.go delete mode 100644 vendor/github.com/docker/docker/integration/internal/network/ops.go delete mode 100644 vendor/github.com/docker/docker/integration/internal/requirement/requirement.go delete mode 100644 vendor/github.com/docker/docker/integration/internal/swarm/service.go delete mode 100644 vendor/github.com/docker/docker/integration/network/helpers.go delete mode 100644 vendor/github.com/docker/docker/integration/plugin/logging/cmd/close_on_start/main.go delete mode 100644 vendor/github.com/docker/docker/integration/plugin/logging/cmd/dummy/main.go delete mode 100644 
vendor/github.com/docker/docker/integration/plugin/volumes/cmd/dummy/main.go delete mode 100644 vendor/github.com/docker/docker/internal/test/daemon/config.go delete mode 100644 vendor/github.com/docker/docker/internal/test/daemon/container.go delete mode 100644 vendor/github.com/docker/docker/internal/test/daemon/daemon.go delete mode 100644 vendor/github.com/docker/docker/internal/test/daemon/daemon_unix.go delete mode 100644 vendor/github.com/docker/docker/internal/test/daemon/daemon_windows.go delete mode 100644 vendor/github.com/docker/docker/internal/test/daemon/node.go delete mode 100644 vendor/github.com/docker/docker/internal/test/daemon/ops.go delete mode 100644 vendor/github.com/docker/docker/internal/test/daemon/plugin.go delete mode 100644 vendor/github.com/docker/docker/internal/test/daemon/secret.go delete mode 100644 vendor/github.com/docker/docker/internal/test/daemon/service.go delete mode 100644 vendor/github.com/docker/docker/internal/test/daemon/swarm.go delete mode 100644 vendor/github.com/docker/docker/internal/test/environment/clean.go delete mode 100644 vendor/github.com/docker/docker/internal/test/environment/environment.go delete mode 100644 vendor/github.com/docker/docker/internal/test/environment/protect.go delete mode 100644 vendor/github.com/docker/docker/internal/test/fakecontext/context.go delete mode 100644 vendor/github.com/docker/docker/internal/test/fakegit/fakegit.go delete mode 100644 vendor/github.com/docker/docker/internal/test/fakestorage/fixtures.go delete mode 100644 vendor/github.com/docker/docker/internal/test/fakestorage/storage.go delete mode 100644 vendor/github.com/docker/docker/internal/test/fixtures/load/frozen.go delete mode 100644 vendor/github.com/docker/docker/internal/test/fixtures/plugin/basic/basic.go delete mode 100644 vendor/github.com/docker/docker/internal/test/fixtures/plugin/plugin.go delete mode 100644 vendor/github.com/docker/docker/internal/test/helper.go delete mode 100644 vendor/github.com/docker/docker/internal/test/registry/ops.go delete mode 100644 vendor/github.com/docker/docker/internal/test/registry/registry.go delete mode 100644 vendor/github.com/docker/docker/internal/test/registry/registry_mock.go delete mode 100644 vendor/github.com/docker/docker/internal/test/request/npipe.go delete mode 100644 vendor/github.com/docker/docker/internal/test/request/npipe_windows.go delete mode 100644 vendor/github.com/docker/docker/internal/test/request/ops.go delete mode 100644 vendor/github.com/docker/docker/internal/test/request/request.go delete mode 100644 vendor/github.com/docker/docker/internal/testutil/helpers.go delete mode 100644 vendor/github.com/docker/docker/internal/testutil/stringutils.go delete mode 100644 vendor/github.com/docker/docker/layer/empty.go delete mode 100644 vendor/github.com/docker/docker/layer/filestore.go delete mode 100644 vendor/github.com/docker/docker/layer/filestore_unix.go delete mode 100644 vendor/github.com/docker/docker/layer/filestore_windows.go delete mode 100644 vendor/github.com/docker/docker/layer/layer.go delete mode 100644 vendor/github.com/docker/docker/layer/layer_store.go delete mode 100644 vendor/github.com/docker/docker/layer/layer_store_windows.go delete mode 100644 vendor/github.com/docker/docker/layer/layer_unix.go delete mode 100644 vendor/github.com/docker/docker/layer/layer_windows.go delete mode 100644 vendor/github.com/docker/docker/layer/migration.go delete mode 100644 vendor/github.com/docker/docker/layer/mounted_layer.go delete mode 100644 
 delete mode 100644 vendor/github.com/docker/docker/layer/ro_layer.go
 delete mode 100644 vendor/github.com/docker/docker/layer/ro_layer_windows.go
 delete mode 100644 vendor/github.com/docker/docker/libcontainerd/client_daemon.go
 delete mode 100644 vendor/github.com/docker/docker/libcontainerd/client_daemon_linux.go
 delete mode 100644 vendor/github.com/docker/docker/libcontainerd/client_daemon_windows.go
 delete mode 100644 vendor/github.com/docker/docker/libcontainerd/client_local_windows.go
 delete mode 100644 vendor/github.com/docker/docker/libcontainerd/errors.go
 delete mode 100644 vendor/github.com/docker/docker/libcontainerd/process_windows.go
 delete mode 100644 vendor/github.com/docker/docker/libcontainerd/queue.go
 delete mode 100644 vendor/github.com/docker/docker/libcontainerd/remote_daemon.go
 delete mode 100644 vendor/github.com/docker/docker/libcontainerd/remote_daemon_linux.go
 delete mode 100644 vendor/github.com/docker/docker/libcontainerd/remote_daemon_options.go
 delete mode 100644 vendor/github.com/docker/docker/libcontainerd/remote_daemon_options_linux.go
 delete mode 100644 vendor/github.com/docker/docker/libcontainerd/remote_daemon_windows.go
 delete mode 100644 vendor/github.com/docker/docker/libcontainerd/remote_local.go
 delete mode 100644 vendor/github.com/docker/docker/libcontainerd/types.go
 delete mode 100644 vendor/github.com/docker/docker/libcontainerd/types_linux.go
 delete mode 100644 vendor/github.com/docker/docker/libcontainerd/types_windows.go
 delete mode 100644 vendor/github.com/docker/docker/libcontainerd/utils_linux.go
 delete mode 100644 vendor/github.com/docker/docker/libcontainerd/utils_windows.go
 delete mode 100644 vendor/github.com/docker/docker/migrate/v1/migratev1.go
 delete mode 100644 vendor/github.com/docker/docker/oci/defaults.go
 delete mode 100644 vendor/github.com/docker/docker/oci/devices_linux.go
 delete mode 100644 vendor/github.com/docker/docker/oci/devices_unsupported.go
 delete mode 100644 vendor/github.com/docker/docker/oci/namespaces.go
 delete mode 100644 vendor/github.com/docker/docker/opts/address_pools.go
 delete mode 100644 vendor/github.com/docker/docker/opts/env.go
 delete mode 100644 vendor/github.com/docker/docker/opts/hosts.go
 delete mode 100644 vendor/github.com/docker/docker/opts/hosts_unix.go
 delete mode 100644 vendor/github.com/docker/docker/opts/hosts_windows.go
 delete mode 100644 vendor/github.com/docker/docker/opts/ip.go
 delete mode 100644 vendor/github.com/docker/docker/opts/opts.go
 delete mode 100644 vendor/github.com/docker/docker/opts/opts_unix.go
 delete mode 100644 vendor/github.com/docker/docker/opts/opts_windows.go
 delete mode 100644 vendor/github.com/docker/docker/opts/quotedstring.go
 delete mode 100644 vendor/github.com/docker/docker/opts/runtime.go
 delete mode 100644 vendor/github.com/docker/docker/opts/ulimit.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/aaparser/aaparser.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/archive.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_linux.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_other.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/changes.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_linux.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_other.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/copy.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/copy_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/copy_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/diff.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/example_changes.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/time_linux.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/whiteouts.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/archive/wrap.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/authorization/api.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/authorization/authz.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/authorization/middleware.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/authorization/plugin.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/authorization/response.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/archive.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/diff.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/containerfs/archiver.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/containerfs/containerfs.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/containerfs/containerfs_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/containerfs/containerfs_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/devicemapper/devmapper.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/devicemapper/devmapper_log.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_dynamic.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_dynamic_deferred_remove.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_dynamic_dlsym_deferred_remove.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/devicemapper/ioctl.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/devicemapper/log.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/directory/directory.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/directory/directory_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/directory/directory_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/discovery/backends.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/discovery/discovery.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/discovery/entry.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/discovery/file/file.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/discovery/generator.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/discovery/kv/kv.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/discovery/memory/memory.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/discovery/nodes/nodes.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/dmesg/dmesg_linux.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/filenotify/filenotify.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/filenotify/poller.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/fileutils/fileutils.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/fsutils/fsutils_linux.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/homedir/homedir_others.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/idtools/utils_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/ioutils/buffer.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/ioutils/fswriters.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/ioutils/readers.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/ioutils/writers.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/locker/locker.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/longpath/longpath.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/loopback/attach_loopback.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/loopback/ioctl.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/loopback/loop_wrapper.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/loopback/loopback.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/mount/flags.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/mount/flags_linux.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/mount/mount.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/mount/mounter_linux.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/mount/mountinfo.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/namesgenerator/cmd/names-generator/main.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/parsers/parsers.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/pidfile/pidfile.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/pidfile/pidfile_darwin.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/pidfile/pidfile_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/pidfile/pidfile_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/platform/architecture_linux.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/platform/architecture_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/platform/architecture_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/platform/platform.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/plugingetter/getter.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/plugins/client.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/plugins/discovery.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/plugins/errors.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/foo.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/main.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/template.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/plugins/plugins.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/plugins/transport/http.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/plugins/transport/transport.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/pools/pools.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/progress/progress.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/progress/progressreader.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/pubsub/publisher.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/reexec/command_linux.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/reexec/command_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/reexec/command_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/reexec/reexec.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/signal/signal.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_darwin.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_linux.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/signal/signal_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/signal/testfiles/main.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/signal/trap.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/streamformatter/streamwriter.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/stringid/stringid.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/symlink/fs.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/symlink/fs_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/symlink/fs_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/numcpu.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/numcpu_linux.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/numcpu_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/errors.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/exitcode.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/filesys.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/filesys_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/init.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/init_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/init_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/lcow.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/lcow_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/lcow_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/lstat_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/lstat_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo_linux.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/mknod.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/mknod_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/path.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/process_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/process_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/rm.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/stat_darwin.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/stat_freebsd.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/stat_linux.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/stat_openbsd.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/stat_solaris.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/stat_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/stat_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/syscall_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/syscall_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/umask.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/umask_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/utimes_linux.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/tailfile/tailfile.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/tarsum/builder_context.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/tarsum/tarsum.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/tarsum/versioning.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/tarsum/writercloser.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/truncindex/truncindex.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/urlutil/urlutil.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/useragent/useragent.go
 delete mode 100644 vendor/github.com/docker/docker/plugin/backend_linux.go
 delete mode 100644 vendor/github.com/docker/docker/plugin/backend_unsupported.go
 delete mode 100644 vendor/github.com/docker/docker/plugin/blobstore.go
 delete mode 100644 vendor/github.com/docker/docker/plugin/defs.go
 delete mode 100644 vendor/github.com/docker/docker/plugin/errors.go
 delete mode 100644 vendor/github.com/docker/docker/plugin/events.go
 delete mode 100644 vendor/github.com/docker/docker/plugin/executor/containerd/containerd.go
 delete mode 100644 vendor/github.com/docker/docker/plugin/manager.go
 delete mode 100644 vendor/github.com/docker/docker/plugin/manager_linux.go
 delete mode 100644 vendor/github.com/docker/docker/plugin/manager_windows.go
 delete mode 100644 vendor/github.com/docker/docker/plugin/store.go
 delete mode 100644 vendor/github.com/docker/docker/plugin/v2/plugin.go
 delete mode 100644 vendor/github.com/docker/docker/plugin/v2/plugin_linux.go
 delete mode 100644 vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go
 delete mode 100644 vendor/github.com/docker/docker/plugin/v2/settable.go
 delete mode 100644 vendor/github.com/docker/docker/profiles/apparmor/apparmor.go
 delete mode 100644 vendor/github.com/docker/docker/profiles/apparmor/template.go
 delete mode 100644 vendor/github.com/docker/docker/profiles/seccomp/generate.go
 delete mode 100644 vendor/github.com/docker/docker/profiles/seccomp/seccomp.go
 delete mode 100644 vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go
 delete mode 100644 vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go
 delete mode 100644 vendor/github.com/docker/docker/reference/errors.go
 delete mode 100644 vendor/github.com/docker/docker/reference/store.go
 delete mode 100644 vendor/github.com/docker/docker/registry/auth.go
 delete mode 100644 vendor/github.com/docker/docker/registry/config.go
 delete mode 100644 vendor/github.com/docker/docker/registry/config_unix.go
 delete mode 100644 vendor/github.com/docker/docker/registry/config_windows.go
 delete mode 100644 vendor/github.com/docker/docker/registry/endpoint_v1.go
 delete mode 100644 vendor/github.com/docker/docker/registry/errors.go
 delete mode 100644 vendor/github.com/docker/docker/registry/registry.go
 delete mode 100644 vendor/github.com/docker/docker/registry/resumable/resumablerequestreader.go
 delete mode 100644 vendor/github.com/docker/docker/registry/service.go
 delete mode 100644 vendor/github.com/docker/docker/registry/service_v1.go
 delete mode 100644 vendor/github.com/docker/docker/registry/service_v2.go
 delete mode 100644 vendor/github.com/docker/docker/registry/session.go
 delete mode 100644 vendor/github.com/docker/docker/registry/types.go
 delete mode 100644 vendor/github.com/docker/docker/restartmanager/restartmanager.go
 delete mode 100644 vendor/github.com/docker/docker/runconfig/config.go
 delete mode 100644 vendor/github.com/docker/docker/runconfig/config_unix.go
 delete mode 100644 vendor/github.com/docker/docker/runconfig/config_windows.go
 delete mode 100644 vendor/github.com/docker/docker/runconfig/errors.go
 delete mode 100644 vendor/github.com/docker/docker/runconfig/hostconfig.go
 delete mode 100644 vendor/github.com/docker/docker/runconfig/hostconfig_unix.go
 delete mode 100644 vendor/github.com/docker/docker/runconfig/hostconfig_windows.go
 delete mode 100644 vendor/github.com/docker/docker/runconfig/opts/parse.go
 delete mode 100644 vendor/github.com/docker/docker/volume/drivers/adapter.go
 delete mode 100644 vendor/github.com/docker/docker/volume/drivers/extpoint.go
 delete mode 100644 vendor/github.com/docker/docker/volume/drivers/proxy.go
 delete mode 100644 vendor/github.com/docker/docker/volume/local/local.go
 delete mode 100644 vendor/github.com/docker/docker/volume/local/local_unix.go
 delete mode 100644 vendor/github.com/docker/docker/volume/local/local_windows.go
 delete mode 100644 vendor/github.com/docker/docker/volume/mounts/lcow_parser.go
 delete mode 100644 vendor/github.com/docker/docker/volume/mounts/linux_parser.go
 delete mode 100644 vendor/github.com/docker/docker/volume/mounts/mounts.go
 delete mode 100644 vendor/github.com/docker/docker/volume/mounts/parser.go
 delete mode 100644 vendor/github.com/docker/docker/volume/mounts/validate.go
 delete mode 100644 vendor/github.com/docker/docker/volume/mounts/volume_copy.go
 delete mode 100644 vendor/github.com/docker/docker/volume/mounts/volume_unix.go
 delete mode 100644 vendor/github.com/docker/docker/volume/mounts/volume_windows.go
 delete mode 100644 vendor/github.com/docker/docker/volume/mounts/windows_parser.go
 delete mode 100644 vendor/github.com/docker/docker/volume/service/by.go
 delete mode 100644 vendor/github.com/docker/docker/volume/service/convert.go
 delete mode 100644 vendor/github.com/docker/docker/volume/service/db.go
 delete mode 100644 vendor/github.com/docker/docker/volume/service/default_driver.go
 delete mode 100644 vendor/github.com/docker/docker/volume/service/default_driver_stubs.go
 delete mode 100644 vendor/github.com/docker/docker/volume/service/errors.go
 delete mode 100644 vendor/github.com/docker/docker/volume/service/opts/opts.go
 delete mode 100644 vendor/github.com/docker/docker/volume/service/restore.go
 delete mode 100644 vendor/github.com/docker/docker/volume/service/service.go
 delete mode 100644 vendor/github.com/docker/docker/volume/service/store.go
 delete mode 100644 vendor/github.com/docker/docker/volume/service/store_unix.go
 delete mode 100644 vendor/github.com/docker/docker/volume/service/store_windows.go
 delete mode 100644 vendor/github.com/docker/docker/volume/testutils/testutils.go
 delete mode 100644 vendor/github.com/docker/docker/volume/volume.go
 delete mode 100644 vendor/github.com/emicklei/go-restful/examples/.goconvey
 delete mode 100644 vendor/github.com/emicklei/go-restful/examples/google_app_engine/.goconvey
 delete mode 100644 vendor/github.com/emicklei/go-restful/examples/google_app_engine/datastore/.goconvey
 delete mode 100644 vendor/github.com/emicklei/go-restful/examples/google_app_engine/datastore/main.go
 delete mode 100644 vendor/github.com/emicklei/go-restful/examples/google_app_engine/restful-appstats-integration.go
 delete mode 100644 vendor/github.com/emicklei/go-restful/examples/google_app_engine/restful-user-service.go
 delete mode 100644 vendor/github.com/emicklei/go-restful/examples/msgpack/msgpack_entity.go
 delete mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-CORS-filter.go
 delete mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-NCSA-logging.go
 delete mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-basic-authentication.go
 delete mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-cpuprofiler-service.go
 delete mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-curly-router.go
 delete mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-encoding-filter.go
 delete mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-filters.go
 delete mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-form-handling.go
 delete mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-hello-world.go
 delete mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-html-template.go
 delete mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-multi-containers.go
 delete mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-no-cache-filter.go
 delete mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-options-filter.go
 delete mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-path-tail.go
 delete mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-pre-post-filters.go
 delete mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-resource-functions.go
 delete mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-serve-static.go
 delete mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-swagger.go
 delete mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-user-resource.go
 delete mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-user-service.go
 delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-prometheus/examples/testproto/test.pb.go
 delete mode 100755 vendor/github.com/stretchr/testify/.travis.gofmt.sh
 delete mode 100755 vendor/github.com/stretchr/testify/.travis.gogenerate.sh
 delete mode 100755 vendor/github.com/stretchr/testify/.travis.govet.sh
 delete mode 100644 vendor/google.golang.org/grpc/.travis.yml
 delete mode 100644 vendor/google.golang.org/grpc/benchmark/benchmain/main.go
 delete mode 100644 vendor/google.golang.org/grpc/benchmark/benchmark.go
 delete mode 100644 vendor/google.golang.org/grpc/benchmark/benchresult/main.go
 delete mode 100644 vendor/google.golang.org/grpc/benchmark/client/main.go
 delete mode 100644 vendor/google.golang.org/grpc/benchmark/grpc_testing/control.pb.go
 delete mode 100644 vendor/google.golang.org/grpc/benchmark/grpc_testing/control.proto
 delete mode 100644 vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.pb.go
 delete mode 100644 vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.proto
 delete mode 100644 vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.pb.go
 delete mode 100644 vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.proto
 delete mode 100644 vendor/google.golang.org/grpc/benchmark/grpc_testing/services.pb.go
 delete mode 100644 vendor/google.golang.org/grpc/benchmark/grpc_testing/services.proto
 delete mode 100644 vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.pb.go
 delete mode 100644 vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.proto
 delete mode 100644 vendor/google.golang.org/grpc/benchmark/latency/latency.go
 delete mode 100644 vendor/google.golang.org/grpc/benchmark/server/main.go
 delete mode 100644 vendor/google.golang.org/grpc/benchmark/stats/histogram.go
 delete mode 100644 vendor/google.golang.org/grpc/benchmark/stats/stats.go
 delete mode 100644 vendor/google.golang.org/grpc/benchmark/stats/util.go
 delete mode 100644 vendor/google.golang.org/grpc/benchmark/worker/benchmark_client.go
 delete mode 100644 vendor/google.golang.org/grpc/benchmark/worker/benchmark_server.go
 delete mode 100644 vendor/google.golang.org/grpc/benchmark/worker/main.go
 delete mode 100644 vendor/google.golang.org/grpc/benchmark/worker/util.go
 delete mode 100755 vendor/google.golang.org/grpc/codegen.sh
 delete mode 100644 vendor/google.golang.org/grpc/examples/helloworld/greeter_client/main.go
 delete mode 100644 vendor/google.golang.org/grpc/examples/helloworld/greeter_server/main.go
 delete mode 100644 vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go
 delete mode 100644 vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.proto
 delete mode 100644 vendor/google.golang.org/grpc/examples/helloworld/mock_helloworld/hw_mock.go
 delete mode 100644 vendor/google.golang.org/grpc/examples/route_guide/client/client.go
 delete mode 100644 vendor/google.golang.org/grpc/examples/route_guide/mock_routeguide/rg_mock.go
 delete mode 100644 vendor/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.pb.go
 delete mode 100644 vendor/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.proto
 delete mode 100644 vendor/google.golang.org/grpc/examples/route_guide/server/server.go
 delete mode 100644 vendor/google.golang.org/grpc/stress/client/main.go
 delete mode 100644 vendor/google.golang.org/grpc/stress/grpc_testing/metrics.pb.go
 delete mode 100644 vendor/google.golang.org/grpc/stress/grpc_testing/metrics.proto
 delete mode 100644 vendor/google.golang.org/grpc/stress/metrics_client/main.go
 delete mode 100644 vendor/google.golang.org/grpc/test/bufconn/bufconn.go
 delete mode 100644 vendor/google.golang.org/grpc/test/grpc_testing/test.pb.go
 delete mode 100644 vendor/google.golang.org/grpc/test/grpc_testing/test.proto
 delete mode 100644 vendor/google.golang.org/grpc/test/leakcheck/leakcheck.go
 delete mode 100644 vendor/google.golang.org/grpc/test/race.go
 delete mode 100644 vendor/google.golang.org/grpc/test/servertester.go
 delete mode 100755 vendor/google.golang.org/grpc/vet.sh
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/register.go
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1/doc.go
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1/register.go
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1/types.go
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1/zz_generated.deepcopy.go
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/clientset.go
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/doc.go
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/fake/clientset_generated.go
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/fake/doc.go
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/fake/register.go
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/scheme/doc.go
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/scheme/register.go
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/cr_client.go
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/doc.go
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/example.go
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/fake/doc.go
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/fake/fake_cr_client.go
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/fake/fake_example.go
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/generated_expansion.go
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/cr/interface.go
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/cr/v1/example.go
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/cr/v1/interface.go
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/factory.go
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/generic.go
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/listers/cr/v1/example.go
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/listers/cr/v1/expansion_generated.go
 delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/doc.go
 delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/fuzzer/fuzzer.go
 delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/install/install.go
 delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/register.go
 delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/types.go
 delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/v1/conversion.go
 delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/v1/defaults.go
 delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/v1/doc.go
 delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/v1/generated.pb.go
 delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/v1/register.go
 delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/v1/types.go
 delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.conversion.go
 delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.deepcopy.go
 delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.defaults.go
 delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/zz_generated.deepcopy.go
 delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/example2/doc.go
 delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/example2/install/install.go
 delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/example2/register.go
 delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/example2/v1/conversion.go
 delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/example2/v1/defaults.go
 delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/example2/v1/doc.go
 delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/example2/v1/generated.pb.go
 delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/example2/v1/register.go
 delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/example2/v1/types.go
 delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/example2/v1/zz_generated.conversion.go
 delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/example2/v1/zz_generated.deepcopy.go
 delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/example2/v1/zz_generated.defaults.go
 delete mode 100644 vendor/k8s.io/apiserver/pkg/apis/example2/zz_generated.deepcopy.go
 delete mode 100644 vendor/k8s.io/client-go/deprecated-dynamic/bad_debt.go
 delete mode 100644 vendor/k8s.io/client-go/deprecated-dynamic/client.go
 delete mode 100644 vendor/k8s.io/client-go/deprecated-dynamic/client_pool.go
 delete mode 100644 vendor/k8s.io/client-go/examples/create-update-delete-deployment/main.go
 delete mode 100644 vendor/k8s.io/client-go/examples/fake-client/doc.go
 delete mode 100644 vendor/k8s.io/client-go/examples/in-cluster-client-configuration/main.go
 delete mode 100644 vendor/k8s.io/client-go/examples/out-of-cluster-client-configuration/main.go
 delete mode 100644 vendor/k8s.io/client-go/examples/workqueue/main.go
 delete mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/azure.go
 delete mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/openstack/openstack.go
 delete mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/plugins.go
 delete mode 100644 vendor/k8s.io/client-go/scale/client.go
 delete mode 100644 vendor/k8s.io/client-go/scale/doc.go
 delete mode 100644 vendor/k8s.io/client-go/scale/fake/client.go
 delete mode 100644 vendor/k8s.io/client-go/scale/interfaces.go
 delete mode 100644 vendor/k8s.io/client-go/scale/scheme/appsint/doc.go
 delete mode 100644 vendor/k8s.io/client-go/scale/scheme/appsint/register.go
 delete mode 100644 vendor/k8s.io/client-go/scale/scheme/appsv1beta1/conversion.go
 delete mode 100644 vendor/k8s.io/client-go/scale/scheme/appsv1beta1/doc.go
 delete mode 100644 vendor/k8s.io/client-go/scale/scheme/appsv1beta1/register.go
 delete mode 100644 vendor/k8s.io/client-go/scale/scheme/appsv1beta1/zz_generated.conversion.go
 delete mode 100644 vendor/k8s.io/client-go/scale/scheme/appsv1beta2/conversion.go
 delete mode 100644 vendor/k8s.io/client-go/scale/scheme/appsv1beta2/doc.go
 delete mode 100644 vendor/k8s.io/client-go/scale/scheme/appsv1beta2/register.go
 delete mode 100644 vendor/k8s.io/client-go/scale/scheme/appsv1beta2/zz_generated.conversion.go
 delete mode 100644 vendor/k8s.io/client-go/scale/scheme/autoscalingv1/conversion.go
 delete mode 100644 vendor/k8s.io/client-go/scale/scheme/autoscalingv1/doc.go
 delete mode 100644 vendor/k8s.io/client-go/scale/scheme/autoscalingv1/register.go
 delete mode 100644 vendor/k8s.io/client-go/scale/scheme/autoscalingv1/zz_generated.conversion.go
 delete mode 100644 vendor/k8s.io/client-go/scale/scheme/doc.go
 delete mode 100644 vendor/k8s.io/client-go/scale/scheme/extensionsint/doc.go
 delete mode 100644 vendor/k8s.io/client-go/scale/scheme/extensionsint/register.go
 delete mode 100644 vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/conversion.go
 delete mode 100644 vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/doc.go
 delete mode 100644 vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/register.go
 delete mode 100644 vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/zz_generated.conversion.go
 delete mode 100644 vendor/k8s.io/client-go/scale/scheme/register.go
 delete mode 100644 vendor/k8s.io/client-go/scale/scheme/types.go
 delete mode 100644 vendor/k8s.io/client-go/scale/scheme/zz_generated.deepcopy.go
 delete mode 100644 vendor/k8s.io/client-go/scale/util.go

diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go
deleted file mode 100644
index 255a81aed..000000000
--- a/vendor/github.com/docker/docker/api/common.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package api // import "github.com/docker/docker/api"
-
-// Common constants for daemon and client.
-const (
-	// DefaultVersion of Current REST API
-	DefaultVersion = "1.38"
-
-	// NoBaseImageSpecifier is the symbol used by the FROM
-	// command to specify that no base image is to be used.
-	NoBaseImageSpecifier = "scratch"
-)
diff --git a/vendor/github.com/docker/docker/api/common_unix.go b/vendor/github.com/docker/docker/api/common_unix.go
deleted file mode 100644
index 504b0c90d..000000000
--- a/vendor/github.com/docker/docker/api/common_unix.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// +build !windows
-
-package api // import "github.com/docker/docker/api"
-
-// MinVersion represents Minimum REST API version supported
-const MinVersion = "1.12"
diff --git a/vendor/github.com/docker/docker/api/common_windows.go b/vendor/github.com/docker/docker/api/common_windows.go
deleted file mode 100644
index 590ba5479..000000000
--- a/vendor/github.com/docker/docker/api/common_windows.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package api // import "github.com/docker/docker/api"
-
-// MinVersion represents Minimum REST API version supported
-// Technically the first daemon API version released on Windows is v1.25 in
-// engine version 1.13. However, some clients are explicitly using downlevel
-// APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive.
-// Hence also allowing 1.24 on Windows.
-const MinVersion string = "1.24"
diff --git a/vendor/github.com/docker/docker/api/server/backend/build/backend.go b/vendor/github.com/docker/docker/api/server/backend/build/backend.go
deleted file mode 100644
index 22ce9cef7..000000000
--- a/vendor/github.com/docker/docker/api/server/backend/build/backend.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package build // import "github.com/docker/docker/api/server/backend/build"
-
-import (
-	"context"
-	"fmt"
-
-	"github.com/docker/distribution/reference"
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/backend"
-	"github.com/docker/docker/builder"
-	"github.com/docker/docker/builder/fscache"
-	"github.com/docker/docker/image"
-	"github.com/docker/docker/pkg/stringid"
-	"github.com/pkg/errors"
-)
-
-// ImageComponent provides an interface for working with images
-type ImageComponent interface {
-	SquashImage(from string, to string) (string, error)
-	TagImageWithReference(image.ID, reference.Named) error
-}
-
-// Builder defines interface for running a build
-type Builder interface {
-	Build(context.Context, backend.BuildConfig) (*builder.Result, error)
-}
-
-// Backend provides build functionality to the API router
-type Backend struct {
-	builder        Builder
-	fsCache        *fscache.FSCache
-	imageComponent ImageComponent
-}
-
-// NewBackend creates a new build backend from components
-func NewBackend(components ImageComponent, builder Builder, fsCache *fscache.FSCache) (*Backend, error) {
-	return &Backend{imageComponent: components, builder: builder, fsCache: fsCache}, nil
-}
-
-// Build builds an image from a Source
-func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string, error) {
-	options := config.Options
-	tagger, err := NewTagger(b.imageComponent, config.ProgressWriter.StdoutFormatter, options.Tags)
-	if err != nil {
-		return "", err
-	}
-
-	build, err := b.builder.Build(ctx, config)
-	if err != nil {
-		return "", err
-	}
-
-	var imageID = build.ImageID
-	if options.Squash {
-		if imageID, err = squashBuild(build, b.imageComponent); err != nil {
-			return "", err
-		}
-		if config.ProgressWriter.AuxFormatter != nil {
-			if err = config.ProgressWriter.AuxFormatter.Emit(types.BuildResult{ID: imageID}); err != nil {
return "", err - } - } - } - - stdout := config.ProgressWriter.StdoutFormatter - fmt.Fprintf(stdout, "Successfully built %s\n", stringid.TruncateID(imageID)) - err = tagger.TagImages(image.ID(imageID)) - return imageID, err -} - -// PruneCache removes all cached build sources -func (b *Backend) PruneCache(ctx context.Context) (*types.BuildCachePruneReport, error) { - size, err := b.fsCache.Prune(ctx) - if err != nil { - return nil, errors.Wrap(err, "failed to prune build cache") - } - return &types.BuildCachePruneReport{SpaceReclaimed: size}, nil -} - -func squashBuild(build *builder.Result, imageComponent ImageComponent) (string, error) { - var fromID string - if build.FromImage != nil { - fromID = build.FromImage.ImageID() - } - imageID, err := imageComponent.SquashImage(build.ImageID, fromID) - if err != nil { - return "", errors.Wrap(err, "error squashing image") - } - return imageID, nil -} diff --git a/vendor/github.com/docker/docker/api/server/backend/build/tag.go b/vendor/github.com/docker/docker/api/server/backend/build/tag.go deleted file mode 100644 index f840b9d72..000000000 --- a/vendor/github.com/docker/docker/api/server/backend/build/tag.go +++ /dev/null @@ -1,77 +0,0 @@ -package build // import "github.com/docker/docker/api/server/backend/build" - -import ( - "fmt" - "io" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/image" - "github.com/pkg/errors" -) - -// Tagger is responsible for tagging an image created by a builder -type Tagger struct { - imageComponent ImageComponent - stdout io.Writer - repoAndTags []reference.Named -} - -// NewTagger returns a new Tagger for tagging the images of a build. -// If any of the names are invalid tags an error is returned. -func NewTagger(backend ImageComponent, stdout io.Writer, names []string) (*Tagger, error) { - reposAndTags, err := sanitizeRepoAndTags(names) - if err != nil { - return nil, err - } - return &Tagger{ - imageComponent: backend, - stdout: stdout, - repoAndTags: reposAndTags, - }, nil -} - -// TagImages creates image tags for the imageID -func (bt *Tagger) TagImages(imageID image.ID) error { - for _, rt := range bt.repoAndTags { - if err := bt.imageComponent.TagImageWithReference(imageID, rt); err != nil { - return err - } - fmt.Fprintf(bt.stdout, "Successfully tagged %s\n", reference.FamiliarString(rt)) - } - return nil -} - -// sanitizeRepoAndTags parses the raw "t" parameter received from the client -// to a slice of repoAndTag. -// It also validates each repoName and tag. -func sanitizeRepoAndTags(names []string) ([]reference.Named, error) { - var ( - repoAndTags []reference.Named - // This map is used for deduplicating the "-t" parameter. 
-		uniqNames = make(map[string]struct{})
-	)
-	for _, repo := range names {
-		if repo == "" {
-			continue
-		}
-
-		ref, err := reference.ParseNormalizedNamed(repo)
-		if err != nil {
-			return nil, err
-		}
-
-		if _, isCanonical := ref.(reference.Canonical); isCanonical {
-			return nil, errors.New("build tag cannot contain a digest")
-		}
-
-		ref = reference.TagNameOnly(ref)
-
-		nameWithTag := ref.String()
-
-		if _, exists := uniqNames[nameWithTag]; !exists {
-			uniqNames[nameWithTag] = struct{}{}
-			repoAndTags = append(repoAndTags, ref)
-		}
-	}
-	return repoAndTags, nil
-}
diff --git a/vendor/github.com/docker/docker/api/server/httputils/decoder.go b/vendor/github.com/docker/docker/api/server/httputils/decoder.go
deleted file mode 100644
index 8293503c4..000000000
--- a/vendor/github.com/docker/docker/api/server/httputils/decoder.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package httputils // import "github.com/docker/docker/api/server/httputils"
-
-import (
-	"io"
-
-	"github.com/docker/docker/api/types/container"
-	"github.com/docker/docker/api/types/network"
-)
-
-// ContainerDecoder specifies how
-// to translate an io.Reader into
-// container configuration.
-type ContainerDecoder interface {
-	DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *network.NetworkingConfig, error)
-	DecodeHostConfig(src io.Reader) (*container.HostConfig, error)
-}
diff --git a/vendor/github.com/docker/docker/api/server/httputils/errors.go b/vendor/github.com/docker/docker/api/server/httputils/errors.go
deleted file mode 100644
index a21affff3..000000000
--- a/vendor/github.com/docker/docker/api/server/httputils/errors.go
+++ /dev/null
@@ -1,131 +0,0 @@
-package httputils // import "github.com/docker/docker/api/server/httputils"
-
-import (
-	"fmt"
-	"net/http"
-
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/versions"
-	"github.com/docker/docker/errdefs"
-	"github.com/gorilla/mux"
-	"github.com/sirupsen/logrus"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-)
-
-type causer interface {
-	Cause() error
-}
-
-// GetHTTPErrorStatusCode retrieves status code from error message.
-func GetHTTPErrorStatusCode(err error) int {
-	if err == nil {
-		logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling")
-		return http.StatusInternalServerError
-	}
-
-	var statusCode int
-
-	// Stop right there
-	// Are you sure you should be adding a new error class here? Do one of the existing ones work?
-
-	// Note that the below functions are already checking the error causal chain for matches.
-	switch {
-	case errdefs.IsNotFound(err):
-		statusCode = http.StatusNotFound
-	case errdefs.IsInvalidParameter(err):
-		statusCode = http.StatusBadRequest
-	case errdefs.IsConflict(err) || errdefs.IsAlreadyExists(err):
-		statusCode = http.StatusConflict
-	case errdefs.IsUnauthorized(err):
-		statusCode = http.StatusUnauthorized
-	case errdefs.IsUnavailable(err):
-		statusCode = http.StatusServiceUnavailable
-	case errdefs.IsForbidden(err):
-		statusCode = http.StatusForbidden
-	case errdefs.IsNotModified(err):
-		statusCode = http.StatusNotModified
-	case errdefs.IsNotImplemented(err):
-		statusCode = http.StatusNotImplemented
-	case errdefs.IsSystem(err) || errdefs.IsUnknown(err) || errdefs.IsDataLoss(err) || errdefs.IsDeadline(err) || errdefs.IsCancelled(err):
-		statusCode = http.StatusInternalServerError
-	default:
-		statusCode = statusCodeFromGRPCError(err)
-		if statusCode != http.StatusInternalServerError {
-			return statusCode
-		}
-
-		if e, ok := err.(causer); ok {
-			return GetHTTPErrorStatusCode(e.Cause())
-		}
-
-		logrus.WithFields(logrus.Fields{
-			"module":     "api",
-			"error_type": fmt.Sprintf("%T", err),
-		}).Debugf("FIXME: Got an API for which error does not match any expected type!!!: %+v", err)
-	}
-
-	if statusCode == 0 {
-		statusCode = http.StatusInternalServerError
-	}
-
-	return statusCode
-}
-
-func apiVersionSupportsJSONErrors(version string) bool {
-	const firstAPIVersionWithJSONErrors = "1.23"
-	return version == "" || versions.GreaterThan(version, firstAPIVersionWithJSONErrors)
-}
-
-// MakeErrorHandler makes an HTTP handler that decodes a Docker error and
-// returns it in the response.
-func MakeErrorHandler(err error) http.HandlerFunc {
-	return func(w http.ResponseWriter, r *http.Request) {
-		statusCode := GetHTTPErrorStatusCode(err)
-		vars := mux.Vars(r)
-		if apiVersionSupportsJSONErrors(vars["version"]) {
-			response := &types.ErrorResponse{
-				Message: err.Error(),
-			}
-			WriteJSON(w, statusCode, response)
-		} else {
-			http.Error(w, grpc.ErrorDesc(err), statusCode)
-		}
-	}
-}
-
-// statusCodeFromGRPCError returns status code according to gRPC error
-func statusCodeFromGRPCError(err error) int {
-	switch grpc.Code(err) {
-	case codes.InvalidArgument: // code 3
-		return http.StatusBadRequest
-	case codes.NotFound: // code 5
-		return http.StatusNotFound
-	case codes.AlreadyExists: // code 6
-		return http.StatusConflict
-	case codes.PermissionDenied: // code 7
-		return http.StatusForbidden
-	case codes.FailedPrecondition: // code 9
-		return http.StatusBadRequest
-	case codes.Unauthenticated: // code 16
-		return http.StatusUnauthorized
-	case codes.OutOfRange: // code 11
-		return http.StatusBadRequest
-	case codes.Unimplemented: // code 12
-		return http.StatusNotImplemented
-	case codes.Unavailable: // code 14
-		return http.StatusServiceUnavailable
-	default:
-		if e, ok := err.(causer); ok {
-			return statusCodeFromGRPCError(e.Cause())
-		}
-		// codes.Canceled(1)
-		// codes.Unknown(2)
-		// codes.DeadlineExceeded(4)
-		// codes.ResourceExhausted(8)
-		// codes.Aborted(10)
-		// codes.Internal(13)
-		// codes.DataLoss(15)
-		return http.StatusInternalServerError
-	}
-}
diff --git a/vendor/github.com/docker/docker/api/server/httputils/form.go b/vendor/github.com/docker/docker/api/server/httputils/form.go
deleted file mode 100644
index 6d166eac1..000000000
--- a/vendor/github.com/docker/docker/api/server/httputils/form.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package httputils // import "github.com/docker/docker/api/server/httputils"
-
-import (
-	"net/http"
-	"strconv"
-	"strings"
-)
-
-// BoolValue transforms a form value in different formats into a boolean type.
-func BoolValue(r *http.Request, k string) bool {
-	s := strings.ToLower(strings.TrimSpace(r.FormValue(k)))
-	return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none")
-}
-
-// BoolValueOrDefault returns the default bool passed if the query param is
-// missing, otherwise it's just a proxy to boolValue above.
-func BoolValueOrDefault(r *http.Request, k string, d bool) bool {
-	if _, ok := r.Form[k]; !ok {
-		return d
-	}
-	return BoolValue(r, k)
-}
-
-// Int64ValueOrZero parses a form value into an int64 type.
-// It returns 0 if the parsing fails.
-func Int64ValueOrZero(r *http.Request, k string) int64 {
-	val, err := Int64ValueOrDefault(r, k, 0)
-	if err != nil {
-		return 0
-	}
-	return val
-}
-
-// Int64ValueOrDefault parses a form value into an int64 type. If there is an
-// error, returns the error. If there is no value returns the default value.
-func Int64ValueOrDefault(r *http.Request, field string, def int64) (int64, error) {
-	if r.Form.Get(field) != "" {
-		value, err := strconv.ParseInt(r.Form.Get(field), 10, 64)
-		return value, err
-	}
-	return def, nil
-}
-
-// ArchiveOptions stores archive information for different operations.
-type ArchiveOptions struct {
-	Name string
-	Path string
-}
-
-type badParameterError struct {
-	param string
-}
-
-func (e badParameterError) Error() string {
-	return "bad parameter: " + e.param + "cannot be empty"
-}
-
-func (e badParameterError) InvalidParameter() {}
-
-// ArchiveFormValues parses form values and turns them into ArchiveOptions.
-// It fails if the archive name and path are not in the request.
-func ArchiveFormValues(r *http.Request, vars map[string]string) (ArchiveOptions, error) {
-	if err := ParseForm(r); err != nil {
-		return ArchiveOptions{}, err
-	}
-
-	name := vars["name"]
-	if name == "" {
-		return ArchiveOptions{}, badParameterError{"name"}
-	}
-	path := r.Form.Get("path")
-	if path == "" {
-		return ArchiveOptions{}, badParameterError{"path"}
-	}
-	return ArchiveOptions{name, path}, nil
-}
diff --git a/vendor/github.com/docker/docker/api/server/httputils/httputils.go b/vendor/github.com/docker/docker/api/server/httputils/httputils.go
deleted file mode 100644
index 5a6854415..000000000
--- a/vendor/github.com/docker/docker/api/server/httputils/httputils.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package httputils // import "github.com/docker/docker/api/server/httputils"
-
-import (
-	"context"
-	"io"
-	"mime"
-	"net/http"
-	"strings"
-
-	"github.com/docker/docker/errdefs"
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-)
-
-type contextKey string
-
-// APIVersionKey is the client's requested API version.
-const APIVersionKey contextKey = "api-version"
-
-// APIFunc is an adapter to allow the use of ordinary functions as Docker API endpoints.
-// Any function that has the appropriate signature can be registered as an API endpoint (e.g. getVersion).
-type APIFunc func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error
-
-// HijackConnection interrupts the http response writer to get the
-// underlying connection and operate with it.
-func HijackConnection(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) {
-	conn, _, err := w.(http.Hijacker).Hijack()
-	if err != nil {
-		return nil, nil, err
-	}
-	// Flush the options to make sure the client sets the raw mode
-	conn.Write([]byte{})
-	return conn, conn, nil
-}
-
-// CloseStreams ensures that a list for http streams are properly closed.
-func CloseStreams(streams ...interface{}) { - for _, stream := range streams { - if tcpc, ok := stream.(interface { - CloseWrite() error - }); ok { - tcpc.CloseWrite() - } else if closer, ok := stream.(io.Closer); ok { - closer.Close() - } - } -} - -// CheckForJSON makes sure that the request's Content-Type is application/json. -func CheckForJSON(r *http.Request) error { - ct := r.Header.Get("Content-Type") - - // No Content-Type header is ok as long as there's no Body - if ct == "" { - if r.Body == nil || r.ContentLength == 0 { - return nil - } - } - - // Otherwise it better be json - if matchesContentType(ct, "application/json") { - return nil - } - return errdefs.InvalidParameter(errors.Errorf("Content-Type specified (%s) must be 'application/json'", ct)) -} - -// ParseForm ensures the request form is parsed even with invalid content types. -// If we don't do this, POST method without Content-type (even with empty body) will fail. -func ParseForm(r *http.Request) error { - if r == nil { - return nil - } - if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") { - return errdefs.InvalidParameter(err) - } - return nil -} - -// VersionFromContext returns an API version from the context using APIVersionKey. -// It panics if the context value does not have version.Version type. -func VersionFromContext(ctx context.Context) string { - if ctx == nil { - return "" - } - - if val := ctx.Value(APIVersionKey); val != nil { - return val.(string) - } - - return "" -} - -// matchesContentType validates the content type against the expected one -func matchesContentType(contentType, expectedType string) bool { - mimetype, _, err := mime.ParseMediaType(contentType) - if err != nil { - logrus.Errorf("Error parsing media type: %s error: %v", contentType, err) - } - return err == nil && mimetype == expectedType -} diff --git a/vendor/github.com/docker/docker/api/server/httputils/httputils_write_json.go b/vendor/github.com/docker/docker/api/server/httputils/httputils_write_json.go deleted file mode 100644 index 148dd038b..000000000 --- a/vendor/github.com/docker/docker/api/server/httputils/httputils_write_json.go +++ /dev/null @@ -1,15 +0,0 @@ -package httputils // import "github.com/docker/docker/api/server/httputils" - -import ( - "encoding/json" - "net/http" -) - -// WriteJSON writes the value v to the http response stream as json with standard json encoding. 
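A usage sketch for the function below (the payload is illustrative): because the encoder turns off HTML escaping, characters such as < and > are written through verbatim.

package main

import (
	"net/http"

	"github.com/docker/docker/api/server/httputils"
)

func respondOK(w http.ResponseWriter) error {
	// Writes {"Cmd":"a <b"} rather than {"Cmd":"a \u003cb"}.
	return httputils.WriteJSON(w, http.StatusOK, map[string]string{"Cmd": "a <b"})
}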
-func WriteJSON(w http.ResponseWriter, code int, v interface{}) error {
-	w.Header().Set("Content-Type", "application/json")
-	w.WriteHeader(code)
-	enc := json.NewEncoder(w)
-	enc.SetEscapeHTML(false)
-	return enc.Encode(v)
-}
diff --git a/vendor/github.com/docker/docker/api/server/httputils/write_log_stream.go b/vendor/github.com/docker/docker/api/server/httputils/write_log_stream.go
deleted file mode 100644
index 9e769c8b4..000000000
--- a/vendor/github.com/docker/docker/api/server/httputils/write_log_stream.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package httputils // import "github.com/docker/docker/api/server/httputils"
-
-import (
-	"context"
-	"fmt"
-	"io"
-	"net/url"
-	"sort"
-
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/backend"
-	"github.com/docker/docker/pkg/ioutils"
-	"github.com/docker/docker/pkg/jsonmessage"
-	"github.com/docker/docker/pkg/stdcopy"
-)
-
-// WriteLogStream writes an encoded byte stream of log messages from the
-// messages channel, multiplexing them with a stdcopy.Writer if mux is true.
-func WriteLogStream(_ context.Context, w io.Writer, msgs <-chan *backend.LogMessage, config *types.ContainerLogsOptions, mux bool) {
-	wf := ioutils.NewWriteFlusher(w)
-	defer wf.Close()
-
-	wf.Flush()
-
-	outStream := io.Writer(wf)
-	errStream := outStream
-	sysErrStream := errStream
-	if mux {
-		sysErrStream = stdcopy.NewStdWriter(outStream, stdcopy.Systemerr)
-		errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr)
-		outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
-	}
-
-	for {
-		msg, ok := <-msgs
-		if !ok {
-			return
-		}
-		// if the message contains an error, write that error and continue
-		// consuming messages until the channel is closed
-		if msg.Err != nil {
-			fmt.Fprintf(sysErrStream, "Error grabbing logs: %v\n", msg.Err)
-			continue
-		}
-		logLine := msg.Line
-		if config.Details {
-			logLine = append(attrsByteSlice(msg.Attrs), ' ')
-			logLine = append(logLine, msg.Line...)
-		}
-		if config.Timestamps {
-			logLine = append([]byte(msg.Timestamp.Format(jsonmessage.RFC3339NanoFixed)+" "), logLine...)
-		}
-		if msg.Source == "stdout" && config.ShowStdout {
-			outStream.Write(logLine)
-		}
-		if msg.Source == "stderr" && config.ShowStderr {
-			errStream.Write(logLine)
-		}
-	}
-}
-
-type byKey []backend.LogAttr
-
-func (b byKey) Len() int           { return len(b) }
-func (b byKey) Less(i, j int) bool { return b[i].Key < b[j].Key }
-func (b byKey) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
-
-func attrsByteSlice(a []backend.LogAttr) []byte {
-	// Note this sorts "a" in-place. That is fine here - nothing else is
-	// going to use Attrs or care about the order.
-	sort.Sort(byKey(a))
-
-	var ret []byte
-	for i, pair := range a {
-		k, v := url.QueryEscape(pair.Key), url.QueryEscape(pair.Value)
-		ret = append(ret, []byte(k)...)
-		ret = append(ret, '=')
-		ret = append(ret, []byte(v)...)
-		if i != len(a)-1 {
-			ret = append(ret, ',')
-		}
-	}
-	return ret
-}
diff --git a/vendor/github.com/docker/docker/api/server/middleware.go b/vendor/github.com/docker/docker/api/server/middleware.go
deleted file mode 100644
index 3c5683fad..000000000
--- a/vendor/github.com/docker/docker/api/server/middleware.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package server // import "github.com/docker/docker/api/server"
-
-import (
-	"github.com/docker/docker/api/server/httputils"
-	"github.com/docker/docker/api/server/middleware"
-	"github.com/sirupsen/logrus"
-)
-
-// handlerWithGlobalMiddlewares wraps the handler function for a request with
-// the server's global middlewares. The order of the middlewares is backwards,
-// meaning that the first in the list will be evaluated last.
-func (s *Server) handlerWithGlobalMiddlewares(handler httputils.APIFunc) httputils.APIFunc {
-	next := handler
-
-	for _, m := range s.middlewares {
-		next = m.WrapHandler(next)
-	}
-
-	if s.cfg.Logging && logrus.GetLevel() == logrus.DebugLevel {
-		next = middleware.DebugRequestMiddleware(next)
-	}
-
-	return next
-}
diff --git a/vendor/github.com/docker/docker/api/server/middleware/cors.go b/vendor/github.com/docker/docker/api/server/middleware/cors.go
deleted file mode 100644
index 54374690e..000000000
--- a/vendor/github.com/docker/docker/api/server/middleware/cors.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package middleware // import "github.com/docker/docker/api/server/middleware"
-
-import (
-	"context"
-	"net/http"
-
-	"github.com/sirupsen/logrus"
-)
-
-// CORSMiddleware injects CORS headers to each request
-// when it's configured.
-type CORSMiddleware struct {
-	defaultHeaders string
-}
-
-// NewCORSMiddleware creates a new CORSMiddleware with default headers.
-func NewCORSMiddleware(d string) CORSMiddleware {
-	return CORSMiddleware{defaultHeaders: d}
-}
-
-// WrapHandler returns a new handler function wrapping the previous one in the request chain.
-func (c CORSMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-		// If "api-cors-header" is not given, but "api-enable-cors" is true, we set cors to "*"
-		// otherwise, all header values will be passed to the HTTP handler.
-		corsHeaders := c.defaultHeaders
-		if corsHeaders == "" {
-			corsHeaders = "*"
-		}
-
-		logrus.Debugf("CORS header is enabled and set to: %s", corsHeaders)
-		w.Header().Add("Access-Control-Allow-Origin", corsHeaders)
-		w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth")
-		w.Header().Add("Access-Control-Allow-Methods", "HEAD, GET, POST, DELETE, PUT, OPTIONS")
-		return handler(ctx, w, r, vars)
-	}
-}
diff --git a/vendor/github.com/docker/docker/api/server/middleware/debug.go b/vendor/github.com/docker/docker/api/server/middleware/debug.go
deleted file mode 100644
index 2cef1d46c..000000000
--- a/vendor/github.com/docker/docker/api/server/middleware/debug.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package middleware // import "github.com/docker/docker/api/server/middleware"
-
-import (
-	"bufio"
-	"context"
-	"encoding/json"
-	"io"
-	"net/http"
-	"strings"
-
-	"github.com/docker/docker/api/server/httputils"
-	"github.com/docker/docker/pkg/ioutils"
-	"github.com/sirupsen/logrus"
-)
-
-// DebugRequestMiddleware dumps the request to logger
-func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-		logrus.Debugf("Calling %s %s", r.Method, r.RequestURI)
-
-		if r.Method != "POST" {
-			return handler(ctx, w, r, vars)
-		}
-		if err := httputils.CheckForJSON(r); err != nil {
-			return handler(ctx, w, r, vars)
-		}
-		maxBodySize := 4096 // 4KB
-		if r.ContentLength > int64(maxBodySize) {
-			return handler(ctx, w, r, vars)
-		}
-
-		body := r.Body
-		bufReader := bufio.NewReaderSize(body, maxBodySize)
-		r.Body = ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() })
-
-		b, err := bufReader.Peek(maxBodySize)
-		if err != io.EOF {
-			// either there was an error reading, or the buffer is full (in which case the request is too large)
-			return handler(ctx, w, r, vars)
-		}
-
-		var postForm map[string]interface{}
-		if err := json.Unmarshal(b, &postForm); err == nil {
-			maskSecretKeys(postForm, r.RequestURI)
-			formStr, errMarshal := json.Marshal(postForm)
-			if errMarshal == nil {
-				logrus.Debugf("form data: %s", string(formStr))
-			} else {
-				logrus.Debugf("form data: %q", postForm)
-			}
-		}
-
-		return handler(ctx, w, r, vars)
-	}
-}
-
-func maskSecretKeys(inp interface{}, path string) {
-	// Remove any query string from the path
-	idx := strings.Index(path, "?")
-	if idx != -1 {
-		path = path[:idx]
-	}
-	// Remove trailing / characters
-	path = strings.TrimRight(path, "/")
-
-	if arr, ok := inp.([]interface{}); ok {
-		for _, f := range arr {
-			maskSecretKeys(f, path)
-		}
-		return
-	}
-
-	if form, ok := inp.(map[string]interface{}); ok {
-	loop0:
-		for k, v := range form {
-			for _, m := range []string{"password", "secret", "jointoken", "unlockkey", "signingcakey"} {
-				if strings.EqualFold(m, k) {
-					form[k] = "*****"
-					continue loop0
-				}
-			}
-			maskSecretKeys(v, path)
-		}
-
-		// Route-specific redactions
-		if strings.HasSuffix(path, "/secrets/create") {
-			for k := range form {
-				if k == "Data" {
-					form[k] = "*****"
-				}
-			}
-		}
-	}
-}
diff --git a/vendor/github.com/docker/docker/api/server/middleware/experimental.go b/vendor/github.com/docker/docker/api/server/middleware/experimental.go
deleted file mode 100644
index 4df5decce..000000000
--- a/vendor/github.com/docker/docker/api/server/middleware/experimental.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package middleware // import "github.com/docker/docker/api/server/middleware"
-
-import (
-	"context"
-	"net/http"
-)
-
-// ExperimentalMiddleware is the middleware in charge of adding the
-// 'Docker-Experimental' header to every outgoing response.
-type ExperimentalMiddleware struct {
-	experimental string
-}
-
-// NewExperimentalMiddleware creates a new ExperimentalMiddleware
-func NewExperimentalMiddleware(experimentalEnabled bool) ExperimentalMiddleware {
-	if experimentalEnabled {
-		return ExperimentalMiddleware{"true"}
-	}
-	return ExperimentalMiddleware{"false"}
-}
-
-// WrapHandler returns a new handler function wrapping the previous one in the request chain.
-func (e ExperimentalMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-		w.Header().Set("Docker-Experimental", e.experimental)
-		return handler(ctx, w, r, vars)
-	}
-}
diff --git a/vendor/github.com/docker/docker/api/server/middleware/middleware.go b/vendor/github.com/docker/docker/api/server/middleware/middleware.go
deleted file mode 100644
index 43483f1e4..000000000
--- a/vendor/github.com/docker/docker/api/server/middleware/middleware.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package middleware // import "github.com/docker/docker/api/server/middleware"
-
-import (
-	"context"
-	"net/http"
-)
-
-// Middleware is an interface to allow the use of ordinary functions as Docker API filters.
-// Any struct that has the appropriate signature can be registered as a middleware. -type Middleware interface { - WrapHandler(func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error -} diff --git a/vendor/github.com/docker/docker/api/server/middleware/version.go b/vendor/github.com/docker/docker/api/server/middleware/version.go deleted file mode 100644 index 88b11ca37..000000000 --- a/vendor/github.com/docker/docker/api/server/middleware/version.go +++ /dev/null @@ -1,65 +0,0 @@ -package middleware // import "github.com/docker/docker/api/server/middleware" - -import ( - "context" - "fmt" - "net/http" - "runtime" - - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/types/versions" -) - -// VersionMiddleware is a middleware that -// validates the client and server versions. -type VersionMiddleware struct { - serverVersion string - defaultVersion string - minVersion string -} - -// NewVersionMiddleware creates a new VersionMiddleware -// with the default versions. -func NewVersionMiddleware(s, d, m string) VersionMiddleware { - return VersionMiddleware{ - serverVersion: s, - defaultVersion: d, - minVersion: m, - } -} - -type versionUnsupportedError struct { - version, minVersion, maxVersion string -} - -func (e versionUnsupportedError) Error() string { - if e.minVersion != "" { - return fmt.Sprintf("client version %s is too old. Minimum supported API version is %s, please upgrade your client to a newer version", e.version, e.minVersion) - } - return fmt.Sprintf("client version %s is too new. Maximum supported API version is %s", e.version, e.maxVersion) -} - -func (e versionUnsupportedError) InvalidParameter() {} - -// WrapHandler returns a new handler function wrapping the previous one in the request chain. -func (v VersionMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - w.Header().Set("Server", fmt.Sprintf("Docker/%s (%s)", v.serverVersion, runtime.GOOS)) - w.Header().Set("API-Version", v.defaultVersion) - w.Header().Set("OSType", runtime.GOOS) - - apiVersion := vars["version"] - if apiVersion == "" { - apiVersion = v.defaultVersion - } - if versions.LessThan(apiVersion, v.minVersion) { - return versionUnsupportedError{version: apiVersion, minVersion: v.minVersion} - } - if versions.GreaterThan(apiVersion, v.defaultVersion) { - return versionUnsupportedError{version: apiVersion, maxVersion: v.defaultVersion} - } - ctx = context.WithValue(ctx, httputils.APIVersionKey, apiVersion) - return handler(ctx, w, r, vars) - } - -} diff --git a/vendor/github.com/docker/docker/api/server/router/build/backend.go b/vendor/github.com/docker/docker/api/server/router/build/backend.go deleted file mode 100644 index d82ef63af..000000000 --- a/vendor/github.com/docker/docker/api/server/router/build/backend.go +++ /dev/null @@ -1,22 +0,0 @@ -package build // import "github.com/docker/docker/api/server/router/build" - -import ( - "context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/backend" -) - -// Backend abstracts an image builder whose only purpose is to build an image referenced by an imageID. 
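A condensed sketch of how a caller exercises the interface below; the backend value and config are hypothetical:

package main

import (
	"context"

	buildrouter "github.com/docker/docker/api/server/router/build"
	"github.com/docker/docker/api/types/backend"
)

func runBuild(ctx context.Context, b buildrouter.Backend, cfg backend.BuildConfig) error {
	imgID, err := b.Build(ctx, cfg) // the returned string is the built image's ID
	if err != nil {
		return err
	}
	_ = imgID
	_, err = b.PruneCache(ctx) // clears the build cache and reports reclaimed space
	return err
}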
-type Backend interface { - // Build a Docker image returning the id of the image - // TODO: make this return a reference instead of string - Build(context.Context, backend.BuildConfig) (string, error) - - // Prune build cache - PruneCache(context.Context) (*types.BuildCachePruneReport, error) -} - -type experimentalProvider interface { - HasExperimental() bool -} diff --git a/vendor/github.com/docker/docker/api/server/router/build/build.go b/vendor/github.com/docker/docker/api/server/router/build/build.go deleted file mode 100644 index dc13a1060..000000000 --- a/vendor/github.com/docker/docker/api/server/router/build/build.go +++ /dev/null @@ -1,29 +0,0 @@ -package build // import "github.com/docker/docker/api/server/router/build" - -import "github.com/docker/docker/api/server/router" - -// buildRouter is a router to talk with the build controller -type buildRouter struct { - backend Backend - daemon experimentalProvider - routes []router.Route -} - -// NewRouter initializes a new build router -func NewRouter(b Backend, d experimentalProvider) router.Router { - r := &buildRouter{backend: b, daemon: d} - r.initRoutes() - return r -} - -// Routes returns the available routers to the build controller -func (r *buildRouter) Routes() []router.Route { - return r.routes -} - -func (r *buildRouter) initRoutes() { - r.routes = []router.Route{ - router.NewPostRoute("/build", r.postBuild, router.WithCancel), - router.NewPostRoute("/build/prune", r.postPrune, router.WithCancel), - } -} diff --git a/vendor/github.com/docker/docker/api/server/router/build/build_routes.go b/vendor/github.com/docker/docker/api/server/router/build/build_routes.go deleted file mode 100644 index 3e3668c42..000000000 --- a/vendor/github.com/docker/docker/api/server/router/build/build_routes.go +++ /dev/null @@ -1,269 +0,0 @@ -package build // import "github.com/docker/docker/api/server/router/build" - -import ( - "bytes" - "context" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "net/http" - "runtime" - "strconv" - "strings" - "sync" - - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/versions" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/pkg/system" - "github.com/docker/go-units" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -type invalidIsolationError string - -func (e invalidIsolationError) Error() string { - return fmt.Sprintf("Unsupported isolation: %q", string(e)) -} - -func (e invalidIsolationError) InvalidParameter() {} - -func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBuildOptions, error) { - version := httputils.VersionFromContext(ctx) - options := &types.ImageBuildOptions{} - if httputils.BoolValue(r, "forcerm") && versions.GreaterThanOrEqualTo(version, "1.12") { - options.Remove = true - } else if r.FormValue("rm") == "" && versions.GreaterThanOrEqualTo(version, "1.12") { - options.Remove = true - } else { - options.Remove = httputils.BoolValue(r, "rm") - } - if httputils.BoolValue(r, "pull") && versions.GreaterThanOrEqualTo(version, "1.16") { - options.PullParent = true - } - - options.Dockerfile = r.FormValue("dockerfile") - options.SuppressOutput = httputils.BoolValue(r, "q") - options.NoCache = httputils.BoolValue(r, "nocache") - 
options.ForceRemove = httputils.BoolValue(r, "forcerm") - options.MemorySwap = httputils.Int64ValueOrZero(r, "memswap") - options.Memory = httputils.Int64ValueOrZero(r, "memory") - options.CPUShares = httputils.Int64ValueOrZero(r, "cpushares") - options.CPUPeriod = httputils.Int64ValueOrZero(r, "cpuperiod") - options.CPUQuota = httputils.Int64ValueOrZero(r, "cpuquota") - options.CPUSetCPUs = r.FormValue("cpusetcpus") - options.CPUSetMems = r.FormValue("cpusetmems") - options.CgroupParent = r.FormValue("cgroupparent") - options.NetworkMode = r.FormValue("networkmode") - options.Tags = r.Form["t"] - options.ExtraHosts = r.Form["extrahosts"] - options.SecurityOpt = r.Form["securityopt"] - options.Squash = httputils.BoolValue(r, "squash") - options.Target = r.FormValue("target") - options.RemoteContext = r.FormValue("remote") - if versions.GreaterThanOrEqualTo(version, "1.32") { - apiPlatform := r.FormValue("platform") - p := system.ParsePlatform(apiPlatform) - if err := system.ValidatePlatform(p); err != nil { - return nil, errdefs.InvalidParameter(errors.Errorf("invalid platform: %s", err)) - } - options.Platform = p.OS - } - - if r.Form.Get("shmsize") != "" { - shmSize, err := strconv.ParseInt(r.Form.Get("shmsize"), 10, 64) - if err != nil { - return nil, err - } - options.ShmSize = shmSize - } - - if i := container.Isolation(r.FormValue("isolation")); i != "" { - if !container.Isolation.IsValid(i) { - return nil, invalidIsolationError(i) - } - options.Isolation = i - } - - if runtime.GOOS != "windows" && options.SecurityOpt != nil { - return nil, errdefs.InvalidParameter(errors.New("The daemon on this platform does not support setting security options on build")) - } - - var buildUlimits = []*units.Ulimit{} - ulimitsJSON := r.FormValue("ulimits") - if ulimitsJSON != "" { - if err := json.Unmarshal([]byte(ulimitsJSON), &buildUlimits); err != nil { - return nil, errors.Wrap(errdefs.InvalidParameter(err), "error reading ulimit settings") - } - options.Ulimits = buildUlimits - } - - // Note that there are two ways a --build-arg might appear in the - // json of the query param: - // "foo":"bar" - // and "foo":nil - // The first is the normal case, ie. --build-arg foo=bar - // or --build-arg foo - // where foo's value was picked up from an env var. - // The second ("foo":nil) is where they put --build-arg foo - // but "foo" isn't set as an env var. In that case we can't just drop - // the fact they mentioned it, we need to pass that along to the builder - // so that it can print a warning about "foo" being unused if there is - // no "ARG foo" in the Dockerfile. 
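	// For illustration (hypothetical values), a request carrying
	//   buildargs={"HTTP_PROXY":"http://proxy:3128","FOO":null}
	// decodes below into map[string]*string{"HTTP_PROXY": <ptr>, "FOO": nil},
	// preserving the set-vs-mentioned-but-unset distinction described above.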
- buildArgsJSON := r.FormValue("buildargs") - if buildArgsJSON != "" { - var buildArgs = map[string]*string{} - if err := json.Unmarshal([]byte(buildArgsJSON), &buildArgs); err != nil { - return nil, errors.Wrap(errdefs.InvalidParameter(err), "error reading build args") - } - options.BuildArgs = buildArgs - } - - labelsJSON := r.FormValue("labels") - if labelsJSON != "" { - var labels = map[string]string{} - if err := json.Unmarshal([]byte(labelsJSON), &labels); err != nil { - return nil, errors.Wrap(errdefs.InvalidParameter(err), "error reading labels") - } - options.Labels = labels - } - - cacheFromJSON := r.FormValue("cachefrom") - if cacheFromJSON != "" { - var cacheFrom = []string{} - if err := json.Unmarshal([]byte(cacheFromJSON), &cacheFrom); err != nil { - return nil, err - } - options.CacheFrom = cacheFrom - } - options.SessionID = r.FormValue("session") - - return options, nil -} - -func (br *buildRouter) postPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - report, err := br.backend.PruneCache(ctx) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, report) -} - -func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var ( - notVerboseBuffer = bytes.NewBuffer(nil) - version = httputils.VersionFromContext(ctx) - ) - - w.Header().Set("Content-Type", "application/json") - - output := ioutils.NewWriteFlusher(w) - defer output.Close() - errf := func(err error) error { - if httputils.BoolValue(r, "q") && notVerboseBuffer.Len() > 0 { - output.Write(notVerboseBuffer.Bytes()) - } - // Do not write the error in the http output if it's still empty. - // This prevents from writing a 200(OK) when there is an internal error. - if !output.Flushed() { - return err - } - _, err = w.Write(streamformatter.FormatError(err)) - if err != nil { - logrus.Warnf("could not write error response: %v", err) - } - return nil - } - - buildOptions, err := newImageBuildOptions(ctx, r) - if err != nil { - return errf(err) - } - buildOptions.AuthConfigs = getAuthConfigs(r.Header) - - if buildOptions.Squash && !br.daemon.HasExperimental() { - return errdefs.InvalidParameter(errors.New("squash is only supported with experimental mode")) - } - - out := io.Writer(output) - if buildOptions.SuppressOutput { - out = notVerboseBuffer - } - - // Currently, only used if context is from a remote url. - // Look at code in DetectContextFromRemoteURL for more information. - createProgressReader := func(in io.ReadCloser) io.ReadCloser { - progressOutput := streamformatter.NewJSONProgressOutput(out, true) - return progress.NewProgressReader(in, progressOutput, r.ContentLength, "Downloading context", buildOptions.RemoteContext) - } - - wantAux := versions.GreaterThanOrEqualTo(version, "1.30") - - imgID, err := br.backend.Build(ctx, backend.BuildConfig{ - Source: r.Body, - Options: buildOptions, - ProgressWriter: buildProgressWriter(out, wantAux, createProgressReader), - }) - if err != nil { - return errf(err) - } - - // Everything worked so if -q was provided the output from the daemon - // should be just the image ID and we'll print that to stdout. 
- if buildOptions.SuppressOutput { - fmt.Fprintln(streamformatter.NewStdoutWriter(output), imgID) - } - return nil -} - -func getAuthConfigs(header http.Header) map[string]types.AuthConfig { - authConfigs := map[string]types.AuthConfig{} - authConfigsEncoded := header.Get("X-Registry-Config") - - if authConfigsEncoded == "" { - return authConfigs - } - - authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded)) - // Pulling an image does not error when no auth is provided so to remain - // consistent with the existing api decode errors are ignored - json.NewDecoder(authConfigsJSON).Decode(&authConfigs) - return authConfigs -} - -type syncWriter struct { - w io.Writer - mu sync.Mutex -} - -func (s *syncWriter) Write(b []byte) (count int, err error) { - s.mu.Lock() - count, err = s.w.Write(b) - s.mu.Unlock() - return -} - -func buildProgressWriter(out io.Writer, wantAux bool, createProgressReader func(io.ReadCloser) io.ReadCloser) backend.ProgressWriter { - out = &syncWriter{w: out} - - var aux *streamformatter.AuxFormatter - if wantAux { - aux = &streamformatter.AuxFormatter{Writer: out} - } - - return backend.ProgressWriter{ - Output: out, - StdoutFormatter: streamformatter.NewStdoutWriter(out), - StderrFormatter: streamformatter.NewStderrWriter(out), - AuxFormatter: aux, - ProgressReaderFunc: createProgressReader, - } -} diff --git a/vendor/github.com/docker/docker/api/server/router/checkpoint/backend.go b/vendor/github.com/docker/docker/api/server/router/checkpoint/backend.go deleted file mode 100644 index 90c5d1a98..000000000 --- a/vendor/github.com/docker/docker/api/server/router/checkpoint/backend.go +++ /dev/null @@ -1,10 +0,0 @@ -package checkpoint // import "github.com/docker/docker/api/server/router/checkpoint" - -import "github.com/docker/docker/api/types" - -// Backend for Checkpoint -type Backend interface { - CheckpointCreate(container string, config types.CheckpointCreateOptions) error - CheckpointDelete(container string, config types.CheckpointDeleteOptions) error - CheckpointList(container string, config types.CheckpointListOptions) ([]types.Checkpoint, error) -} diff --git a/vendor/github.com/docker/docker/api/server/router/checkpoint/checkpoint.go b/vendor/github.com/docker/docker/api/server/router/checkpoint/checkpoint.go deleted file mode 100644 index 37bd0bdad..000000000 --- a/vendor/github.com/docker/docker/api/server/router/checkpoint/checkpoint.go +++ /dev/null @@ -1,36 +0,0 @@ -package checkpoint // import "github.com/docker/docker/api/server/router/checkpoint" - -import ( - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/server/router" -) - -// checkpointRouter is a router to talk with the checkpoint controller -type checkpointRouter struct { - backend Backend - decoder httputils.ContainerDecoder - routes []router.Route -} - -// NewRouter initializes a new checkpoint router -func NewRouter(b Backend, decoder httputils.ContainerDecoder) router.Router { - r := &checkpointRouter{ - backend: b, - decoder: decoder, - } - r.initRoutes() - return r -} - -// Routes returns the available routers to the checkpoint controller -func (r *checkpointRouter) Routes() []router.Route { - return r.routes -} - -func (r *checkpointRouter) initRoutes() { - r.routes = []router.Route{ - router.NewGetRoute("/containers/{name:.*}/checkpoints", r.getContainerCheckpoints, router.Experimental), - router.NewPostRoute("/containers/{name:.*}/checkpoints", r.postContainerCheckpoint, router.Experimental), - 
router.NewDeleteRoute("/containers/{name}/checkpoints/{checkpoint}", r.deleteContainerCheckpoint, router.Experimental), - } -} diff --git a/vendor/github.com/docker/docker/api/server/router/checkpoint/checkpoint_routes.go b/vendor/github.com/docker/docker/api/server/router/checkpoint/checkpoint_routes.go deleted file mode 100644 index 6c03f976e..000000000 --- a/vendor/github.com/docker/docker/api/server/router/checkpoint/checkpoint_routes.go +++ /dev/null @@ -1,65 +0,0 @@ -package checkpoint // import "github.com/docker/docker/api/server/router/checkpoint" - -import ( - "context" - "encoding/json" - "net/http" - - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/types" -) - -func (s *checkpointRouter) postContainerCheckpoint(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - var options types.CheckpointCreateOptions - - decoder := json.NewDecoder(r.Body) - if err := decoder.Decode(&options); err != nil { - return err - } - - err := s.backend.CheckpointCreate(vars["name"], options) - if err != nil { - return err - } - - w.WriteHeader(http.StatusCreated) - return nil -} - -func (s *checkpointRouter) getContainerCheckpoints(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - checkpoints, err := s.backend.CheckpointList(vars["name"], types.CheckpointListOptions{ - CheckpointDir: r.Form.Get("dir"), - }) - - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, checkpoints) -} - -func (s *checkpointRouter) deleteContainerCheckpoint(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - err := s.backend.CheckpointDelete(vars["name"], types.CheckpointDeleteOptions{ - CheckpointDir: r.Form.Get("dir"), - CheckpointID: vars["checkpoint"], - }) - - if err != nil { - return err - } - - w.WriteHeader(http.StatusNoContent) - return nil -} diff --git a/vendor/github.com/docker/docker/api/server/router/container/backend.go b/vendor/github.com/docker/docker/api/server/router/container/backend.go deleted file mode 100644 index 75ea1d82b..000000000 --- a/vendor/github.com/docker/docker/api/server/router/container/backend.go +++ /dev/null @@ -1,83 +0,0 @@ -package container // import "github.com/docker/docker/api/server/router/container" - -import ( - "context" - "io" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" - containerpkg "github.com/docker/docker/container" - "github.com/docker/docker/pkg/archive" -) - -// execBackend includes functions to implement to provide exec functionality. -type execBackend interface { - ContainerExecCreate(name string, config *types.ExecConfig) (string, error) - ContainerExecInspect(id string) (*backend.ExecInspect, error) - ContainerExecResize(name string, height, width int) error - ContainerExecStart(ctx context.Context, name string, stdin io.Reader, stdout io.Writer, stderr io.Writer) error - ExecExists(name string) (bool, error) -} - -// copyBackend includes functions to implement to provide container copy functionality. 
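A sketch of the expected call order against the exec backend above: create the exec instance first, then start it. The interface here narrows the backend to the two calls used, and the container name and config are illustrative:

package main

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types"
)

// execCreator captures just the two methods used below; the concrete
// backend (the daemon) satisfies it.
type execCreator interface {
	ContainerExecCreate(name string, config *types.ExecConfig) (string, error)
	ContainerExecStart(ctx context.Context, name string, stdin io.Reader, stdout, stderr io.Writer) error
}

func runExec(ctx context.Context, b execCreator) error {
	id, err := b.ContainerExecCreate("web", &types.ExecConfig{
		Cmd:          []string{"ls", "/"},
		AttachStdout: true,
	})
	if err != nil {
		return err
	}
	return b.ContainerExecStart(ctx, id, nil, os.Stdout, os.Stderr)
}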
-type copyBackend interface { - ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) - ContainerCopy(name string, res string) (io.ReadCloser, error) - ContainerExport(name string, out io.Writer) error - ContainerExtractToDir(name, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) error - ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) -} - -// stateBackend includes functions to implement to provide container state lifecycle functionality. -type stateBackend interface { - ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) - ContainerKill(name string, sig uint64) error - ContainerPause(name string) error - ContainerRename(oldName, newName string) error - ContainerResize(name string, height, width int) error - ContainerRestart(name string, seconds *int) error - ContainerRm(name string, config *types.ContainerRmConfig) error - ContainerStart(name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error - ContainerStop(name string, seconds *int) error - ContainerUnpause(name string) error - ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) - ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error) -} - -// monitorBackend includes functions to implement to provide containers monitoring functionality. -type monitorBackend interface { - ContainerChanges(name string) ([]archive.Change, error) - ContainerInspect(name string, size bool, version string) (interface{}, error) - ContainerLogs(ctx context.Context, name string, config *types.ContainerLogsOptions) (msgs <-chan *backend.LogMessage, tty bool, err error) - ContainerStats(ctx context.Context, name string, config *backend.ContainerStatsConfig) error - ContainerTop(name string, psArgs string) (*container.ContainerTopOKBody, error) - - Containers(config *types.ContainerListOptions) ([]*types.Container, error) -} - -// attachBackend includes function to implement to provide container attaching functionality. -type attachBackend interface { - ContainerAttach(name string, c *backend.ContainerAttachConfig) error -} - -// systemBackend includes functions to implement to provide system wide containers functionality -type systemBackend interface { - ContainersPrune(ctx context.Context, pruneFilters filters.Args) (*types.ContainersPruneReport, error) -} - -type commitBackend interface { - CreateImageFromContainer(name string, config *backend.CreateImageConfig) (imageID string, err error) -} - -// Backend is all the methods that need to be implemented to provide container specific functionality. 
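Because the interface below is assembled purely from embedded interfaces, any concrete type implementing all of the grouped methods satisfies it implicitly. A compile-time assertion is the idiomatic check; the daemon stub here is hypothetical and its method set is elided, so treat this as the shape of the pattern rather than standalone compiling code:

package main

import container "github.com/docker/docker/api/server/router/container"

type daemonStub struct{ /* ContainerCreate, ContainerKill, ... elided */ }

// Fails to build the moment daemonStub stops implementing every
// interface embedded in Backend.
var _ container.Backend = (*daemonStub)(nil)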
-type Backend interface { - commitBackend - execBackend - copyBackend - stateBackend - monitorBackend - attachBackend - systemBackend -} diff --git a/vendor/github.com/docker/docker/api/server/router/container/container.go b/vendor/github.com/docker/docker/api/server/router/container/container.go deleted file mode 100644 index 358f2bc2c..000000000 --- a/vendor/github.com/docker/docker/api/server/router/container/container.go +++ /dev/null @@ -1,70 +0,0 @@ -package container // import "github.com/docker/docker/api/server/router/container" - -import ( - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/server/router" -) - -// containerRouter is a router to talk with the container controller -type containerRouter struct { - backend Backend - decoder httputils.ContainerDecoder - routes []router.Route -} - -// NewRouter initializes a new container router -func NewRouter(b Backend, decoder httputils.ContainerDecoder) router.Router { - r := &containerRouter{ - backend: b, - decoder: decoder, - } - r.initRoutes() - return r -} - -// Routes returns the available routes to the container controller -func (r *containerRouter) Routes() []router.Route { - return r.routes -} - -// initRoutes initializes the routes in container router -func (r *containerRouter) initRoutes() { - r.routes = []router.Route{ - // HEAD - router.NewHeadRoute("/containers/{name:.*}/archive", r.headContainersArchive), - // GET - router.NewGetRoute("/containers/json", r.getContainersJSON), - router.NewGetRoute("/containers/{name:.*}/export", r.getContainersExport), - router.NewGetRoute("/containers/{name:.*}/changes", r.getContainersChanges), - router.NewGetRoute("/containers/{name:.*}/json", r.getContainersByName), - router.NewGetRoute("/containers/{name:.*}/top", r.getContainersTop), - router.NewGetRoute("/containers/{name:.*}/logs", r.getContainersLogs, router.WithCancel), - router.NewGetRoute("/containers/{name:.*}/stats", r.getContainersStats, router.WithCancel), - router.NewGetRoute("/containers/{name:.*}/attach/ws", r.wsContainersAttach), - router.NewGetRoute("/exec/{id:.*}/json", r.getExecByID), - router.NewGetRoute("/containers/{name:.*}/archive", r.getContainersArchive), - // POST - router.NewPostRoute("/containers/create", r.postContainersCreate), - router.NewPostRoute("/containers/{name:.*}/kill", r.postContainersKill), - router.NewPostRoute("/containers/{name:.*}/pause", r.postContainersPause), - router.NewPostRoute("/containers/{name:.*}/unpause", r.postContainersUnpause), - router.NewPostRoute("/containers/{name:.*}/restart", r.postContainersRestart), - router.NewPostRoute("/containers/{name:.*}/start", r.postContainersStart), - router.NewPostRoute("/containers/{name:.*}/stop", r.postContainersStop), - router.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait, router.WithCancel), - router.NewPostRoute("/containers/{name:.*}/resize", r.postContainersResize), - router.NewPostRoute("/containers/{name:.*}/attach", r.postContainersAttach), - router.NewPostRoute("/containers/{name:.*}/copy", r.postContainersCopy), // Deprecated since 1.8, Errors out since 1.12 - router.NewPostRoute("/containers/{name:.*}/exec", r.postContainerExecCreate), - router.NewPostRoute("/exec/{name:.*}/start", r.postContainerExecStart), - router.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize), - router.NewPostRoute("/containers/{name:.*}/rename", r.postContainerRename), - router.NewPostRoute("/containers/{name:.*}/update", r.postContainerUpdate), - 
router.NewPostRoute("/containers/prune", r.postContainersPrune, router.WithCancel), - router.NewPostRoute("/commit", r.postCommit), - // PUT - router.NewPutRoute("/containers/{name:.*}/archive", r.putContainersArchive), - // DELETE - router.NewDeleteRoute("/containers/{name:.*}", r.deleteContainers), - } -} diff --git a/vendor/github.com/docker/docker/api/server/router/container/container_routes.go b/vendor/github.com/docker/docker/api/server/router/container/container_routes.go deleted file mode 100644 index 9282cea09..000000000 --- a/vendor/github.com/docker/docker/api/server/router/container/container_routes.go +++ /dev/null @@ -1,661 +0,0 @@ -package container // import "github.com/docker/docker/api/server/router/container" - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "strconv" - "syscall" - - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/versions" - containerpkg "github.com/docker/docker/container" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/signal" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/net/websocket" -) - -func (s *containerRouter) postCommit(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - // TODO: remove pause arg, and always pause in backend - pause := httputils.BoolValue(r, "pause") - version := httputils.VersionFromContext(ctx) - if r.FormValue("pause") == "" && versions.GreaterThanOrEqualTo(version, "1.13") { - pause = true - } - - config, _, _, err := s.decoder.DecodeConfig(r.Body) - if err != nil && err != io.EOF { //Do not fail if body is empty. 
- return err - } - - commitCfg := &backend.CreateImageConfig{ - Pause: pause, - Repo: r.Form.Get("repo"), - Tag: r.Form.Get("tag"), - Author: r.Form.Get("author"), - Comment: r.Form.Get("comment"), - Config: config, - Changes: r.Form["changes"], - } - - imgID, err := s.backend.CreateImageFromContainer(r.Form.Get("container"), commitCfg) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusCreated, &types.IDResponse{ID: imgID}) -} - -func (s *containerRouter) getContainersJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - filter, err := filters.FromJSON(r.Form.Get("filters")) - if err != nil { - return err - } - - config := &types.ContainerListOptions{ - All: httputils.BoolValue(r, "all"), - Size: httputils.BoolValue(r, "size"), - Since: r.Form.Get("since"), - Before: r.Form.Get("before"), - Filters: filter, - } - - if tmpLimit := r.Form.Get("limit"); tmpLimit != "" { - limit, err := strconv.Atoi(tmpLimit) - if err != nil { - return err - } - config.Limit = limit - } - - containers, err := s.backend.Containers(config) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, containers) -} - -func (s *containerRouter) getContainersStats(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - stream := httputils.BoolValueOrDefault(r, "stream", true) - if !stream { - w.Header().Set("Content-Type", "application/json") - } - - config := &backend.ContainerStatsConfig{ - Stream: stream, - OutStream: w, - Version: httputils.VersionFromContext(ctx), - } - - return s.backend.ContainerStats(ctx, vars["name"], config) -} - -func (s *containerRouter) getContainersLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - // Args are validated before the stream starts because when it starts we're - // sending HTTP 200 by writing an empty chunk of data to tell the client that - // daemon is going to stream. By sending this initial HTTP 200 we can't report - // any error after the stream starts (i.e. container not found, wrong parameters) - // with the appropriate status code. - stdout, stderr := httputils.BoolValue(r, "stdout"), httputils.BoolValue(r, "stderr") - if !(stdout || stderr) { - return errdefs.InvalidParameter(errors.New("Bad parameters: you must choose at least one stream")) - } - - containerName := vars["name"] - logsConfig := &types.ContainerLogsOptions{ - Follow: httputils.BoolValue(r, "follow"), - Timestamps: httputils.BoolValue(r, "timestamps"), - Since: r.Form.Get("since"), - Until: r.Form.Get("until"), - Tail: r.Form.Get("tail"), - ShowStdout: stdout, - ShowStderr: stderr, - Details: httputils.BoolValue(r, "details"), - } - - msgs, tty, err := s.backend.ContainerLogs(ctx, containerName, logsConfig) - if err != nil { - return err - } - - // if has a tty, we're not muxing streams. if it doesn't, we are. simple. - // this is the point of no return for writing a response. 
once we call
-	// WriteLogStream, the response has been started and errors will be
-	// returned in band by WriteLogStream
-	httputils.WriteLogStream(ctx, w, msgs, logsConfig, !tty)
-	return nil
-}
-
-func (s *containerRouter) getContainersExport(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	return s.backend.ContainerExport(vars["name"], w)
-}
-
-type bodyOnStartError struct{}
-
-func (bodyOnStartError) Error() string {
-	return "starting container with non-empty request body was deprecated since API v1.22 and removed in v1.24"
-}
-
-func (bodyOnStartError) InvalidParameter() {}
-
-func (s *containerRouter) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	// If contentLength is -1, we can assume chunked encoding, or more
-	// technically that the length is unknown
-	// https://golang.org/src/pkg/net/http/request.go#L139
-	// net/http otherwise seems to swallow any headers related to chunked encoding
-	// including r.TransferEncoding
-	// allow a nil body for backwards compatibility
-
-	version := httputils.VersionFromContext(ctx)
-	var hostConfig *container.HostConfig
-	// A non-nil json object is at least 7 characters.
-	if r.ContentLength > 7 || r.ContentLength == -1 {
-		if versions.GreaterThanOrEqualTo(version, "1.24") {
-			return bodyOnStartError{}
-		}
-
-		if err := httputils.CheckForJSON(r); err != nil {
-			return err
-		}
-
-		c, err := s.decoder.DecodeHostConfig(r.Body)
-		if err != nil {
-			return err
-		}
-		hostConfig = c
-	}
-
-	if err := httputils.ParseForm(r); err != nil {
-		return err
-	}
-
-	checkpoint := r.Form.Get("checkpoint")
-	checkpointDir := r.Form.Get("checkpoint-dir")
-	if err := s.backend.ContainerStart(vars["name"], hostConfig, checkpoint, checkpointDir); err != nil {
-		return err
-	}
-
-	w.WriteHeader(http.StatusNoContent)
-	return nil
-}
-
-func (s *containerRouter) postContainersStop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	if err := httputils.ParseForm(r); err != nil {
-		return err
-	}
-
-	var seconds *int
-	if tmpSeconds := r.Form.Get("t"); tmpSeconds != "" {
-		valSeconds, err := strconv.Atoi(tmpSeconds)
-		if err != nil {
-			return err
-		}
-		seconds = &valSeconds
-	}
-
-	if err := s.backend.ContainerStop(vars["name"], seconds); err != nil {
-		return err
-	}
-	w.WriteHeader(http.StatusNoContent)
-
-	return nil
-}
-
-func (s *containerRouter) postContainersKill(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	if err := httputils.ParseForm(r); err != nil {
-		return err
-	}
-
-	var sig syscall.Signal
-	name := vars["name"]
-
-	// If we have a signal, look at it. Otherwise, do nothing
-	if sigStr := r.Form.Get("signal"); sigStr != "" {
-		var err error
-		if sig, err = signal.ParseSignal(sigStr); err != nil {
-			return errdefs.InvalidParameter(err)
-		}
-	}
-
-	if err := s.backend.ContainerKill(name, uint64(sig)); err != nil {
-		var isStopped bool
-		if errdefs.IsConflict(err) {
-			isStopped = true
-		}
-
-		// Only swallow the error if it was caused by the container already
-		// being stopped and the client's API version is < 1.20; newer API
-		// versions always receive the error.
- version := httputils.VersionFromContext(ctx) - if versions.GreaterThanOrEqualTo(version, "1.20") || !isStopped { - return errors.Wrapf(err, "Cannot kill container: %s", name) - } - } - - w.WriteHeader(http.StatusNoContent) - return nil -} - -func (s *containerRouter) postContainersRestart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - var seconds *int - if tmpSeconds := r.Form.Get("t"); tmpSeconds != "" { - valSeconds, err := strconv.Atoi(tmpSeconds) - if err != nil { - return err - } - seconds = &valSeconds - } - - if err := s.backend.ContainerRestart(vars["name"], seconds); err != nil { - return err - } - - w.WriteHeader(http.StatusNoContent) - - return nil -} - -func (s *containerRouter) postContainersPause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - if err := s.backend.ContainerPause(vars["name"]); err != nil { - return err - } - - w.WriteHeader(http.StatusNoContent) - - return nil -} - -func (s *containerRouter) postContainersUnpause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - if err := s.backend.ContainerUnpause(vars["name"]); err != nil { - return err - } - - w.WriteHeader(http.StatusNoContent) - - return nil -} - -func (s *containerRouter) postContainersWait(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - // Behavior changed in version 1.30 to handle wait condition and to - // return headers immediately. - version := httputils.VersionFromContext(ctx) - legacyBehaviorPre130 := versions.LessThan(version, "1.30") - legacyRemovalWaitPre134 := false - - // The wait condition defaults to "not-running". - waitCondition := containerpkg.WaitConditionNotRunning - if !legacyBehaviorPre130 { - if err := httputils.ParseForm(r); err != nil { - return err - } - switch container.WaitCondition(r.Form.Get("condition")) { - case container.WaitConditionNextExit: - waitCondition = containerpkg.WaitConditionNextExit - case container.WaitConditionRemoved: - waitCondition = containerpkg.WaitConditionRemoved - legacyRemovalWaitPre134 = versions.LessThan(version, "1.34") - } - } - - // Note: the context should get canceled if the client closes the - // connection since this handler has been wrapped by the - // router.WithCancel() wrapper. - waitC, err := s.backend.ContainerWait(ctx, vars["name"], waitCondition) - if err != nil { - return err - } - - w.Header().Set("Content-Type", "application/json") - - if !legacyBehaviorPre130 { - // Write response header immediately. - w.WriteHeader(http.StatusOK) - if flusher, ok := w.(http.Flusher); ok { - flusher.Flush() - } - } - - // Block on the result of the wait operation. - status := <-waitC - - // With API < 1.34, wait on WaitConditionRemoved did not return - // in case container removal failed. The only way to report an - // error back to the client is to not write anything (i.e. send - // an empty response which will be treated as an error). 
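	// Concretely (illustrative values): on API >= 1.34 a failed wait still
	// produces a JSON body such as
	//   {"StatusCode":137,"Error":{"Message":"removal failed"}}
	// while the legacy path below writes nothing, so the client sees the
	// truncated response and treats it as the error.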
- if legacyRemovalWaitPre134 && status.Err() != nil { - return nil - } - - var waitError *container.ContainerWaitOKBodyError - if status.Err() != nil { - waitError = &container.ContainerWaitOKBodyError{Message: status.Err().Error()} - } - - return json.NewEncoder(w).Encode(&container.ContainerWaitOKBody{ - StatusCode: int64(status.ExitCode()), - Error: waitError, - }) -} - -func (s *containerRouter) getContainersChanges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - changes, err := s.backend.ContainerChanges(vars["name"]) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, changes) -} - -func (s *containerRouter) getContainersTop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - procList, err := s.backend.ContainerTop(vars["name"], r.Form.Get("ps_args")) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, procList) -} - -func (s *containerRouter) postContainerRename(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - name := vars["name"] - newName := r.Form.Get("name") - if err := s.backend.ContainerRename(name, newName); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func (s *containerRouter) postContainerUpdate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - var updateConfig container.UpdateConfig - - decoder := json.NewDecoder(r.Body) - if err := decoder.Decode(&updateConfig); err != nil { - return err - } - - hostConfig := &container.HostConfig{ - Resources: updateConfig.Resources, - RestartPolicy: updateConfig.RestartPolicy, - } - - name := vars["name"] - resp, err := s.backend.ContainerUpdate(name, hostConfig) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, resp) -} - -func (s *containerRouter) postContainersCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - name := r.Form.Get("name") - - config, hostConfig, networkingConfig, err := s.decoder.DecodeConfig(r.Body) - if err != nil { - return err - } - version := httputils.VersionFromContext(ctx) - adjustCPUShares := versions.LessThan(version, "1.19") - - // When using API 1.24 and under, the client is responsible for removing the container - if hostConfig != nil && versions.LessThan(version, "1.25") { - hostConfig.AutoRemove = false - } - - ccr, err := s.backend.ContainerCreate(types.ContainerCreateConfig{ - Name: name, - Config: config, - HostConfig: hostConfig, - NetworkingConfig: networkingConfig, - AdjustCPUShares: adjustCPUShares, - }) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusCreated, ccr) -} - -func (s *containerRouter) deleteContainers(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - name := vars["name"] - config := &types.ContainerRmConfig{ - ForceRemove: httputils.BoolValue(r, "force"), - RemoveVolume: 
httputils.BoolValue(r, "v"), - RemoveLink: httputils.BoolValue(r, "link"), - } - - if err := s.backend.ContainerRm(name, config); err != nil { - return err - } - - w.WriteHeader(http.StatusNoContent) - - return nil -} - -func (s *containerRouter) postContainersResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - height, err := strconv.Atoi(r.Form.Get("h")) - if err != nil { - return errdefs.InvalidParameter(err) - } - width, err := strconv.Atoi(r.Form.Get("w")) - if err != nil { - return errdefs.InvalidParameter(err) - } - - return s.backend.ContainerResize(vars["name"], height, width) -} - -func (s *containerRouter) postContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - err := httputils.ParseForm(r) - if err != nil { - return err - } - containerName := vars["name"] - - _, upgrade := r.Header["Upgrade"] - detachKeys := r.FormValue("detachKeys") - - hijacker, ok := w.(http.Hijacker) - if !ok { - return errdefs.InvalidParameter(errors.Errorf("error attaching to container %s, hijack connection missing", containerName)) - } - - setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) { - conn, _, err := hijacker.Hijack() - if err != nil { - return nil, nil, nil, err - } - - // set raw mode - conn.Write([]byte{}) - - if upgrade { - fmt.Fprintf(conn, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") - } else { - fmt.Fprintf(conn, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") - } - - closer := func() error { - httputils.CloseStreams(conn) - return nil - } - return ioutils.NewReadCloserWrapper(conn, closer), conn, conn, nil - } - - attachConfig := &backend.ContainerAttachConfig{ - GetStreams: setupStreams, - UseStdin: httputils.BoolValue(r, "stdin"), - UseStdout: httputils.BoolValue(r, "stdout"), - UseStderr: httputils.BoolValue(r, "stderr"), - Logs: httputils.BoolValue(r, "logs"), - Stream: httputils.BoolValue(r, "stream"), - DetachKeys: detachKeys, - MuxStreams: true, - } - - if err = s.backend.ContainerAttach(containerName, attachConfig); err != nil { - logrus.Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err) - // Remember to close stream if error happens - conn, _, errHijack := hijacker.Hijack() - if errHijack == nil { - statusCode := httputils.GetHTTPErrorStatusCode(err) - statusText := http.StatusText(statusCode) - fmt.Fprintf(conn, "HTTP/1.1 %d %s\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n%s\r\n", statusCode, statusText, err.Error()) - httputils.CloseStreams(conn) - } else { - logrus.Errorf("Error Hijacking: %v", err) - } - } - return nil -} - -func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - containerName := vars["name"] - - var err error - detachKeys := r.FormValue("detachKeys") - - done := make(chan struct{}) - started := make(chan struct{}) - - version := httputils.VersionFromContext(ctx) - - setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) { - wsChan := make(chan *websocket.Conn) - h := func(conn *websocket.Conn) { - wsChan <- conn - <-done - } - - srv := websocket.Server{Handler: h, Handshake: nil} - go func() { - close(started) - srv.ServeHTTP(w, r) - }() - - conn := <-wsChan - // In 
the case of API version 1.28 and above, a binary frame will be sent. - // See 28176 for details. - if versions.GreaterThanOrEqualTo(version, "1.28") { - conn.PayloadType = websocket.BinaryFrame - } - return conn, conn, conn, nil - } - - attachConfig := &backend.ContainerAttachConfig{ - GetStreams: setupStreams, - Logs: httputils.BoolValue(r, "logs"), - Stream: httputils.BoolValue(r, "stream"), - DetachKeys: detachKeys, - UseStdin: true, - UseStdout: true, - UseStderr: true, - MuxStreams: false, // TODO: this should be true since it's a single stream for both stdout and stderr - } - - err = s.backend.ContainerAttach(containerName, attachConfig) - close(done) - select { - case <-started: - if err != nil { - logrus.Errorf("Error attaching websocket: %s", err) - } else { - logrus.Debug("websocket connection was closed by client") - } - return nil - default: - } - return err -} - -func (s *containerRouter) postContainersPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - pruneFilters, err := filters.FromJSON(r.Form.Get("filters")) - if err != nil { - return errdefs.InvalidParameter(err) - } - - pruneReport, err := s.backend.ContainersPrune(ctx, pruneFilters) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, pruneReport) -} diff --git a/vendor/github.com/docker/docker/api/server/router/container/copy.go b/vendor/github.com/docker/docker/api/server/router/container/copy.go deleted file mode 100644 index 837836d00..000000000 --- a/vendor/github.com/docker/docker/api/server/router/container/copy.go +++ /dev/null @@ -1,140 +0,0 @@ -package container // import "github.com/docker/docker/api/server/router/container" - -import ( - "compress/flate" - "compress/gzip" - "context" - "encoding/base64" - "encoding/json" - "io" - "net/http" - - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/versions" - gddohttputil "github.com/golang/gddo/httputil" -) - -type pathError struct{} - -func (pathError) Error() string { - return "Path cannot be empty" -} - -func (pathError) InvalidParameter() {} - -// postContainersCopy is deprecated in favor of getContainersArchive. -func (s *containerRouter) postContainersCopy(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - // Deprecated since 1.8; errors out since 1.12 - version := httputils.VersionFromContext(ctx) - if versions.GreaterThanOrEqualTo(version, "1.24") { - w.WriteHeader(http.StatusNotFound) - return nil - } - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - cfg := types.CopyConfig{} - if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil { - return err - } - - if cfg.Resource == "" { - return pathError{} - } - - data, err := s.backend.ContainerCopy(vars["name"], cfg.Resource) - if err != nil { - return err - } - defer data.Close() - - w.Header().Set("Content-Type", "application/x-tar") - _, err = io.Copy(w, data) - return err -} - -// Encode the stat to JSON, base64 encode, and place in a header.
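As a minimal, self-contained sketch of the round trip that the helper below (setContainerPathStatHeader) performs on the server side; pathStat here is a simplified, hypothetical stand-in for types.ContainerPathStat, and the values are made up:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/http"
)

// pathStat is a hypothetical subset of types.ContainerPathStat.
type pathStat struct {
	Name string `json:"name"`
	Size int64  `json:"size"`
}

func main() {
	// Server side: marshal the stat, base64-encode it, place it in the header.
	header := http.Header{}
	statJSON, err := json.Marshal(pathStat{Name: "etc", Size: 4096})
	if err != nil {
		panic(err)
	}
	header.Set("X-Docker-Container-Path-Stat", base64.StdEncoding.EncodeToString(statJSON))

	// Client side: reverse the steps to recover the stat.
	raw, err := base64.StdEncoding.DecodeString(header.Get("X-Docker-Container-Path-Stat"))
	if err != nil {
		panic(err)
	}
	var st pathStat
	if err := json.Unmarshal(raw, &st); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", st) // {Name:etc Size:4096}
}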
-func setContainerPathStatHeader(stat *types.ContainerPathStat, header http.Header) error { - statJSON, err := json.Marshal(stat) - if err != nil { - return err - } - - header.Set( - "X-Docker-Container-Path-Stat", - base64.StdEncoding.EncodeToString(statJSON), - ) - - return nil -} - -func (s *containerRouter) headContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - v, err := httputils.ArchiveFormValues(r, vars) - if err != nil { - return err - } - - stat, err := s.backend.ContainerStatPath(v.Name, v.Path) - if err != nil { - return err - } - - return setContainerPathStatHeader(stat, w.Header()) -} - -func writeCompressedResponse(w http.ResponseWriter, r *http.Request, body io.Reader) error { - var cw io.Writer - switch gddohttputil.NegotiateContentEncoding(r, []string{"gzip", "deflate"}) { - case "gzip": - gw := gzip.NewWriter(w) - defer gw.Close() - cw = gw - w.Header().Set("Content-Encoding", "gzip") - case "deflate": - fw, err := flate.NewWriter(w, flate.DefaultCompression) - if err != nil { - return err - } - defer fw.Close() - cw = fw - w.Header().Set("Content-Encoding", "deflate") - default: - cw = w - } - _, err := io.Copy(cw, body) - return err -} - -func (s *containerRouter) getContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - v, err := httputils.ArchiveFormValues(r, vars) - if err != nil { - return err - } - - tarArchive, stat, err := s.backend.ContainerArchivePath(v.Name, v.Path) - if err != nil { - return err - } - defer tarArchive.Close() - - if err := setContainerPathStatHeader(stat, w.Header()); err != nil { - return err - } - - w.Header().Set("Content-Type", "application/x-tar") - return writeCompressedResponse(w, r, tarArchive) -} - -func (s *containerRouter) putContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - v, err := httputils.ArchiveFormValues(r, vars) - if err != nil { - return err - } - - noOverwriteDirNonDir := httputils.BoolValue(r, "noOverwriteDirNonDir") - copyUIDGID := httputils.BoolValue(r, "copyUIDGID") - - return s.backend.ContainerExtractToDir(v.Name, v.Path, copyUIDGID, noOverwriteDirNonDir, r.Body) -} diff --git a/vendor/github.com/docker/docker/api/server/router/container/exec.go b/vendor/github.com/docker/docker/api/server/router/container/exec.go deleted file mode 100644 index 25125edb5..000000000 --- a/vendor/github.com/docker/docker/api/server/router/container/exec.go +++ /dev/null @@ -1,149 +0,0 @@ -package container // import "github.com/docker/docker/api/server/router/container" - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "strconv" - - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/versions" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/stdcopy" - "github.com/sirupsen/logrus" -) - -func (s *containerRouter) getExecByID(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - eConfig, err := s.backend.ContainerExecInspect(vars["id"]) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, eConfig) -} - -type execCommandError struct{} - -func (execCommandError) Error() string { - return "No exec command specified" -} - -func (execCommandError) InvalidParameter() {} - -func (s *containerRouter) postContainerExecCreate(ctx context.Context, w http.ResponseWriter, r 
*http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - if err := httputils.CheckForJSON(r); err != nil { - return err - } - name := vars["name"] - - execConfig := &types.ExecConfig{} - if err := json.NewDecoder(r.Body).Decode(execConfig); err != nil { - return err - } - - if len(execConfig.Cmd) == 0 { - return execCommandError{} - } - - // Register an instance of Exec in the container. - id, err := s.backend.ContainerExecCreate(name, execConfig) - if err != nil { - logrus.Errorf("Error setting up exec command in container %s: %v", name, err) - return err - } - - return httputils.WriteJSON(w, http.StatusCreated, &types.IDResponse{ - ID: id, - }) -} - -// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start. -func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - version := httputils.VersionFromContext(ctx) - if versions.GreaterThan(version, "1.21") { - if err := httputils.CheckForJSON(r); err != nil { - return err - } - } - - var ( - execName = vars["name"] - stdin, inStream io.ReadCloser - stdout, stderr, outStream io.Writer - ) - - execStartCheck := &types.ExecStartCheck{} - if err := json.NewDecoder(r.Body).Decode(execStartCheck); err != nil { - return err - } - - if exists, err := s.backend.ExecExists(execName); !exists { - return err - } - - if !execStartCheck.Detach { - var err error - // Setting up the streaming http interface. - inStream, outStream, err = httputils.HijackConnection(w) - if err != nil { - return err - } - defer httputils.CloseStreams(inStream, outStream) - - if _, ok := r.Header["Upgrade"]; ok { - fmt.Fprint(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n") - } else { - fmt.Fprint(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n") - } - - // copy headers that were removed as part of hijack - if err := w.Header().WriteSubset(outStream, nil); err != nil { - return err - } - fmt.Fprint(outStream, "\r\n") - - stdin = inStream - stdout = outStream - if !execStartCheck.Tty { - stderr = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) - stdout = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) - } - } - - // Now run the user process in the container. - // Maybe we should pass ctx here if we're not detaching?
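// The TODO above is a real design question: the call below uses
// context.Background(), so cancelling the HTTP request does not cancel a
// non-detached exec. A hedged sketch of what a request-scoped context might
// look like (an assumption for illustration, not the actual fix):
//
//	execCtx := ctx
//	if execStartCheck.Detach {
//		execCtx = context.Background() // detached execs outlive the request
//	}
//	err := s.backend.ContainerExecStart(execCtx, execName, stdin, stdout, stderr)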
- if err := s.backend.ContainerExecStart(context.Background(), execName, stdin, stdout, stderr); err != nil { - if execStartCheck.Detach { - return err - } - stdout.Write([]byte(err.Error() + "\r\n")) - logrus.Errorf("Error running exec %s in container: %v", execName, err) - } - return nil -} - -func (s *containerRouter) postContainerExecResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - height, err := strconv.Atoi(r.Form.Get("h")) - if err != nil { - return errdefs.InvalidParameter(err) - } - width, err := strconv.Atoi(r.Form.Get("w")) - if err != nil { - return errdefs.InvalidParameter(err) - } - - return s.backend.ContainerExecResize(vars["name"], height, width) -} diff --git a/vendor/github.com/docker/docker/api/server/router/container/inspect.go b/vendor/github.com/docker/docker/api/server/router/container/inspect.go deleted file mode 100644 index 5c78d15bc..000000000 --- a/vendor/github.com/docker/docker/api/server/router/container/inspect.go +++ /dev/null @@ -1,21 +0,0 @@ -package container // import "github.com/docker/docker/api/server/router/container" - -import ( - "context" - "net/http" - - "github.com/docker/docker/api/server/httputils" -) - -// getContainersByName inspects a container's configuration and serializes it as JSON. -func (s *containerRouter) getContainersByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - displaySize := httputils.BoolValue(r, "size") - - version := httputils.VersionFromContext(ctx) - json, err := s.backend.ContainerInspect(vars["name"], displaySize, version) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, json) -} diff --git a/vendor/github.com/docker/docker/api/server/router/debug/debug.go b/vendor/github.com/docker/docker/api/server/router/debug/debug.go deleted file mode 100644 index ad05b68eb..000000000 --- a/vendor/github.com/docker/docker/api/server/router/debug/debug.go +++ /dev/null @@ -1,53 +0,0 @@ -package debug // import "github.com/docker/docker/api/server/router/debug" - -import ( - "context" - "expvar" - "net/http" - "net/http/pprof" - - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/server/router" -) - -// NewRouter creates a new debug router. -// The debug router holds endpoints for debugging the daemon, such as those for pprof.
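// As a usage sketch (assuming, as in the daemon's API server, that this
// router is mounted under the /debug prefix and that the daemon listens on
// the default unix socket), the pprof index registered in initRoutes below
// could be queried with:
//
//	curl --unix-socket /var/run/docker.sock http://./debug/pprof/
//
// and a CPU profile fetched from .../debug/pprof/profile.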
-func NewRouter() router.Router { - r := &debugRouter{} - r.initRoutes() - return r -} - -type debugRouter struct { - routes []router.Route -} - -func (r *debugRouter) initRoutes() { - r.routes = []router.Route{ - router.NewGetRoute("/vars", frameworkAdaptHandler(expvar.Handler())), - router.NewGetRoute("/pprof/", frameworkAdaptHandlerFunc(pprof.Index)), - router.NewGetRoute("/pprof/cmdline", frameworkAdaptHandlerFunc(pprof.Cmdline)), - router.NewGetRoute("/pprof/profile", frameworkAdaptHandlerFunc(pprof.Profile)), - router.NewGetRoute("/pprof/symbol", frameworkAdaptHandlerFunc(pprof.Symbol)), - router.NewGetRoute("/pprof/trace", frameworkAdaptHandlerFunc(pprof.Trace)), - router.NewGetRoute("/pprof/{name}", handlePprof), - } -} - -func (r *debugRouter) Routes() []router.Route { - return r.routes -} - -func frameworkAdaptHandler(handler http.Handler) httputils.APIFunc { - return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - handler.ServeHTTP(w, r) - return nil - } -} - -func frameworkAdaptHandlerFunc(handler http.HandlerFunc) httputils.APIFunc { - return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - handler(w, r) - return nil - } -} diff --git a/vendor/github.com/docker/docker/api/server/router/debug/debug_routes.go b/vendor/github.com/docker/docker/api/server/router/debug/debug_routes.go deleted file mode 100644 index 125bed72b..000000000 --- a/vendor/github.com/docker/docker/api/server/router/debug/debug_routes.go +++ /dev/null @@ -1,12 +0,0 @@ -package debug // import "github.com/docker/docker/api/server/router/debug" - -import ( - "context" - "net/http" - "net/http/pprof" -) - -func handlePprof(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - pprof.Handler(vars["name"]).ServeHTTP(w, r) - return nil -} diff --git a/vendor/github.com/docker/docker/api/server/router/distribution/backend.go b/vendor/github.com/docker/docker/api/server/router/distribution/backend.go deleted file mode 100644 index 5b881f036..000000000 --- a/vendor/github.com/docker/docker/api/server/router/distribution/backend.go +++ /dev/null @@ -1,15 +0,0 @@ -package distribution // import "github.com/docker/docker/api/server/router/distribution" - -import ( - "context" - - "github.com/docker/distribution" - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" -) - -// Backend is all the methods that need to be implemented -// to provide image specific functionality. 
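// As a usage note: getDistributionInfo below resolves a repository client
// through this method before reading tags and manifests, along the lines of
//
//	distrepo, _, err := s.backend.GetRepository(ctx, namedRef, authConfig)
//
// (the second, boolean return value is ignored by that handler).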
-type Backend interface { - GetRepository(context.Context, reference.Named, *types.AuthConfig) (distribution.Repository, bool, error) -} diff --git a/vendor/github.com/docker/docker/api/server/router/distribution/distribution.go b/vendor/github.com/docker/docker/api/server/router/distribution/distribution.go deleted file mode 100644 index 1e9e5ff83..000000000 --- a/vendor/github.com/docker/docker/api/server/router/distribution/distribution.go +++ /dev/null @@ -1,31 +0,0 @@ -package distribution // import "github.com/docker/docker/api/server/router/distribution" - -import "github.com/docker/docker/api/server/router" - -// distributionRouter is a router to talk with the registry -type distributionRouter struct { - backend Backend - routes []router.Route -} - -// NewRouter initializes a new distribution router -func NewRouter(backend Backend) router.Router { - r := &distributionRouter{ - backend: backend, - } - r.initRoutes() - return r -} - -// Routes returns the available routes -func (r *distributionRouter) Routes() []router.Route { - return r.routes -} - -// initRoutes initializes the routes in the distribution router -func (r *distributionRouter) initRoutes() { - r.routes = []router.Route{ - // GET - router.NewGetRoute("/distribution/{name:.*}/json", r.getDistributionInfo), - } -} diff --git a/vendor/github.com/docker/docker/api/server/router/distribution/distribution_routes.go b/vendor/github.com/docker/docker/api/server/router/distribution/distribution_routes.go deleted file mode 100644 index 531dba69f..000000000 --- a/vendor/github.com/docker/docker/api/server/router/distribution/distribution_routes.go +++ /dev/null @@ -1,138 +0,0 @@ -package distribution // import "github.com/docker/docker/api/server/router/distribution" - -import ( - "context" - "encoding/base64" - "encoding/json" - "net/http" - "strings" - - "github.com/docker/distribution/manifest/manifestlist" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/manifest/schema2" - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/types" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -func (s *distributionRouter) getDistributionInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - w.Header().Set("Content-Type", "application/json") - - var ( - config = &types.AuthConfig{} - authEncoded = r.Header.Get("X-Registry-Auth") - distributionInspect registrytypes.DistributionInspect - ) - - if authEncoded != "" { - authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJSON).Decode(&config); err != nil { - // for a search it is not an error if no auth was given - // to increase compatibility with the existing api it is defaulting to be empty - config = &types.AuthConfig{} - } - } - - image := vars["name"] - - ref, err := reference.ParseAnyReference(image) - if err != nil { - return err - } - namedRef, ok := ref.(reference.Named) - if !ok { - if _, ok := ref.(reference.Digested); ok { - // full image ID - return errors.Errorf("no manifest found for full image ID") - } - return errors.Errorf("unknown image reference format: %s", image) - } - - distrepo, _, err := s.backend.GetRepository(ctx, namedRef, config) - if err != nil { - return err - } - blobsrvc := 
distrepo.Blobs(ctx) - - if canonicalRef, ok := namedRef.(reference.Canonical); !ok { - namedRef = reference.TagNameOnly(namedRef) - - taggedRef, ok := namedRef.(reference.NamedTagged) - if !ok { - return errors.Errorf("image reference not tagged: %s", image) - } - - descriptor, err := distrepo.Tags(ctx).Get(ctx, taggedRef.Tag()) - if err != nil { - return err - } - distributionInspect.Descriptor = v1.Descriptor{ - MediaType: descriptor.MediaType, - Digest: descriptor.Digest, - Size: descriptor.Size, - } - } else { - // TODO(nishanttotla): Once manifests can be looked up as a blob, the - // descriptor should be set using blobsrvc.Stat(ctx, canonicalRef.Digest()) - // instead of having to manually fill in the fields - distributionInspect.Descriptor.Digest = canonicalRef.Digest() - } - - // we have a digest, so we can retrieve the manifest - mnfstsrvc, err := distrepo.Manifests(ctx) - if err != nil { - return err - } - mnfst, err := mnfstsrvc.Get(ctx, distributionInspect.Descriptor.Digest) - if err != nil { - return err - } - - mediaType, payload, err := mnfst.Payload() - if err != nil { - return err - } - // update MediaType because registry might return something incorrect - distributionInspect.Descriptor.MediaType = mediaType - if distributionInspect.Descriptor.Size == 0 { - distributionInspect.Descriptor.Size = int64(len(payload)) - } - - // retrieve platform information depending on the type of manifest - switch mnfstObj := mnfst.(type) { - case *manifestlist.DeserializedManifestList: - for _, m := range mnfstObj.Manifests { - distributionInspect.Platforms = append(distributionInspect.Platforms, v1.Platform{ - Architecture: m.Platform.Architecture, - OS: m.Platform.OS, - OSVersion: m.Platform.OSVersion, - OSFeatures: m.Platform.OSFeatures, - Variant: m.Platform.Variant, - }) - } - case *schema2.DeserializedManifest: - configJSON, err := blobsrvc.Get(ctx, mnfstObj.Config.Digest) - var platform v1.Platform - if err == nil { - err := json.Unmarshal(configJSON, &platform) - if err == nil && (platform.OS != "" || platform.Architecture != "") { - distributionInspect.Platforms = append(distributionInspect.Platforms, platform) - } - } - case *schema1.SignedManifest: - platform := v1.Platform{ - Architecture: mnfstObj.Architecture, - OS: "linux", - } - distributionInspect.Platforms = append(distributionInspect.Platforms, platform) - } - - return httputils.WriteJSON(w, http.StatusOK, distributionInspect) -} diff --git a/vendor/github.com/docker/docker/api/server/router/experimental.go b/vendor/github.com/docker/docker/api/server/router/experimental.go deleted file mode 100644 index c42e53a3d..000000000 --- a/vendor/github.com/docker/docker/api/server/router/experimental.go +++ /dev/null @@ -1,68 +0,0 @@ -package router // import "github.com/docker/docker/api/server/router" - -import ( - "context" - "net/http" - - "github.com/docker/docker/api/server/httputils" -) - -// ExperimentalRoute defines an experimental API route that can be enabled or disabled. -type ExperimentalRoute interface { - Route - - Enable() - Disable() -} - -// experimentalRoute defines an experimental API route that can be enabled or disabled. 
-// It implements ExperimentalRoute -type experimentalRoute struct { - local Route - handler httputils.APIFunc -} - -// Enable enables this experimental route -func (r *experimentalRoute) Enable() { - r.handler = r.local.Handler() -} - -// Disable disables the experimental route -func (r *experimentalRoute) Disable() { - r.handler = experimentalHandler -} - -type notImplementedError struct{} - -func (notImplementedError) Error() string { - return "This experimental feature is disabled by default. Start the Docker daemon in experimental mode in order to enable it." -} - -func (notImplementedError) NotImplemented() {} - -func experimentalHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - return notImplementedError{} -} - -// Handler returns the APIFunc to let the server wrap it in middlewares. -func (r *experimentalRoute) Handler() httputils.APIFunc { - return r.handler -} - -// Method returns the http method that the route responds to. -func (r *experimentalRoute) Method() string { - return r.local.Method() -} - -// Path returns the subpath where the route responds to. -func (r *experimentalRoute) Path() string { - return r.local.Path() -} - -// Experimental will mark a route as experimental. -func Experimental(r Route) Route { - return &experimentalRoute{ - local: r, - handler: experimentalHandler, - } -} diff --git a/vendor/github.com/docker/docker/api/server/router/image/backend.go b/vendor/github.com/docker/docker/api/server/router/image/backend.go deleted file mode 100644 index 93c47cf63..000000000 --- a/vendor/github.com/docker/docker/api/server/router/image/backend.go +++ /dev/null @@ -1,40 +0,0 @@ -package image // import "github.com/docker/docker/api/server/router/image" - -import ( - "context" - "io" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/image" - "github.com/docker/docker/api/types/registry" -) - -// Backend is all the methods that need to be implemented -// to provide image specific functionality.
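// The handlers in image_routes.go call straight into this interface; for
// example, postImagesTag maps the HTTP form values onto TagImage:
//
//	s.backend.TagImage(vars["name"], r.Form.Get("repo"), r.Form.Get("tag"))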
-type Backend interface { - imageBackend - importExportBackend - registryBackend -} - -type imageBackend interface { - ImageDelete(imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error) - ImageHistory(imageName string) ([]*image.HistoryResponseItem, error) - Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) - LookupImage(name string) (*types.ImageInspect, error) - TagImage(imageName, repository, tag string) (string, error) - ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) -} - -type importExportBackend interface { - LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error - ImportImage(src string, repository, platform string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error - ExportImage(names []string, outStream io.Writer) error -} - -type registryBackend interface { - PullImage(ctx context.Context, image, tag, platform string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error - PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error - SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int, authConfig *types.AuthConfig, metaHeaders map[string][]string) (*registry.SearchResults, error) -} diff --git a/vendor/github.com/docker/docker/api/server/router/image/image.go b/vendor/github.com/docker/docker/api/server/router/image/image.go deleted file mode 100644 index 6d5d87f63..000000000 --- a/vendor/github.com/docker/docker/api/server/router/image/image.go +++ /dev/null @@ -1,44 +0,0 @@ -package image // import "github.com/docker/docker/api/server/router/image" - -import ( - "github.com/docker/docker/api/server/router" -) - -// imageRouter is a router to talk with the image controller -type imageRouter struct { - backend Backend - routes []router.Route -} - -// NewRouter initializes a new image router -func NewRouter(backend Backend) router.Router { - r := &imageRouter{backend: backend} - r.initRoutes() - return r -} - -// Routes returns the available routes to the image controller -func (r *imageRouter) Routes() []router.Route { - return r.routes -} - -// initRoutes initializes the routes in the image router -func (r *imageRouter) initRoutes() { - r.routes = []router.Route{ - // GET - router.NewGetRoute("/images/json", r.getImagesJSON), - router.NewGetRoute("/images/search", r.getImagesSearch), - router.NewGetRoute("/images/get", r.getImagesGet), - router.NewGetRoute("/images/{name:.*}/get", r.getImagesGet), - router.NewGetRoute("/images/{name:.*}/history", r.getImagesHistory), - router.NewGetRoute("/images/{name:.*}/json", r.getImagesByName), - // POST - router.NewPostRoute("/images/load", r.postImagesLoad), - router.NewPostRoute("/images/create", r.postImagesCreate, router.WithCancel), - router.NewPostRoute("/images/{name:.*}/push", r.postImagesPush, router.WithCancel), - router.NewPostRoute("/images/{name:.*}/tag", r.postImagesTag), - router.NewPostRoute("/images/prune", r.postImagesPrune, router.WithCancel), - // DELETE - router.NewDeleteRoute("/images/{name:.*}", r.deleteImages), - } -} diff --git a/vendor/github.com/docker/docker/api/server/router/image/image_routes.go b/vendor/github.com/docker/docker/api/server/router/image/image_routes.go deleted file mode 100644 index 8e32d0292..000000000 --- 
a/vendor/github.com/docker/docker/api/server/router/image/image_routes.go +++ /dev/null @@ -1,314 +0,0 @@ -package image // import "github.com/docker/docker/api/server/router/image" - -import ( - "context" - "encoding/base64" - "encoding/json" - "fmt" - "net/http" - "strconv" - "strings" - - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/versions" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/registry" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -// Creates an image from Pull or from Import -func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - - if err := httputils.ParseForm(r); err != nil { - return err - } - - var ( - image = r.Form.Get("fromImage") - repo = r.Form.Get("repo") - tag = r.Form.Get("tag") - message = r.Form.Get("message") - err error - output = ioutils.NewWriteFlusher(w) - platform = &specs.Platform{} - ) - defer output.Close() - - w.Header().Set("Content-Type", "application/json") - - version := httputils.VersionFromContext(ctx) - if versions.GreaterThanOrEqualTo(version, "1.32") { - apiPlatform := r.FormValue("platform") - platform = system.ParsePlatform(apiPlatform) - if err = system.ValidatePlatform(platform); err != nil { - err = fmt.Errorf("invalid platform: %s", err) - } - } - - if err == nil { - if image != "" { //pull - metaHeaders := map[string][]string{} - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Meta-") { - metaHeaders[k] = v - } - } - - authEncoded := r.Header.Get("X-Registry-Auth") - authConfig := &types.AuthConfig{} - if authEncoded != "" { - authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { - // for a pull it is not an error if no auth was given - // to increase compatibility with the existing api it is defaulting to be empty - authConfig = &types.AuthConfig{} - } - } - err = s.backend.PullImage(ctx, image, tag, platform.OS, metaHeaders, authConfig, output) - } else { //import - src := r.Form.Get("fromSrc") - // 'err' MUST NOT be defined within this block, we need any error - // generated from the download to be available to the output - // stream processing below - err = s.backend.ImportImage(src, repo, platform.OS, tag, message, r.Body, output, r.Form["changes"]) - } - } - if err != nil { - if !output.Flushed() { - return err - } - output.Write(streamformatter.FormatError(err)) - } - - return nil -} - -func (s *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - metaHeaders := map[string][]string{} - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Meta-") { - metaHeaders[k] = v - } - } - if err := httputils.ParseForm(r); err != nil { - return err - } - authConfig := &types.AuthConfig{} - - authEncoded := r.Header.Get("X-Registry-Auth") - if authEncoded != "" { - // the new format is to handle the authConfig as a header - authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { - // to increase compatibility to existing api it is defaulting to be empty - authConfig = 
&types.AuthConfig{} - } - } else { - // the old format is supported for compatibility if there was no authConfig header - if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil { - return errors.Wrap(errdefs.InvalidParameter(err), "Bad parameters and missing X-Registry-Auth") - } - } - - image := vars["name"] - tag := r.Form.Get("tag") - - output := ioutils.NewWriteFlusher(w) - defer output.Close() - - w.Header().Set("Content-Type", "application/json") - - if err := s.backend.PushImage(ctx, image, tag, metaHeaders, authConfig, output); err != nil { - if !output.Flushed() { - return err - } - output.Write(streamformatter.FormatError(err)) - } - return nil -} - -func (s *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - w.Header().Set("Content-Type", "application/x-tar") - - output := ioutils.NewWriteFlusher(w) - defer output.Close() - var names []string - if name, ok := vars["name"]; ok { - names = []string{name} - } else { - names = r.Form["names"] - } - - if err := s.backend.ExportImage(names, output); err != nil { - if !output.Flushed() { - return err - } - output.Write(streamformatter.FormatError(err)) - } - return nil -} - -func (s *imageRouter) postImagesLoad(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - quiet := httputils.BoolValueOrDefault(r, "quiet", true) - - w.Header().Set("Content-Type", "application/json") - - output := ioutils.NewWriteFlusher(w) - defer output.Close() - if err := s.backend.LoadImage(r.Body, output, quiet); err != nil { - output.Write(streamformatter.FormatError(err)) - } - return nil -} - -type missingImageError struct{} - -func (missingImageError) Error() string { - return "image name cannot be blank" -} - -func (missingImageError) InvalidParameter() {} - -func (s *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - name := vars["name"] - - if strings.TrimSpace(name) == "" { - return missingImageError{} - } - - force := httputils.BoolValue(r, "force") - prune := !httputils.BoolValue(r, "noprune") - - list, err := s.backend.ImageDelete(name, force, prune) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, list) -} - -func (s *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - imageInspect, err := s.backend.LookupImage(vars["name"]) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, imageInspect) -} - -func (s *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - imageFilters, err := filters.FromJSON(r.Form.Get("filters")) - if err != nil { - return err - } - - filterParam := r.Form.Get("filter") - // FIXME(vdemeester) This has been deprecated in 1.13, and is target for removal for v17.12 - if filterParam != "" { - imageFilters.Add("reference", filterParam) - } - - images, err := s.backend.Images(imageFilters, httputils.BoolValue(r, "all"), false) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, images) -} - -func (s *imageRouter) getImagesHistory(ctx 
context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - name := vars["name"] - history, err := s.backend.ImageHistory(name) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, history) -} - -func (s *imageRouter) postImagesTag(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - if _, err := s.backend.TagImage(vars["name"], r.Form.Get("repo"), r.Form.Get("tag")); err != nil { - return err - } - w.WriteHeader(http.StatusCreated) - return nil -} - -func (s *imageRouter) getImagesSearch(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - var ( - config *types.AuthConfig - authEncoded = r.Header.Get("X-Registry-Auth") - headers = map[string][]string{} - ) - - if authEncoded != "" { - authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJSON).Decode(&config); err != nil { - // for a search it is not an error if no auth was given - // to increase compatibility with the existing api it is defaulting to be empty - config = &types.AuthConfig{} - } - } - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Meta-") { - headers[k] = v - } - } - limit := registry.DefaultSearchLimit - if r.Form.Get("limit") != "" { - limitValue, err := strconv.Atoi(r.Form.Get("limit")) - if err != nil { - return err - } - limit = limitValue - } - query, err := s.backend.SearchRegistryForImages(ctx, r.Form.Get("filters"), r.Form.Get("term"), limit, config, headers) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, query.Results) -} - -func (s *imageRouter) postImagesPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - pruneFilters, err := filters.FromJSON(r.Form.Get("filters")) - if err != nil { - return err - } - - pruneReport, err := s.backend.ImagesPrune(ctx, pruneFilters) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, pruneReport) -} diff --git a/vendor/github.com/docker/docker/api/server/router/local.go b/vendor/github.com/docker/docker/api/server/router/local.go deleted file mode 100644 index 79a323928..000000000 --- a/vendor/github.com/docker/docker/api/server/router/local.go +++ /dev/null @@ -1,104 +0,0 @@ -package router // import "github.com/docker/docker/api/server/router" - -import ( - "context" - "net/http" - - "github.com/docker/docker/api/server/httputils" -) - -// RouteWrapper wraps a route with extra functionality. -// It is passed in when creating a new route. -type RouteWrapper func(r Route) Route - -// localRoute defines an individual API route to connect -// with the docker daemon. It implements Route. -type localRoute struct { - method string - path string - handler httputils.APIFunc -} - -// Handler returns the APIFunc to let the server wrap it in middlewares. -func (l localRoute) Handler() httputils.APIFunc { - return l.handler -} - -// Method returns the http method that the route responds to. -func (l localRoute) Method() string { - return l.method -} - -// Path returns the subpath where the route responds to. -func (l localRoute) Path() string { - return l.path -} - -// NewRoute initializes a new local route for the router. 
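// For example (pingHandler being a hypothetical httputils.APIFunc), a
// cancellable GET route is built as
//
//	r := NewGetRoute("/ping", pingHandler, WithCancel)
//
// where each RouteWrapper in opts wraps the Route produced by the one before it.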
-func NewRoute(method, path string, handler httputils.APIFunc, opts ...RouteWrapper) Route { - var r Route = localRoute{method, path, handler} - for _, o := range opts { - r = o(r) - } - return r -} - -// NewGetRoute initializes a new route with the http method GET. -func NewGetRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route { - return NewRoute("GET", path, handler, opts...) -} - -// NewPostRoute initializes a new route with the http method POST. -func NewPostRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route { - return NewRoute("POST", path, handler, opts...) -} - -// NewPutRoute initializes a new route with the http method PUT. -func NewPutRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route { - return NewRoute("PUT", path, handler, opts...) -} - -// NewDeleteRoute initializes a new route with the http method DELETE. -func NewDeleteRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route { - return NewRoute("DELETE", path, handler, opts...) -} - -// NewOptionsRoute initializes a new route with the http method OPTIONS. -func NewOptionsRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route { - return NewRoute("OPTIONS", path, handler, opts...) -} - -// NewHeadRoute initializes a new route with the http method HEAD. -func NewHeadRoute(path string, handler httputils.APIFunc, opts ...RouteWrapper) Route { - return NewRoute("HEAD", path, handler, opts...) -} - -func cancellableHandler(h httputils.APIFunc) httputils.APIFunc { - return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if notifier, ok := w.(http.CloseNotifier); ok { - notify := notifier.CloseNotify() - notifyCtx, cancel := context.WithCancel(ctx) - finished := make(chan struct{}) - defer close(finished) - ctx = notifyCtx - go func() { - select { - case <-notify: - cancel() - case <-finished: - } - }() - } - return h(ctx, w, r, vars) - } -} - -// WithCancel makes new route which embeds http.CloseNotifier feature to -// context.Context of handler. -func WithCancel(r Route) Route { - return localRoute{ - method: r.Method(), - path: r.Path(), - handler: cancellableHandler(r.Handler()), - } -} diff --git a/vendor/github.com/docker/docker/api/server/router/network/backend.go b/vendor/github.com/docker/docker/api/server/router/network/backend.go deleted file mode 100644 index 1bab353a5..000000000 --- a/vendor/github.com/docker/docker/api/server/router/network/backend.go +++ /dev/null @@ -1,32 +0,0 @@ -package network // import "github.com/docker/docker/api/server/router/network" - -import ( - "context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/network" - "github.com/docker/libnetwork" -) - -// Backend is all the methods that need to be implemented -// to provide network specific functionality. 
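// Handlers choose between this local Backend and the ClusterBackend below
// based on network scope; deleteNetwork in network_routes.go, for instance,
// calls cluster.RemoveNetwork for "swarm"-scoped networks and
// backend.DeleteNetwork for everything else.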
-type Backend interface { - FindNetwork(idName string) (libnetwork.Network, error) - GetNetworks() []libnetwork.Network - CreateNetwork(nc types.NetworkCreateRequest) (*types.NetworkCreateResponse, error) - ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error - DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error - DeleteNetwork(networkID string) error - NetworksPrune(ctx context.Context, pruneFilters filters.Args) (*types.NetworksPruneReport, error) -} - -// ClusterBackend is all the methods that need to be implemented -// to provide cluster network specific functionality. -type ClusterBackend interface { - GetNetworks() ([]types.NetworkResource, error) - GetNetwork(name string) (types.NetworkResource, error) - GetNetworksByName(name string) ([]types.NetworkResource, error) - CreateNetwork(nc types.NetworkCreateRequest) (string, error) - RemoveNetwork(name string) error -} diff --git a/vendor/github.com/docker/docker/api/server/router/network/filter.go b/vendor/github.com/docker/docker/api/server/router/network/filter.go deleted file mode 100644 index 02683e800..000000000 --- a/vendor/github.com/docker/docker/api/server/router/network/filter.go +++ /dev/null @@ -1,93 +0,0 @@ -package network // import "github.com/docker/docker/api/server/router/network" - -import ( - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/runconfig" -) - -func filterNetworkByType(nws []types.NetworkResource, netType string) ([]types.NetworkResource, error) { - retNws := []types.NetworkResource{} - switch netType { - case "builtin": - for _, nw := range nws { - if runconfig.IsPreDefinedNetwork(nw.Name) { - retNws = append(retNws, nw) - } - } - case "custom": - for _, nw := range nws { - if !runconfig.IsPreDefinedNetwork(nw.Name) { - retNws = append(retNws, nw) - } - } - default: - return nil, invalidFilter(netType) - } - return retNws, nil -} - -type invalidFilter string - -func (e invalidFilter) Error() string { - return "Invalid filter: 'type'='" + string(e) + "'" -} - -func (e invalidFilter) InvalidParameter() {} - -// filterNetworks filters network list according to user specified filter -// and returns user chosen networks -func filterNetworks(nws []types.NetworkResource, filter filters.Args) ([]types.NetworkResource, error) { - // if filter is empty, return original network list - if filter.Len() == 0 { - return nws, nil - } - - displayNet := []types.NetworkResource{} - for _, nw := range nws { - if filter.Contains("driver") { - if !filter.ExactMatch("driver", nw.Driver) { - continue - } - } - if filter.Contains("name") { - if !filter.Match("name", nw.Name) { - continue - } - } - if filter.Contains("id") { - if !filter.Match("id", nw.ID) { - continue - } - } - if filter.Contains("label") { - if !filter.MatchKVList("label", nw.Labels) { - continue - } - } - if filter.Contains("scope") { - if !filter.ExactMatch("scope", nw.Scope) { - continue - } - } - displayNet = append(displayNet, nw) - } - - if filter.Contains("type") { - typeNet := []types.NetworkResource{} - errFilter := filter.WalkValues("type", func(fval string) error { - passList, err := filterNetworkByType(displayNet, fval) - if err != nil { - return err - } - typeNet = append(typeNet, passList...) 
- return nil - }) - if errFilter != nil { - return nil, errFilter - } - displayNet = typeNet - } - - return displayNet, nil -} diff --git a/vendor/github.com/docker/docker/api/server/router/network/network.go b/vendor/github.com/docker/docker/api/server/router/network/network.go deleted file mode 100644 index 4eee97079..000000000 --- a/vendor/github.com/docker/docker/api/server/router/network/network.go +++ /dev/null @@ -1,43 +0,0 @@ -package network // import "github.com/docker/docker/api/server/router/network" - -import ( - "github.com/docker/docker/api/server/router" -) - -// networkRouter is a router to talk with the network controller -type networkRouter struct { - backend Backend - cluster ClusterBackend - routes []router.Route -} - -// NewRouter initializes a new network router -func NewRouter(b Backend, c ClusterBackend) router.Router { - r := &networkRouter{ - backend: b, - cluster: c, - } - r.initRoutes() - return r -} - -// Routes returns the available routes to the network controller -func (r *networkRouter) Routes() []router.Route { - return r.routes -} - -func (r *networkRouter) initRoutes() { - r.routes = []router.Route{ - // GET - router.NewGetRoute("/networks", r.getNetworksList), - router.NewGetRoute("/networks/", r.getNetworksList), - router.NewGetRoute("/networks/{id:.+}", r.getNetwork), - // POST - router.NewPostRoute("/networks/create", r.postNetworkCreate), - router.NewPostRoute("/networks/{id:.*}/connect", r.postNetworkConnect), - router.NewPostRoute("/networks/{id:.*}/disconnect", r.postNetworkDisconnect), - router.NewPostRoute("/networks/prune", r.postNetworksPrune, router.WithCancel), - // DELETE - router.NewDeleteRoute("/networks/{id:.*}", r.deleteNetwork), - } -} diff --git a/vendor/github.com/docker/docker/api/server/router/network/network_routes.go b/vendor/github.com/docker/docker/api/server/router/network/network_routes.go deleted file mode 100644 index 0248662a4..000000000 --- a/vendor/github.com/docker/docker/api/server/router/network/network_routes.go +++ /dev/null @@ -1,597 +0,0 @@ -package network // import "github.com/docker/docker/api/server/router/network" - -import ( - "context" - "encoding/json" - "net/http" - "strconv" - "strings" - - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/versions" - "github.com/docker/docker/errdefs" - "github.com/docker/libnetwork" - netconst "github.com/docker/libnetwork/datastore" - "github.com/docker/libnetwork/networkdb" - "github.com/pkg/errors" -) - -var ( - // acceptedNetworkFilters is a list of acceptable filters - acceptedNetworkFilters = map[string]bool{ - "driver": true, - "type": true, - "name": true, - "id": true, - "label": true, - "scope": true, - } -) - -func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - filter := r.Form.Get("filters") - netFilters, err := filters.FromJSON(filter) - if err != nil { - return err - } - - if err := netFilters.Validate(acceptedNetworkFilters); err != nil { - return err - } - - list := []types.NetworkResource{} - - if nr, err := n.cluster.GetNetworks(); err == nil { - list = append(list, nr...) 
- } - - // Combine the network list returned by Docker daemon if it is not already - // returned by the cluster manager -SKIP: - for _, nw := range n.backend.GetNetworks() { - for _, nl := range list { - if nl.ID == nw.ID() { - continue SKIP - } - } - - var nr *types.NetworkResource - // Versions < 1.28 fetch all the containers attached to a network - // in a network list API call. That is a heavyweight operation when - // run across all the networks. Starting with API version 1.28, this detailed - // info is available via the network-specific GET API (equivalent to inspect) - if versions.LessThan(httputils.VersionFromContext(ctx), "1.28") { - nr = n.buildDetailedNetworkResources(nw, false) - } else { - nr = n.buildNetworkResource(nw) - } - list = append(list, *nr) - } - - list, err = filterNetworks(list, netFilters) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, list) -} - -type invalidRequestError struct { - cause error -} - -func (e invalidRequestError) Error() string { - return e.cause.Error() -} - -func (e invalidRequestError) InvalidParameter() {} - -type ambigousResultsError string - -func (e ambigousResultsError) Error() string { - return "network " + string(e) + " is ambiguous" -} - -func (ambigousResultsError) InvalidParameter() {} - -func nameConflict(name string) error { - return errdefs.Conflict(libnetwork.NetworkNameError(name)) -} - -func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - term := vars["id"] - var ( - verbose bool - err error - ) - if v := r.URL.Query().Get("verbose"); v != "" { - if verbose, err = strconv.ParseBool(v); err != nil { - return errors.Wrapf(invalidRequestError{err}, "invalid value for verbose: %s", v) - } - } - scope := r.URL.Query().Get("scope") - - isMatchingScope := func(scope, term string) bool { - if term != "" { - return scope == term - } - return true - } - - // In case multiple networks have duplicate names, return error. - // TODO (yongtang): should we wrap with version here for backward compatibility? - - // First find based on full ID, return immediately once one is found. - // If a network appears both in swarm and local, assume it is in local first - - // For full name and partial ID, save the result first, and process later - // in case multiple records were found based on the same term - listByFullName := map[string]types.NetworkResource{} - listByPartialID := map[string]types.NetworkResource{} - - nw := n.backend.GetNetworks() - for _, network := range nw { - if network.ID() == term && isMatchingScope(network.Info().Scope(), scope) { - return httputils.WriteJSON(w, http.StatusOK, *n.buildDetailedNetworkResources(network, verbose)) - } - if network.Name() == term && isMatchingScope(network.Info().Scope(), scope) { - // No need to check the ID collision here as we are still in - // local scope and the network ID is unique in this scope. - listByFullName[network.ID()] = *n.buildDetailedNetworkResources(network, verbose) - } - if strings.HasPrefix(network.ID(), term) && isMatchingScope(network.Info().Scope(), scope) { - // No need to check the ID collision here as we are still in - // local scope and the network ID is unique in this scope.
- listByPartialID[network.ID()] = *n.buildDetailedNetworkResources(network, verbose) - } - } - - nwk, err := n.cluster.GetNetwork(term) - if err == nil { - // If the request carries a specific network ID / partial network ID, - // or a network name with the scope set to swarm, return the network. - // isMatchingScope is skipped here because it is true if the scope is - // not set, which would be the case for clients before API v1.30 - if strings.HasPrefix(nwk.ID, term) || (netconst.SwarmScope == scope) { - // If we have a previous match from the local backend, return it; we need the verbose form when enabled - // e.g. overlay/partial_ID or name/swarm_scope - if nwv, ok := listByPartialID[nwk.ID]; ok { - nwk = nwv - } else if nwv, ok := listByFullName[nwk.ID]; ok { - nwk = nwv - } - return httputils.WriteJSON(w, http.StatusOK, nwk) - } - } - - nr, _ := n.cluster.GetNetworks() - for _, network := range nr { - if network.ID == term && isMatchingScope(network.Scope, scope) { - return httputils.WriteJSON(w, http.StatusOK, network) - } - if network.Name == term && isMatchingScope(network.Scope, scope) { - // Check the ID collision as we are in swarm scope here, and - // the map (of the listByFullName) may have already had a - // network with the same ID (from local scope previously) - if _, ok := listByFullName[network.ID]; !ok { - listByFullName[network.ID] = network - } - } - if strings.HasPrefix(network.ID, term) && isMatchingScope(network.Scope, scope) { - // Check the ID collision as we are in swarm scope here, and - // the map (of the listByPartialID) may have already had a - // network with the same ID (from local scope previously) - if _, ok := listByPartialID[network.ID]; !ok { - listByPartialID[network.ID] = network - } - } - } - - // Find based on full name; return only if there are no duplicates - if len(listByFullName) == 1 { - for _, v := range listByFullName { - return httputils.WriteJSON(w, http.StatusOK, v) - } - } - if len(listByFullName) > 1 { - return errors.Wrapf(ambigousResultsError(term), "%d matches found based on name", len(listByFullName)) - } - - // Find based on partial ID; return only if there are no duplicates - if len(listByPartialID) == 1 { - for _, v := range listByPartialID { - return httputils.WriteJSON(w, http.StatusOK, v) - } - } - if len(listByPartialID) > 1 { - return errors.Wrapf(ambigousResultsError(term), "%d matches found based on ID prefix", len(listByPartialID)) - } - - return libnetwork.ErrNoSuchNetwork(term) -} - -func (n *networkRouter) postNetworkCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var create types.NetworkCreateRequest - - if err := httputils.ParseForm(r); err != nil { - return err - } - - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - if err := json.NewDecoder(r.Body).Decode(&create); err != nil { - return err - } - - if nws, err := n.cluster.GetNetworksByName(create.Name); err == nil && len(nws) > 0 { - return nameConflict(create.Name) - } - - nw, err := n.backend.CreateNetwork(create) - if err != nil { - var warning string - if _, ok := err.(libnetwork.NetworkNameError); ok { - // check if the user defined CheckDuplicate; if set to true, return an error, - // otherwise prepare a warning message - if create.CheckDuplicate { - return nameConflict(create.Name) - } - warning = libnetwork.NetworkNameError(create.Name).Error() - } - - if _, ok := err.(libnetwork.ManagerRedirectError); !ok { - return err - } - id, err := n.cluster.CreateNetwork(create) - if err != nil { - return err
- } - nw = &types.NetworkCreateResponse{ - ID: id, - Warning: warning, - } - } - - return httputils.WriteJSON(w, http.StatusCreated, nw) -} - -func (n *networkRouter) postNetworkConnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var connect types.NetworkConnect - if err := httputils.ParseForm(r); err != nil { - return err - } - - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - if err := json.NewDecoder(r.Body).Decode(&connect); err != nil { - return err - } - - // Unlike other operations, we do not check for ambiguity of the name/ID here. - // The reason is that, in the case of an attachable network in swarm scope, the actual local network - // may not be available at the time. At the same time, the daemon-side `ConnectContainerToNetwork` - // does the ambiguity check anyway. Therefore, passing the name to the daemon is enough. - return n.backend.ConnectContainerToNetwork(connect.Container, vars["id"], connect.EndpointConfig) -} - -func (n *networkRouter) postNetworkDisconnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var disconnect types.NetworkDisconnect - if err := httputils.ParseForm(r); err != nil { - return err - } - - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - if err := json.NewDecoder(r.Body).Decode(&disconnect); err != nil { - return err - } - - return n.backend.DisconnectContainerFromNetwork(disconnect.Container, vars["id"], disconnect.Force) -} - -func (n *networkRouter) deleteNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - nw, err := n.findUniqueNetwork(vars["id"]) - if err != nil { - return err - } - if nw.Scope == "swarm" { - if err = n.cluster.RemoveNetwork(nw.ID); err != nil { - return err - } - } else { - if err := n.backend.DeleteNetwork(nw.ID); err != nil { - return err - } - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func (n *networkRouter) buildNetworkResource(nw libnetwork.Network) *types.NetworkResource { - r := &types.NetworkResource{} - if nw == nil { - return r - } - - info := nw.Info() - r.Name = nw.Name() - r.ID = nw.ID() - r.Created = info.Created() - r.Scope = info.Scope() - r.Driver = nw.Type() - r.EnableIPv6 = info.IPv6Enabled() - r.Internal = info.Internal() - r.Attachable = info.Attachable() - r.Ingress = info.Ingress() - r.Options = info.DriverOptions() - r.Containers = make(map[string]types.EndpointResource) - buildIpamResources(r, info) - r.Labels = info.Labels() - r.ConfigOnly = info.ConfigOnly() - - if cn := info.ConfigFrom(); cn != "" { - r.ConfigFrom = network.ConfigReference{Network: cn} - } - - peers := info.Peers() - if len(peers) != 0 { - r.Peers = buildPeerInfoResources(peers) - } - - return r -} - -func (n *networkRouter) buildDetailedNetworkResources(nw libnetwork.Network, verbose bool) *types.NetworkResource { - if nw == nil { - return &types.NetworkResource{} - } - - r := n.buildNetworkResource(nw) - epl := nw.Endpoints() - for _, e := range epl { - ei := e.Info() - if ei == nil { - continue - } - sb := ei.Sandbox() - tmpID := e.ID() - key := "ep-" + tmpID - if sb != nil { - key = sb.ContainerID() - } - - r.Containers[key] = buildEndpointResource(tmpID, e.Name(), ei) - } - if !verbose { - return r - } - services := nw.Info().Services() - r.Services = make(map[string]network.ServiceInfo) - for name, service := range services { - tasks := []network.Task{} - for
_, t := range service.Tasks { - tasks = append(tasks, network.Task{ - Name: t.Name, - EndpointID: t.EndpointID, - EndpointIP: t.EndpointIP, - Info: t.Info, - }) - } - r.Services[name] = network.ServiceInfo{ - VIP: service.VIP, - Ports: service.Ports, - Tasks: tasks, - LocalLBIndex: service.LocalLBIndex, - } - } - return r -} - -func buildPeerInfoResources(peers []networkdb.PeerInfo) []network.PeerInfo { - peerInfo := make([]network.PeerInfo, 0, len(peers)) - for _, peer := range peers { - peerInfo = append(peerInfo, network.PeerInfo{ - Name: peer.Name, - IP: peer.IP, - }) - } - return peerInfo -} - -func buildIpamResources(r *types.NetworkResource, nwInfo libnetwork.NetworkInfo) { - id, opts, ipv4conf, ipv6conf := nwInfo.IpamConfig() - - ipv4Info, ipv6Info := nwInfo.IpamInfo() - - r.IPAM.Driver = id - - r.IPAM.Options = opts - - r.IPAM.Config = []network.IPAMConfig{} - for _, ip4 := range ipv4conf { - if ip4.PreferredPool == "" { - continue - } - iData := network.IPAMConfig{} - iData.Subnet = ip4.PreferredPool - iData.IPRange = ip4.SubPool - iData.Gateway = ip4.Gateway - iData.AuxAddress = ip4.AuxAddresses - r.IPAM.Config = append(r.IPAM.Config, iData) - } - - if len(r.IPAM.Config) == 0 { - for _, ip4Info := range ipv4Info { - iData := network.IPAMConfig{} - iData.Subnet = ip4Info.IPAMData.Pool.String() - if ip4Info.IPAMData.Gateway != nil { - iData.Gateway = ip4Info.IPAMData.Gateway.IP.String() - } - r.IPAM.Config = append(r.IPAM.Config, iData) - } - } - - hasIpv6Conf := false - for _, ip6 := range ipv6conf { - if ip6.PreferredPool == "" { - continue - } - hasIpv6Conf = true - iData := network.IPAMConfig{} - iData.Subnet = ip6.PreferredPool - iData.IPRange = ip6.SubPool - iData.Gateway = ip6.Gateway - iData.AuxAddress = ip6.AuxAddresses - r.IPAM.Config = append(r.IPAM.Config, iData) - } - - if !hasIpv6Conf { - for _, ip6Info := range ipv6Info { - if ip6Info.IPAMData.Pool == nil { - continue - } - iData := network.IPAMConfig{} - iData.Subnet = ip6Info.IPAMData.Pool.String() - iData.Gateway = ip6Info.IPAMData.Gateway.String() - r.IPAM.Config = append(r.IPAM.Config, iData) - } - } -} - -func buildEndpointResource(id string, name string, info libnetwork.EndpointInfo) types.EndpointResource { - er := types.EndpointResource{} - - er.EndpointID = id - er.Name = name - ei := info - if ei == nil { - return er - } - - if iface := ei.Iface(); iface != nil { - if mac := iface.MacAddress(); mac != nil { - er.MacAddress = mac.String() - } - if ip := iface.Address(); ip != nil && len(ip.IP) > 0 { - er.IPv4Address = ip.String() - } - - if ipv6 := iface.AddressIPv6(); ipv6 != nil && len(ipv6.IP) > 0 { - er.IPv6Address = ipv6.String() - } - } - return er -} - -func (n *networkRouter) postNetworksPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - pruneFilters, err := filters.FromJSON(r.Form.Get("filters")) - if err != nil { - return err - } - - pruneReport, err := n.backend.NetworksPrune(ctx, pruneFilters) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, pruneReport) -} - -// findUniqueNetwork searches for a network across different scopes (both local and swarm). -// NOTE: This findUniqueNetwork is different from FindNetwork in the daemon. -// If multiple networks have duplicate names, an error is returned. -// First find based on the full ID and return immediately once one is found.
-// If a network appears in both swarm and local scope, assume the local one first. -// For a full name or partial ID, save the result first and process it later, -// in case multiple records were found for the same term. -// TODO (yongtang): should we wrap with version here for backward compatibility? -func (n *networkRouter) findUniqueNetwork(term string) (types.NetworkResource, error) { - listByFullName := map[string]types.NetworkResource{} - listByPartialID := map[string]types.NetworkResource{} - - nw := n.backend.GetNetworks() - for _, network := range nw { - if network.ID() == term { - return *n.buildDetailedNetworkResources(network, false), nil - - } - if network.Name() == term && !network.Info().Ingress() { - // No need to check the ID collision here as we are still in - // local scope and the network ID is unique in this scope. - listByFullName[network.ID()] = *n.buildDetailedNetworkResources(network, false) - } - if strings.HasPrefix(network.ID(), term) { - // No need to check the ID collision here as we are still in - // local scope and the network ID is unique in this scope. - listByPartialID[network.ID()] = *n.buildDetailedNetworkResources(network, false) - } - } - - nr, _ := n.cluster.GetNetworks() - for _, network := range nr { - if network.ID == term { - return network, nil - } - if network.Name == term { - // Check for an ID collision as we are in swarm scope here, and - // the map (listByFullName) may already have a - // network with the same ID (from local scope previously) - if _, ok := listByFullName[network.ID]; !ok { - listByFullName[network.ID] = network - } - } - if strings.HasPrefix(network.ID, term) { - // Check for an ID collision as we are in swarm scope here, and - // the map (listByPartialID) may already have a - // network with the same ID (from local scope previously) - if _, ok := listByPartialID[network.ID]; !ok { - listByPartialID[network.ID] = network - } - } - } - - // Find based on the full name; return only if there are no duplicates - if len(listByFullName) == 1 { - for _, v := range listByFullName { - return v, nil - } - } - if len(listByFullName) > 1 { - return types.NetworkResource{}, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found based on name)", term, len(listByFullName))) - } - - // Find based on the partial ID; return only if there are no duplicates - if len(listByPartialID) == 1 { - for _, v := range listByPartialID { - return v, nil - } - } - if len(listByPartialID) > 1 { - return types.NetworkResource{}, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found based on ID prefix)", term, len(listByPartialID))) - } - - return types.NetworkResource{}, errdefs.NotFound(libnetwork.ErrNoSuchNetwork(term)) -} diff --git a/vendor/github.com/docker/docker/api/server/router/plugin/backend.go b/vendor/github.com/docker/docker/api/server/router/plugin/backend.go deleted file mode 100644 index d885ebb33..000000000 --- a/vendor/github.com/docker/docker/api/server/router/plugin/backend.go +++ /dev/null @@ -1,27 +0,0 @@ -package plugin // import "github.com/docker/docker/api/server/router/plugin" - -import ( - "context" - "io" - "net/http" - - "github.com/docker/distribution/reference" - enginetypes "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/plugin" -) - -// Backend for Plugin -type Backend interface { - Disable(name string, config *enginetypes.PluginDisableConfig) error - Enable(name string, config *enginetypes.PluginEnableConfig)
error - List(filters.Args) ([]enginetypes.Plugin, error) - Inspect(name string) (*enginetypes.Plugin, error) - Remove(name string, config *enginetypes.PluginRmConfig) error - Set(name string, args []string) error - Privileges(ctx context.Context, ref reference.Named, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) (enginetypes.PluginPrivileges, error) - Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer, opts ...plugin.CreateOpt) error - Push(ctx context.Context, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, outStream io.Writer) error - Upgrade(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error - CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *enginetypes.PluginCreateOptions) error -} diff --git a/vendor/github.com/docker/docker/api/server/router/plugin/plugin.go b/vendor/github.com/docker/docker/api/server/router/plugin/plugin.go deleted file mode 100644 index 7a4f987aa..000000000 --- a/vendor/github.com/docker/docker/api/server/router/plugin/plugin.go +++ /dev/null @@ -1,39 +0,0 @@ -package plugin // import "github.com/docker/docker/api/server/router/plugin" - -import "github.com/docker/docker/api/server/router" - -// pluginRouter is a router to talk with the plugin controller -type pluginRouter struct { - backend Backend - routes []router.Route -} - -// NewRouter initializes a new plugin router -func NewRouter(b Backend) router.Router { - r := &pluginRouter{ - backend: b, - } - r.initRoutes() - return r -} - -// Routes returns the available routes to the plugin controller -func (r *pluginRouter) Routes() []router.Route { - return r.routes -} - -func (r *pluginRouter) initRoutes() { - r.routes = []router.Route{ - router.NewGetRoute("/plugins", r.listPlugins), - router.NewGetRoute("/plugins/{name:.*}/json", r.inspectPlugin), - router.NewGetRoute("/plugins/privileges", r.getPrivileges), - router.NewDeleteRoute("/plugins/{name:.*}", r.removePlugin), - router.NewPostRoute("/plugins/{name:.*}/enable", r.enablePlugin), // PATCH?
- router.NewPostRoute("/plugins/{name:.*}/disable", r.disablePlugin), - router.NewPostRoute("/plugins/pull", r.pullPlugin, router.WithCancel), - router.NewPostRoute("/plugins/{name:.*}/push", r.pushPlugin, router.WithCancel), - router.NewPostRoute("/plugins/{name:.*}/upgrade", r.upgradePlugin, router.WithCancel), - router.NewPostRoute("/plugins/{name:.*}/set", r.setPlugin), - router.NewPostRoute("/plugins/create", r.createPlugin), - } -} diff --git a/vendor/github.com/docker/docker/api/server/router/plugin/plugin_routes.go b/vendor/github.com/docker/docker/api/server/router/plugin/plugin_routes.go deleted file mode 100644 index 4e816391d..000000000 --- a/vendor/github.com/docker/docker/api/server/router/plugin/plugin_routes.go +++ /dev/null @@ -1,310 +0,0 @@ -package plugin // import "github.com/docker/docker/api/server/router/plugin" - -import ( - "context" - "encoding/base64" - "encoding/json" - "net/http" - "strconv" - "strings" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/streamformatter" - "github.com/pkg/errors" -) - -func parseHeaders(headers http.Header) (map[string][]string, *types.AuthConfig) { - - metaHeaders := map[string][]string{} - for k, v := range headers { - if strings.HasPrefix(k, "X-Meta-") { - metaHeaders[k] = v - } - } - - // Get X-Registry-Auth - authEncoded := headers.Get("X-Registry-Auth") - authConfig := &types.AuthConfig{} - if authEncoded != "" { - authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { - authConfig = &types.AuthConfig{} - } - } - - return metaHeaders, authConfig -} - -// parseRemoteRef parses the remote reference into a reference.Named -// returning the tag associated with the reference. In the case the -// given reference string includes both digest and tag, the returned -// reference will have the digest without the tag, but the tag will -// be returned. 
-func parseRemoteRef(remote string) (reference.Named, string, error) { - // Parse remote reference, supporting remotes with name and tag - remoteRef, err := reference.ParseNormalizedNamed(remote) - if err != nil { - return nil, "", err - } - - type canonicalWithTag interface { - reference.Canonical - Tag() string - } - - if canonical, ok := remoteRef.(canonicalWithTag); ok { - remoteRef, err = reference.WithDigest(reference.TrimNamed(remoteRef), canonical.Digest()) - if err != nil { - return nil, "", err - } - return remoteRef, canonical.Tag(), nil - } - - remoteRef = reference.TagNameOnly(remoteRef) - - return remoteRef, "", nil -} - -func (pr *pluginRouter) getPrivileges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - metaHeaders, authConfig := parseHeaders(r.Header) - - ref, _, err := parseRemoteRef(r.FormValue("remote")) - if err != nil { - return err - } - - privileges, err := pr.backend.Privileges(ctx, ref, metaHeaders, authConfig) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, privileges) -} - -func (pr *pluginRouter) upgradePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return errors.Wrap(err, "failed to parse form") - } - - var privileges types.PluginPrivileges - dec := json.NewDecoder(r.Body) - if err := dec.Decode(&privileges); err != nil { - return errors.Wrap(err, "failed to parse privileges") - } - if dec.More() { - return errors.New("invalid privileges") - } - - metaHeaders, authConfig := parseHeaders(r.Header) - ref, tag, err := parseRemoteRef(r.FormValue("remote")) - if err != nil { - return err - } - - name, err := getName(ref, tag, vars["name"]) - if err != nil { - return err - } - w.Header().Set("Docker-Plugin-Name", name) - - w.Header().Set("Content-Type", "application/json") - output := ioutils.NewWriteFlusher(w) - - if err := pr.backend.Upgrade(ctx, ref, name, metaHeaders, authConfig, privileges, output); err != nil { - if !output.Flushed() { - return err - } - output.Write(streamformatter.FormatError(err)) - } - - return nil -} - -func (pr *pluginRouter) pullPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return errors.Wrap(err, "failed to parse form") - } - - var privileges types.PluginPrivileges - dec := json.NewDecoder(r.Body) - if err := dec.Decode(&privileges); err != nil { - return errors.Wrap(err, "failed to parse privileges") - } - if dec.More() { - return errors.New("invalid privileges") - } - - metaHeaders, authConfig := parseHeaders(r.Header) - ref, tag, err := parseRemoteRef(r.FormValue("remote")) - if err != nil { - return err - } - - name, err := getName(ref, tag, r.FormValue("name")) - if err != nil { - return err - } - w.Header().Set("Docker-Plugin-Name", name) - - w.Header().Set("Content-Type", "application/json") - output := ioutils.NewWriteFlusher(w) - - if err := pr.backend.Pull(ctx, ref, name, metaHeaders, authConfig, privileges, output); err != nil { - if !output.Flushed() { - return err - } - output.Write(streamformatter.FormatError(err)) - } - - return nil -} - -func getName(ref reference.Named, tag, name string) (string, error) { - if name == "" { - if _, ok := ref.(reference.Canonical); ok { - trimmed := reference.TrimNamed(ref) - if tag != "" { - nt, err := reference.WithTag(trimmed, tag) 
- if err != nil { - return "", err - } - name = reference.FamiliarString(nt) - } else { - name = reference.FamiliarString(reference.TagNameOnly(trimmed)) - } - } else { - name = reference.FamiliarString(ref) - } - } else { - localRef, err := reference.ParseNormalizedNamed(name) - if err != nil { - return "", err - } - if _, ok := localRef.(reference.Canonical); ok { - return "", errors.New("cannot use digest in plugin tag") - } - if reference.IsNameOnly(localRef) { - // TODO: log change in name to out stream - name = reference.FamiliarString(reference.TagNameOnly(localRef)) - } - } - return name, nil -} - -func (pr *pluginRouter) createPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - options := &types.PluginCreateOptions{ - RepoName: r.FormValue("name")} - - if err := pr.backend.CreateFromContext(ctx, r.Body, options); err != nil { - return err - } - //TODO: send progress bar - w.WriteHeader(http.StatusNoContent) - return nil -} - -func (pr *pluginRouter) enablePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - name := vars["name"] - timeout, err := strconv.Atoi(r.Form.Get("timeout")) - if err != nil { - return err - } - config := &types.PluginEnableConfig{Timeout: timeout} - - return pr.backend.Enable(name, config) -} - -func (pr *pluginRouter) disablePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - name := vars["name"] - config := &types.PluginDisableConfig{ - ForceDisable: httputils.BoolValue(r, "force"), - } - - return pr.backend.Disable(name, config) -} - -func (pr *pluginRouter) removePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - name := vars["name"] - config := &types.PluginRmConfig{ - ForceRemove: httputils.BoolValue(r, "force"), - } - return pr.backend.Remove(name, config) -} - -func (pr *pluginRouter) pushPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return errors.Wrap(err, "failed to parse form") - } - - metaHeaders, authConfig := parseHeaders(r.Header) - - w.Header().Set("Content-Type", "application/json") - output := ioutils.NewWriteFlusher(w) - - if err := pr.backend.Push(ctx, vars["name"], metaHeaders, authConfig, output); err != nil { - if !output.Flushed() { - return err - } - output.Write(streamformatter.FormatError(err)) - } - return nil -} - -func (pr *pluginRouter) setPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var args []string - if err := json.NewDecoder(r.Body).Decode(&args); err != nil { - return err - } - if err := pr.backend.Set(vars["name"], args); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func (pr *pluginRouter) listPlugins(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - pluginFilters, err := filters.FromJSON(r.Form.Get("filters")) - if err != nil { - return err - } - l, err := pr.backend.List(pluginFilters) - if err != nil { - return err - } - return 
httputils.WriteJSON(w, http.StatusOK, l) -} - -func (pr *pluginRouter) inspectPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - result, err := pr.backend.Inspect(vars["name"]) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, result) -} diff --git a/vendor/github.com/docker/docker/api/server/router/router.go b/vendor/github.com/docker/docker/api/server/router/router.go deleted file mode 100644 index e62faed71..000000000 --- a/vendor/github.com/docker/docker/api/server/router/router.go +++ /dev/null @@ -1,19 +0,0 @@ -package router // import "github.com/docker/docker/api/server/router" - -import "github.com/docker/docker/api/server/httputils" - -// Router defines an interface to specify a group of routes to add to the docker server. -type Router interface { - // Routes returns the list of routes to add to the docker server. - Routes() []Route -} - -// Route defines an individual API route in the docker server. -type Route interface { - // Handler returns the raw function to create the http handler. - Handler() httputils.APIFunc - // Method returns the http method that the route responds to. - Method() string - // Path returns the subpath that the route responds to. - Path() string -} diff --git a/vendor/github.com/docker/docker/api/server/router/session/backend.go b/vendor/github.com/docker/docker/api/server/router/session/backend.go deleted file mode 100644 index d9b14d480..000000000 --- a/vendor/github.com/docker/docker/api/server/router/session/backend.go +++ /dev/null @@ -1,11 +0,0 @@ -package session // import "github.com/docker/docker/api/server/router/session" - -import ( - "context" - "net/http" -) - -// Backend abstracts a session receiver from an http request.
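// Hedged sketch (illustration only, not part of the vendored file): a
// minimal implementation of this contract could answer the request itself,
// though a real backend is expected to take over the connection for the
// session protocol.
//
//	type echoBackend struct{}
//
//	func (echoBackend) HandleHTTPRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
//		w.WriteHeader(http.StatusOK) // placeholder behavior for illustration
//		return nil
//	}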
-type Backend interface { - HandleHTTPRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) error -} diff --git a/vendor/github.com/docker/docker/api/server/router/session/session.go b/vendor/github.com/docker/docker/api/server/router/session/session.go deleted file mode 100644 index de6d63008..000000000 --- a/vendor/github.com/docker/docker/api/server/router/session/session.go +++ /dev/null @@ -1,29 +0,0 @@ -package session // import "github.com/docker/docker/api/server/router/session" - -import "github.com/docker/docker/api/server/router" - -// sessionRouter is a router to talk with the session controller -type sessionRouter struct { - backend Backend - routes []router.Route -} - -// NewRouter initializes a new session router -func NewRouter(b Backend) router.Router { - r := &sessionRouter{ - backend: b, - } - r.initRoutes() - return r -} - -// Routes returns the available routes to the session controller -func (r *sessionRouter) Routes() []router.Route { - return r.routes -} - -func (r *sessionRouter) initRoutes() { - r.routes = []router.Route{ - router.Experimental(router.NewPostRoute("/session", r.startSession)), - } -} diff --git a/vendor/github.com/docker/docker/api/server/router/session/session_routes.go b/vendor/github.com/docker/docker/api/server/router/session/session_routes.go deleted file mode 100644 index 691ac6228..000000000 --- a/vendor/github.com/docker/docker/api/server/router/session/session_routes.go +++ /dev/null @@ -1,16 +0,0 @@ -package session // import "github.com/docker/docker/api/server/router/session" - -import ( - "context" - "net/http" - - "github.com/docker/docker/errdefs" -) - -func (sr *sessionRouter) startSession(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - err := sr.backend.HandleHTTPRequest(ctx, w, r) - if err != nil { - return errdefs.InvalidParameter(err) - } - return nil -} diff --git a/vendor/github.com/docker/docker/api/server/router/swarm/backend.go b/vendor/github.com/docker/docker/api/server/router/swarm/backend.go deleted file mode 100644 index d0c7e60fb..000000000 --- a/vendor/github.com/docker/docker/api/server/router/swarm/backend.go +++ /dev/null @@ -1,48 +0,0 @@ -package swarm // import "github.com/docker/docker/api/server/router/swarm" - -import ( - "context" - - basictypes "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/backend" - types "github.com/docker/docker/api/types/swarm" -) - -// Backend abstracts a swarm manager.
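// Hedged usage sketch (illustration only; the variable and listen address
// below are hypothetical):
//
//	var b Backend // e.g. the daemon's cluster component
//	nodeID, err := b.Init(types.InitRequest{ListenAddr: "0.0.0.0:2377"})
//	// on success, nodeID identifies the new swarm's first manager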
-type Backend interface { - Init(req types.InitRequest) (string, error) - Join(req types.JoinRequest) error - Leave(force bool) error - Inspect() (types.Swarm, error) - Update(uint64, types.Spec, types.UpdateFlags) error - GetUnlockKey() (string, error) - UnlockSwarm(req types.UnlockRequest) error - - GetServices(basictypes.ServiceListOptions) ([]types.Service, error) - GetService(idOrName string, insertDefaults bool) (types.Service, error) - CreateService(types.ServiceSpec, string, bool) (*basictypes.ServiceCreateResponse, error) - UpdateService(string, uint64, types.ServiceSpec, basictypes.ServiceUpdateOptions, bool) (*basictypes.ServiceUpdateResponse, error) - RemoveService(string) error - - ServiceLogs(context.Context, *backend.LogSelector, *basictypes.ContainerLogsOptions) (<-chan *backend.LogMessage, error) - - GetNodes(basictypes.NodeListOptions) ([]types.Node, error) - GetNode(string) (types.Node, error) - UpdateNode(string, uint64, types.NodeSpec) error - RemoveNode(string, bool) error - - GetTasks(basictypes.TaskListOptions) ([]types.Task, error) - GetTask(string) (types.Task, error) - - GetSecrets(opts basictypes.SecretListOptions) ([]types.Secret, error) - CreateSecret(s types.SecretSpec) (string, error) - RemoveSecret(idOrName string) error - GetSecret(id string) (types.Secret, error) - UpdateSecret(idOrName string, version uint64, spec types.SecretSpec) error - - GetConfigs(opts basictypes.ConfigListOptions) ([]types.Config, error) - CreateConfig(s types.ConfigSpec) (string, error) - RemoveConfig(id string) error - GetConfig(id string) (types.Config, error) - UpdateConfig(idOrName string, version uint64, spec types.ConfigSpec) error -} diff --git a/vendor/github.com/docker/docker/api/server/router/swarm/cluster.go b/vendor/github.com/docker/docker/api/server/router/swarm/cluster.go deleted file mode 100644 index 52f950a3a..000000000 --- a/vendor/github.com/docker/docker/api/server/router/swarm/cluster.go +++ /dev/null @@ -1,63 +0,0 @@ -package swarm // import "github.com/docker/docker/api/server/router/swarm" - -import "github.com/docker/docker/api/server/router" - -// swarmRouter is a router to talk with the swarm controller -type swarmRouter struct { - backend Backend - routes []router.Route -} - -// NewRouter initializes a new swarm router -func NewRouter(b Backend) router.Router { - r := &swarmRouter{ - backend: b, - } - r.initRoutes() - return r -} - -// Routes returns the available routes to the swarm controller -func (sr *swarmRouter) Routes() []router.Route { - return sr.routes -} - -func (sr *swarmRouter) initRoutes() { - sr.routes = []router.Route{ - router.NewPostRoute("/swarm/init", sr.initCluster), - router.NewPostRoute("/swarm/join", sr.joinCluster), - router.NewPostRoute("/swarm/leave", sr.leaveCluster), - router.NewGetRoute("/swarm", sr.inspectCluster), - router.NewGetRoute("/swarm/unlockkey", sr.getUnlockKey), - router.NewPostRoute("/swarm/update", sr.updateCluster), - router.NewPostRoute("/swarm/unlock", sr.unlockCluster), - - router.NewGetRoute("/services", sr.getServices), - router.NewGetRoute("/services/{id}", sr.getService), - router.NewPostRoute("/services/create", sr.createService), - router.NewPostRoute("/services/{id}/update", sr.updateService), - router.NewDeleteRoute("/services/{id}", sr.removeService), - router.NewGetRoute("/services/{id}/logs", sr.getServiceLogs, router.WithCancel), - - router.NewGetRoute("/nodes", sr.getNodes), - router.NewGetRoute("/nodes/{id}", sr.getNode), - router.NewDeleteRoute("/nodes/{id}", sr.removeNode), -
router.NewPostRoute("/nodes/{id}/update", sr.updateNode), - - router.NewGetRoute("/tasks", sr.getTasks), - router.NewGetRoute("/tasks/{id}", sr.getTask), - router.NewGetRoute("/tasks/{id}/logs", sr.getTaskLogs, router.WithCancel), - - router.NewGetRoute("/secrets", sr.getSecrets), - router.NewPostRoute("/secrets/create", sr.createSecret), - router.NewDeleteRoute("/secrets/{id}", sr.removeSecret), - router.NewGetRoute("/secrets/{id}", sr.getSecret), - router.NewPostRoute("/secrets/{id}/update", sr.updateSecret), - - router.NewGetRoute("/configs", sr.getConfigs), - router.NewPostRoute("/configs/create", sr.createConfig), - router.NewDeleteRoute("/configs/{id}", sr.removeConfig), - router.NewGetRoute("/configs/{id}", sr.getConfig), - router.NewPostRoute("/configs/{id}/update", sr.updateConfig), - } -} diff --git a/vendor/github.com/docker/docker/api/server/router/swarm/cluster_routes.go b/vendor/github.com/docker/docker/api/server/router/swarm/cluster_routes.go deleted file mode 100644 index a70248860..000000000 --- a/vendor/github.com/docker/docker/api/server/router/swarm/cluster_routes.go +++ /dev/null @@ -1,494 +0,0 @@ -package swarm // import "github.com/docker/docker/api/server/router/swarm" - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "strconv" - - "github.com/docker/docker/api/server/httputils" - basictypes "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/api/types/filters" - types "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/api/types/versions" - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -func (sr *swarmRouter) initCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var req types.InitRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return err - } - nodeID, err := sr.backend.Init(req) - if err != nil { - logrus.Errorf("Error initializing swarm: %v", err) - return err - } - return httputils.WriteJSON(w, http.StatusOK, nodeID) -} - -func (sr *swarmRouter) joinCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var req types.JoinRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return err - } - return sr.backend.Join(req) -} - -func (sr *swarmRouter) leaveCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - force := httputils.BoolValue(r, "force") - return sr.backend.Leave(force) -} - -func (sr *swarmRouter) inspectCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - swarm, err := sr.backend.Inspect() - if err != nil { - logrus.Errorf("Error getting swarm: %v", err) - return err - } - - return httputils.WriteJSON(w, http.StatusOK, swarm) -} - -func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var swarm types.Spec - if err := json.NewDecoder(r.Body).Decode(&swarm); err != nil { - return err - } - - rawVersion := r.URL.Query().Get("version") - version, err := strconv.ParseUint(rawVersion, 10, 64) - if err != nil { - err := fmt.Errorf("invalid swarm version '%s': %v", rawVersion, err) - return errdefs.InvalidParameter(err) - } - - var flags types.UpdateFlags - - if value := r.URL.Query().Get("rotateWorkerToken"); value != "" { - rot, err 
:= strconv.ParseBool(value) - if err != nil { - err := fmt.Errorf("invalid value for rotateWorkerToken: %s", value) - return errdefs.InvalidParameter(err) - } - - flags.RotateWorkerToken = rot - } - - if value := r.URL.Query().Get("rotateManagerToken"); value != "" { - rot, err := strconv.ParseBool(value) - if err != nil { - err := fmt.Errorf("invalid value for rotateManagerToken: %s", value) - return errdefs.InvalidParameter(err) - } - - flags.RotateManagerToken = rot - } - - if value := r.URL.Query().Get("rotateManagerUnlockKey"); value != "" { - rot, err := strconv.ParseBool(value) - if err != nil { - return errdefs.InvalidParameter(fmt.Errorf("invalid value for rotateManagerUnlockKey: %s", value)) - } - - flags.RotateManagerUnlockKey = rot - } - - if err := sr.backend.Update(version, swarm, flags); err != nil { - logrus.Errorf("Error configuring swarm: %v", err) - return err - } - return nil -} - -func (sr *swarmRouter) unlockCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var req types.UnlockRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return err - } - - if err := sr.backend.UnlockSwarm(req); err != nil { - logrus.Errorf("Error unlocking swarm: %v", err) - return err - } - return nil -} - -func (sr *swarmRouter) getUnlockKey(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - unlockKey, err := sr.backend.GetUnlockKey() - if err != nil { - logrus.WithError(err).Errorf("Error retrieving swarm unlock key") - return err - } - - return httputils.WriteJSON(w, http.StatusOK, &basictypes.SwarmUnlockKeyResponse{ - UnlockKey: unlockKey, - }) -} - -func (sr *swarmRouter) getServices(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - filter, err := filters.FromJSON(r.Form.Get("filters")) - if err != nil { - return errdefs.InvalidParameter(err) - } - - services, err := sr.backend.GetServices(basictypes.ServiceListOptions{Filters: filter}) - if err != nil { - logrus.Errorf("Error getting services: %v", err) - return err - } - - return httputils.WriteJSON(w, http.StatusOK, services) -} - -func (sr *swarmRouter) getService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var insertDefaults bool - if value := r.URL.Query().Get("insertDefaults"); value != "" { - var err error - insertDefaults, err = strconv.ParseBool(value) - if err != nil { - err := fmt.Errorf("invalid value for insertDefaults: %s", value) - return errors.Wrapf(errdefs.InvalidParameter(err), "invalid value for insertDefaults: %s", value) - } - } - - service, err := sr.backend.GetService(vars["id"], insertDefaults) - if err != nil { - logrus.Errorf("Error getting service %s: %v", vars["id"], err) - return err - } - - return httputils.WriteJSON(w, http.StatusOK, service) -} - -func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var service types.ServiceSpec - if err := json.NewDecoder(r.Body).Decode(&service); err != nil { - return err - } - - // Get returns "" if the header does not exist - encodedAuth := r.Header.Get("X-Registry-Auth") - cliVersion := r.Header.Get("version") - queryRegistry := false - if cliVersion != "" && versions.LessThan(cliVersion, "1.30") { - queryRegistry = true - } - - resp, err := sr.backend.CreateService(service, encodedAuth, queryRegistry) - if err != 
nil { - logrus.Errorf("Error creating service %s: %v", service.Name, err) - return err - } - - return httputils.WriteJSON(w, http.StatusCreated, resp) -} - -func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var service types.ServiceSpec - if err := json.NewDecoder(r.Body).Decode(&service); err != nil { - return err - } - - rawVersion := r.URL.Query().Get("version") - version, err := strconv.ParseUint(rawVersion, 10, 64) - if err != nil { - err := fmt.Errorf("invalid service version '%s': %v", rawVersion, err) - return errdefs.InvalidParameter(err) - } - - var flags basictypes.ServiceUpdateOptions - - // Get returns "" if the header does not exist - flags.EncodedRegistryAuth = r.Header.Get("X-Registry-Auth") - flags.RegistryAuthFrom = r.URL.Query().Get("registryAuthFrom") - flags.Rollback = r.URL.Query().Get("rollback") - cliVersion := r.Header.Get("version") - queryRegistry := false - if cliVersion != "" && versions.LessThan(cliVersion, "1.30") { - queryRegistry = true - } - - resp, err := sr.backend.UpdateService(vars["id"], version, service, flags, queryRegistry) - if err != nil { - logrus.Errorf("Error updating service %s: %v", vars["id"], err) - return err - } - return httputils.WriteJSON(w, http.StatusOK, resp) -} - -func (sr *swarmRouter) removeService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := sr.backend.RemoveService(vars["id"]); err != nil { - logrus.Errorf("Error removing service %s: %v", vars["id"], err) - return err - } - return nil -} - -func (sr *swarmRouter) getTaskLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - // make a selector to pass to the helper function - selector := &backend.LogSelector{ - Tasks: []string{vars["id"]}, - } - return sr.swarmLogs(ctx, w, r, selector) -} - -func (sr *swarmRouter) getServiceLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - // make a selector to pass to the helper function - selector := &backend.LogSelector{ - Services: []string{vars["id"]}, - } - return sr.swarmLogs(ctx, w, r, selector) -} - -func (sr *swarmRouter) getNodes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - filter, err := filters.FromJSON(r.Form.Get("filters")) - if err != nil { - return err - } - - nodes, err := sr.backend.GetNodes(basictypes.NodeListOptions{Filters: filter}) - if err != nil { - logrus.Errorf("Error getting nodes: %v", err) - return err - } - - return httputils.WriteJSON(w, http.StatusOK, nodes) -} - -func (sr *swarmRouter) getNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - node, err := sr.backend.GetNode(vars["id"]) - if err != nil { - logrus.Errorf("Error getting node %s: %v", vars["id"], err) - return err - } - - return httputils.WriteJSON(w, http.StatusOK, node) -} - -func (sr *swarmRouter) updateNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var node types.NodeSpec - if err := json.NewDecoder(r.Body).Decode(&node); err != nil { - return err - } - - rawVersion := r.URL.Query().Get("version") - version, err := strconv.ParseUint(rawVersion, 10, 64) - if err != nil { 
- err := fmt.Errorf("invalid node version '%s': %v", rawVersion, err) - return errdefs.InvalidParameter(err) - } - - if err := sr.backend.UpdateNode(vars["id"], version, node); err != nil { - logrus.Errorf("Error updating node %s: %v", vars["id"], err) - return err - } - return nil -} - -func (sr *swarmRouter) removeNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - force := httputils.BoolValue(r, "force") - - if err := sr.backend.RemoveNode(vars["id"], force); err != nil { - logrus.Errorf("Error removing node %s: %v", vars["id"], err) - return err - } - return nil -} - -func (sr *swarmRouter) getTasks(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - filter, err := filters.FromJSON(r.Form.Get("filters")) - if err != nil { - return err - } - - tasks, err := sr.backend.GetTasks(basictypes.TaskListOptions{Filters: filter}) - if err != nil { - logrus.Errorf("Error getting tasks: %v", err) - return err - } - - return httputils.WriteJSON(w, http.StatusOK, tasks) -} - -func (sr *swarmRouter) getTask(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - task, err := sr.backend.GetTask(vars["id"]) - if err != nil { - logrus.Errorf("Error getting task %s: %v", vars["id"], err) - return err - } - - return httputils.WriteJSON(w, http.StatusOK, task) -} - -func (sr *swarmRouter) getSecrets(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - filters, err := filters.FromJSON(r.Form.Get("filters")) - if err != nil { - return err - } - - secrets, err := sr.backend.GetSecrets(basictypes.SecretListOptions{Filters: filters}) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, secrets) -} - -func (sr *swarmRouter) createSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var secret types.SecretSpec - if err := json.NewDecoder(r.Body).Decode(&secret); err != nil { - return err - } - version := httputils.VersionFromContext(ctx) - if secret.Templating != nil && versions.LessThan(version, "1.37") { - return errdefs.InvalidParameter(errors.Errorf("secret templating is not supported on the specified API version: %s", version)) - } - - id, err := sr.backend.CreateSecret(secret) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusCreated, &basictypes.SecretCreateResponse{ - ID: id, - }) -} - -func (sr *swarmRouter) removeSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := sr.backend.RemoveSecret(vars["id"]); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - - return nil -} - -func (sr *swarmRouter) getSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - secret, err := sr.backend.GetSecret(vars["id"]) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, secret) -} - -func (sr *swarmRouter) updateSecret(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var secret types.SecretSpec - if err := json.NewDecoder(r.Body).Decode(&secret); err != nil { - return errdefs.InvalidParameter(err) - } - - rawVersion := r.URL.Query().Get("version") - 
version, err := strconv.ParseUint(rawVersion, 10, 64) - if err != nil { - return errdefs.InvalidParameter(fmt.Errorf("invalid secret version")) - } - - id := vars["id"] - return sr.backend.UpdateSecret(id, version, secret) -} - -func (sr *swarmRouter) getConfigs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - filters, err := filters.FromJSON(r.Form.Get("filters")) - if err != nil { - return err - } - - configs, err := sr.backend.GetConfigs(basictypes.ConfigListOptions{Filters: filters}) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, configs) -} - -func (sr *swarmRouter) createConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var config types.ConfigSpec - if err := json.NewDecoder(r.Body).Decode(&config); err != nil { - return err - } - - version := httputils.VersionFromContext(ctx) - if config.Templating != nil && versions.LessThan(version, "1.37") { - return errdefs.InvalidParameter(errors.Errorf("config templating is not supported on the specified API version: %s", version)) - } - - id, err := sr.backend.CreateConfig(config) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusCreated, &basictypes.ConfigCreateResponse{ - ID: id, - }) -} - -func (sr *swarmRouter) removeConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := sr.backend.RemoveConfig(vars["id"]); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - - return nil -} - -func (sr *swarmRouter) getConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - config, err := sr.backend.GetConfig(vars["id"]) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, config) -} - -func (sr *swarmRouter) updateConfig(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var config types.ConfigSpec - if err := json.NewDecoder(r.Body).Decode(&config); err != nil { - return errdefs.InvalidParameter(err) - } - - rawVersion := r.URL.Query().Get("version") - version, err := strconv.ParseUint(rawVersion, 10, 64) - if err != nil { - return errdefs.InvalidParameter(fmt.Errorf("invalid config version")) - } - - id := vars["id"] - return sr.backend.UpdateConfig(id, version, config) -} diff --git a/vendor/github.com/docker/docker/api/server/router/swarm/helpers.go b/vendor/github.com/docker/docker/api/server/router/swarm/helpers.go deleted file mode 100644 index 1f57074f9..000000000 --- a/vendor/github.com/docker/docker/api/server/router/swarm/helpers.go +++ /dev/null @@ -1,66 +0,0 @@ -package swarm // import "github.com/docker/docker/api/server/router/swarm" - -import ( - "context" - "fmt" - "io" - "net/http" - - "github.com/docker/docker/api/server/httputils" - basictypes "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/backend" -) - -// swarmLogs takes an http response, request, and selector, and writes the logs - // specified by the selector to the response -func (sr *swarmRouter) swarmLogs(ctx context.Context, w io.Writer, r *http.Request, selector *backend.LogSelector) error { - // Args are validated before the stream starts because when it starts we're - // sending HTTP 200 by writing an empty chunk of data to tell the client that - // the daemon is going to stream.
By sending this initial HTTP 200 we can't report - any error after the stream starts (i.e. container not found, wrong parameters) - with the appropriate status code. - stdout, stderr := httputils.BoolValue(r, "stdout"), httputils.BoolValue(r, "stderr") - if !(stdout || stderr) { - return fmt.Errorf("Bad parameters: you must choose at least one stream") - } - - // there is probably a neater way to manufacture the ContainerLogsOptions - // struct, probably in the caller, to eliminate the dependency on net/http - logsConfig := &basictypes.ContainerLogsOptions{ - Follow: httputils.BoolValue(r, "follow"), - Timestamps: httputils.BoolValue(r, "timestamps"), - Since: r.Form.Get("since"), - Tail: r.Form.Get("tail"), - ShowStdout: stdout, - ShowStderr: stderr, - Details: httputils.BoolValue(r, "details"), - } - - tty := false - // Checking whether logs are TTY involves iterating over every service - // and task; it is unclear whether there is a better way. - for _, service := range selector.Services { - s, err := sr.backend.GetService(service, false) - if err != nil { - // maybe we should return some context with this error? - return err - } - tty = (s.Spec.TaskTemplate.ContainerSpec != nil && s.Spec.TaskTemplate.ContainerSpec.TTY) || tty - } - for _, task := range selector.Tasks { - t, err := sr.backend.GetTask(task) - if err != nil { - // as above - return err - } - tty = t.Spec.ContainerSpec.TTY || tty - } - - msgs, err := sr.backend.ServiceLogs(ctx, selector, logsConfig) - if err != nil { - return err - } - - httputils.WriteLogStream(ctx, w, msgs, logsConfig, !tty) - return nil -} diff --git a/vendor/github.com/docker/docker/api/server/router/system/backend.go b/vendor/github.com/docker/docker/api/server/router/system/backend.go deleted file mode 100644 index f5d2d9810..000000000 --- a/vendor/github.com/docker/docker/api/server/router/system/backend.go +++ /dev/null @@ -1,28 +0,0 @@ -package system // import "github.com/docker/docker/api/server/router/system" - -import ( - "context" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/events" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" -) - -// Backend defines the methods that need to be implemented to provide -// system-specific functionality. -type Backend interface { - SystemInfo() (*types.Info, error) - SystemVersion() types.Version - SystemDiskUsage(ctx context.Context) (*types.DiskUsage, error) - SubscribeToEvents(since, until time.Time, ef filters.Args) ([]events.Message, chan interface{}) - UnsubscribeFromEvents(chan interface{}) - AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error) -} - -// ClusterBackend defines all the methods that need to be implemented -// to provide cluster system-specific functionality. -type ClusterBackend interface { - Info() swarm.Info -} diff --git a/vendor/github.com/docker/docker/api/server/router/system/system.go b/vendor/github.com/docker/docker/api/server/router/system/system.go deleted file mode 100644 index ebb840a89..000000000 --- a/vendor/github.com/docker/docker/api/server/router/system/system.go +++ /dev/null @@ -1,41 +0,0 @@ -package system // import "github.com/docker/docker/api/server/router/system" - -import ( - "github.com/docker/docker/api/server/router" - "github.com/docker/docker/builder/fscache" -) - -// systemRouter provides information about the Docker system overall. -// It gathers information about the host, the daemon, and container events.
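// Hedged client-side sketch (illustration only; the daemon address and API
// version are assumptions): the /events route registered below streams one
// JSON-encoded events.Message per event, so a consumer decodes in a loop.
//
//	resp, _ := http.Get("http://localhost:2375/v1.39/events")
//	dec := json.NewDecoder(resp.Body)
//	for {
//		var m events.Message
//		if err := dec.Decode(&m); err != nil {
//			break // e.g. io.EOF once the daemon closes the stream
//		}
//		fmt.Println(m.Type, m.Action)
//	}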
-type systemRouter struct { - backend Backend - cluster ClusterBackend - routes []router.Route - builder *fscache.FSCache -} - -// NewRouter initializes a new system router -func NewRouter(b Backend, c ClusterBackend, fscache *fscache.FSCache) router.Router { - r := &systemRouter{ - backend: b, - cluster: c, - builder: fscache, - } - - r.routes = []router.Route{ - router.NewOptionsRoute("/{anyroute:.*}", optionsHandler), - router.NewGetRoute("/_ping", pingHandler), - router.NewGetRoute("/events", r.getEvents, router.WithCancel), - router.NewGetRoute("/info", r.getInfo), - router.NewGetRoute("/version", r.getVersion), - router.NewGetRoute("/system/df", r.getDiskUsage, router.WithCancel), - router.NewPostRoute("/auth", r.postAuth), - } - - return r -} - -// Routes returns all the API routes dedicated to the docker system -func (s *systemRouter) Routes() []router.Route { - return s.routes -} diff --git a/vendor/github.com/docker/docker/api/server/router/system/system_routes.go b/vendor/github.com/docker/docker/api/server/router/system/system_routes.go deleted file mode 100644 index 573496886..000000000 --- a/vendor/github.com/docker/docker/api/server/router/system/system_routes.go +++ /dev/null @@ -1,199 +0,0 @@ -package system // import "github.com/docker/docker/api/server/router/system" - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "time" - - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/events" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/registry" - timetypes "github.com/docker/docker/api/types/time" - "github.com/docker/docker/api/types/versions" - "github.com/docker/docker/pkg/ioutils" - pkgerrors "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - w.WriteHeader(http.StatusOK) - return nil -} - -func pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - _, err := w.Write([]byte{'O', 'K'}) - return err -} - -func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - info, err := s.backend.SystemInfo() - if err != nil { - return err - } - if s.cluster != nil { - info.Swarm = s.cluster.Info() - } - - if versions.LessThan(httputils.VersionFromContext(ctx), "1.25") { - // TODO: handle this conversion in engine-api - type oldInfo struct { - *types.Info - ExecutionDriver string - } - old := &oldInfo{ - Info: info, - ExecutionDriver: "", - } - nameOnlySecurityOptions := []string{} - kvSecOpts, err := types.DecodeSecurityOptions(old.SecurityOptions) - if err != nil { - return err - } - for _, s := range kvSecOpts { - nameOnlySecurityOptions = append(nameOnlySecurityOptions, s.Name) - } - old.SecurityOptions = nameOnlySecurityOptions - return httputils.WriteJSON(w, http.StatusOK, old) - } - return httputils.WriteJSON(w, http.StatusOK, info) -} - -func (s *systemRouter) getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - info := s.backend.SystemVersion() - - return httputils.WriteJSON(w, http.StatusOK, info) -} - -func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - du, err := s.backend.SystemDiskUsage(ctx) - if err != nil { - return err - } - builderSize, err := s.builder.DiskUsage(ctx) - if 
err != nil { - return pkgerrors.Wrap(err, "error getting build cache usage") - } - du.BuilderSize = builderSize - - return httputils.WriteJSON(w, http.StatusOK, du) -} - -type invalidRequestError struct { - Err error -} - -func (e invalidRequestError) Error() string { - return e.Err.Error() -} - -func (e invalidRequestError) InvalidParameter() {} - -func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - since, err := eventTime(r.Form.Get("since")) - if err != nil { - return err - } - until, err := eventTime(r.Form.Get("until")) - if err != nil { - return err - } - - var ( - timeout <-chan time.Time - onlyPastEvents bool - ) - if !until.IsZero() { - if until.Before(since) { - return invalidRequestError{fmt.Errorf("`since` time (%s) cannot be after `until` time (%s)", r.Form.Get("since"), r.Form.Get("until"))} - } - - now := time.Now() - - onlyPastEvents = until.Before(now) - - if !onlyPastEvents { - dur := until.Sub(now) - timeout = time.After(dur) - } - } - - ef, err := filters.FromJSON(r.Form.Get("filters")) - if err != nil { - return err - } - - w.Header().Set("Content-Type", "application/json") - output := ioutils.NewWriteFlusher(w) - defer output.Close() - output.Flush() - - enc := json.NewEncoder(output) - - buffered, l := s.backend.SubscribeToEvents(since, until, ef) - defer s.backend.UnsubscribeFromEvents(l) - - for _, ev := range buffered { - if err := enc.Encode(ev); err != nil { - return err - } - } - - if onlyPastEvents { - return nil - } - - for { - select { - case ev := <-l: - jev, ok := ev.(events.Message) - if !ok { - logrus.Warnf("unexpected event message: %q", ev) - continue - } - if err := enc.Encode(jev); err != nil { - return err - } - case <-timeout: - return nil - case <-ctx.Done(): - logrus.Debug("Client context cancelled, stop sending events") - return nil - } - } -} - -func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var config *types.AuthConfig - err := json.NewDecoder(r.Body).Decode(&config) - r.Body.Close() - if err != nil { - return err - } - status, token, err := s.backend.AuthenticateToRegistry(ctx, config) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, &registry.AuthenticateOKBody{ - Status: status, - IdentityToken: token, - }) -} - -func eventTime(formTime string) (time.Time, error) { - t, tNano, err := timetypes.ParseTimestamps(formTime, -1) - if err != nil { - return time.Time{}, err - } - if t == -1 { - return time.Time{}, nil - } - return time.Unix(t, tNano), nil -} diff --git a/vendor/github.com/docker/docker/api/server/router/volume/backend.go b/vendor/github.com/docker/docker/api/server/router/volume/backend.go deleted file mode 100644 index 31558c178..000000000 --- a/vendor/github.com/docker/docker/api/server/router/volume/backend.go +++ /dev/null @@ -1,20 +0,0 @@ -package volume // import "github.com/docker/docker/api/server/router/volume" - -import ( - "context" - - "github.com/docker/docker/volume/service/opts" - // TODO return types need to be refactored into pkg - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" -) - -// Backend defines the methods that need to be implemented to provide -// volume-specific functionality -type Backend interface { - List(ctx context.Context, filter filters.Args) ([]*types.Volume, []string, error) - Get(ctx context.Context, name string,
opts ...opts.GetOption) (*types.Volume, error) - Create(ctx context.Context, name, driverName string, opts ...opts.CreateOption) (*types.Volume, error) - Remove(ctx context.Context, name string, opts ...opts.RemoveOption) error - Prune(ctx context.Context, pruneFilters filters.Args) (*types.VolumesPruneReport, error) -} diff --git a/vendor/github.com/docker/docker/api/server/router/volume/volume.go b/vendor/github.com/docker/docker/api/server/router/volume/volume.go deleted file mode 100644 index 04f365e37..000000000 --- a/vendor/github.com/docker/docker/api/server/router/volume/volume.go +++ /dev/null @@ -1,36 +0,0 @@ -package volume // import "github.com/docker/docker/api/server/router/volume" - -import "github.com/docker/docker/api/server/router" - -// volumeRouter is a router to talk with the volumes controller -type volumeRouter struct { - backend Backend - routes []router.Route -} - -// NewRouter initializes a new volume router -func NewRouter(b Backend) router.Router { - r := &volumeRouter{ - backend: b, - } - r.initRoutes() - return r -} - -// Routes returns the available routes to the volumes controller -func (r *volumeRouter) Routes() []router.Route { - return r.routes -} - -func (r *volumeRouter) initRoutes() { - r.routes = []router.Route{ - // GET - router.NewGetRoute("/volumes", r.getVolumesList), - router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName), - // POST - router.NewPostRoute("/volumes/create", r.postVolumesCreate), - router.NewPostRoute("/volumes/prune", r.postVolumesPrune, router.WithCancel), - // DELETE - router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes), - } -} diff --git a/vendor/github.com/docker/docker/api/server/router/volume/volume_routes.go b/vendor/github.com/docker/docker/api/server/router/volume/volume_routes.go deleted file mode 100644 index e892d1a52..000000000 --- a/vendor/github.com/docker/docker/api/server/router/volume/volume_routes.go +++ /dev/null @@ -1,96 +0,0 @@ -package volume // import "github.com/docker/docker/api/server/router/volume" - -import ( - "context" - "encoding/json" - "io" - "net/http" - - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/types/filters" - volumetypes "github.com/docker/docker/api/types/volume" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/volume/service/opts" - "github.com/pkg/errors" -) - -func (v *volumeRouter) getVolumesList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - filters, err := filters.FromJSON(r.Form.Get("filters")) - if err != nil { - return errdefs.InvalidParameter(errors.Wrap(err, "error reading volume filters")) - } - volumes, warnings, err := v.backend.List(ctx, filters) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, &volumetypes.VolumeListOKBody{Volumes: volumes, Warnings: warnings}) -} - -func (v *volumeRouter) getVolumeByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - volume, err := v.backend.Get(ctx, vars["name"], opts.WithGetResolveStatus) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, volume) -} - -func (v *volumeRouter) postVolumesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - if err := 
httputils.CheckForJSON(r); err != nil { - return err - } - - var req volumetypes.VolumeCreateBody - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - if err == io.EOF { - return errdefs.InvalidParameter(errors.New("got EOF while reading request body")) - } - return err - } - - volume, err := v.backend.Create(ctx, req.Name, req.Driver, opts.WithCreateOptions(req.DriverOpts), opts.WithCreateLabels(req.Labels)) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusCreated, volume) -} - -func (v *volumeRouter) deleteVolumes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - force := httputils.BoolValue(r, "force") - if err := v.backend.Remove(ctx, vars["name"], opts.WithPurgeOnError(force)); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func (v *volumeRouter) postVolumesPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - pruneFilters, err := filters.FromJSON(r.Form.Get("filters")) - if err != nil { - return err - } - - pruneReport, err := v.backend.Prune(ctx, pruneFilters) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, pruneReport) -} diff --git a/vendor/github.com/docker/docker/api/server/router_swapper.go b/vendor/github.com/docker/docker/api/server/router_swapper.go deleted file mode 100644 index e8087492c..000000000 --- a/vendor/github.com/docker/docker/api/server/router_swapper.go +++ /dev/null @@ -1,30 +0,0 @@ -package server // import "github.com/docker/docker/api/server" - -import ( - "net/http" - "sync" - - "github.com/gorilla/mux" -) - -// routerSwapper is an http.Handler that allows you to swap -// mux routers. -type routerSwapper struct { - mu sync.Mutex - router *mux.Router -} - -// Swap changes the old router with the new one. -func (rs *routerSwapper) Swap(newRouter *mux.Router) { - rs.mu.Lock() - rs.router = newRouter - rs.mu.Unlock() -} - -// ServeHTTP makes the routerSwapper to implement the http.Handler interface. -func (rs *routerSwapper) ServeHTTP(w http.ResponseWriter, r *http.Request) { - rs.mu.Lock() - router := rs.router - rs.mu.Unlock() - router.ServeHTTP(w, r) -} diff --git a/vendor/github.com/docker/docker/api/server/server.go b/vendor/github.com/docker/docker/api/server/server.go deleted file mode 100644 index 3874a56ce..000000000 --- a/vendor/github.com/docker/docker/api/server/server.go +++ /dev/null @@ -1,209 +0,0 @@ -package server // import "github.com/docker/docker/api/server" - -import ( - "context" - "crypto/tls" - "net" - "net/http" - "strings" - - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/server/middleware" - "github.com/docker/docker/api/server/router" - "github.com/docker/docker/api/server/router/debug" - "github.com/docker/docker/dockerversion" - "github.com/gorilla/mux" - "github.com/sirupsen/logrus" -) - -// versionMatcher defines a variable matcher to be parsed by the router -// when a request is about to be served. 
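// A minimal, self-contained sketch (not from the vendored sources) of how a
// gorilla/mux pattern like the version matcher defined below behaves; the
// route, handler, and address here are invented for the example.

package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func apiInfo(w http.ResponseWriter, r *http.Request) {
	// mux.Vars exposes the captured version; it is "" for the unversioned path.
	fmt.Fprintf(w, "requested API version: %q\n", mux.Vars(r)["version"])
}

func main() {
	m := mux.NewRouter()
	// "{version:[0-9.]+}" captures the version segment, so /v1.40/info and
	// /info can both be routed to the same handler.
	m.Path("/v{version:[0-9.]+}/info").HandlerFunc(apiInfo)
	m.Path("/info").HandlerFunc(apiInfo)
	_ = http.ListenAndServe("127.0.0.1:8080", m) // error handling elided
}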
-const versionMatcher = "/v{version:[0-9.]+}" - -// Config provides the configuration for the API server -type Config struct { - Logging bool - CorsHeaders string - Version string - SocketGroup string - TLSConfig *tls.Config -} - -// Server contains instance details for the server -type Server struct { - cfg *Config - servers []*HTTPServer - routers []router.Router - routerSwapper *routerSwapper - middlewares []middleware.Middleware -} - -// New returns a new instance of the server based on the specified configuration. -// It allocates resources which will be needed for ServeAPI(ports, unix-sockets). -func New(cfg *Config) *Server { - return &Server{ - cfg: cfg, - } -} - -// UseMiddleware appends a new middleware to the request chain. -// This needs to be called before the API routes are configured. -func (s *Server) UseMiddleware(m middleware.Middleware) { - s.middlewares = append(s.middlewares, m) -} - -// Accept sets a listener the server accepts connections into. -func (s *Server) Accept(addr string, listeners ...net.Listener) { - for _, listener := range listeners { - httpServer := &HTTPServer{ - srv: &http.Server{ - Addr: addr, - }, - l: listener, - } - s.servers = append(s.servers, httpServer) - } -} - -// Close closes servers and thus stop receiving requests -func (s *Server) Close() { - for _, srv := range s.servers { - if err := srv.Close(); err != nil { - logrus.Error(err) - } - } -} - -// serveAPI loops through all initialized servers and spawns goroutine -// with Serve method for each. It sets createMux() as Handler also. -func (s *Server) serveAPI() error { - var chErrors = make(chan error, len(s.servers)) - for _, srv := range s.servers { - srv.srv.Handler = s.routerSwapper - go func(srv *HTTPServer) { - var err error - logrus.Infof("API listen on %s", srv.l.Addr()) - if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") { - err = nil - } - chErrors <- err - }(srv) - } - - for range s.servers { - err := <-chErrors - if err != nil { - return err - } - } - return nil -} - -// HTTPServer contains an instance of http server and the listener. -// srv *http.Server, contains configuration to create an http server and a mux router with all api end points. -// l net.Listener, is a TCP or Socket listener that dispatches incoming request to the router. -type HTTPServer struct { - srv *http.Server - l net.Listener -} - -// Serve starts listening for inbound requests. -func (s *HTTPServer) Serve() error { - return s.srv.Serve(s.l) -} - -// Close closes the HTTPServer from listening for the inbound requests. -func (s *HTTPServer) Close() error { - return s.l.Close() -} - -func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - // Define the context that we'll pass around to share info - // like the docker-request-id. - // - // The 'context' will be used for global data that should - // apply to all requests. Data that is specific to the - // immediate function being called should still be passed - // as 'args' on the function call. 
- - // use intermediate variable to prevent "should not use basic type - // string as key in context.WithValue" golint errors - var ki interface{} = dockerversion.UAStringKey - ctx := context.WithValue(context.Background(), ki, r.Header.Get("User-Agent")) - handlerFunc := s.handlerWithGlobalMiddlewares(handler) - - vars := mux.Vars(r) - if vars == nil { - vars = make(map[string]string) - } - - if err := handlerFunc(ctx, w, r, vars); err != nil { - statusCode := httputils.GetHTTPErrorStatusCode(err) - if statusCode >= 500 { - logrus.Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err) - } - httputils.MakeErrorHandler(err)(w, r) - } - } -} - -// InitRouter initializes the list of routers for the server. -// This method also enables the Go profiler if enableProfiler is true. -func (s *Server) InitRouter(routers ...router.Router) { - s.routers = append(s.routers, routers...) - - m := s.createMux() - s.routerSwapper = &routerSwapper{ - router: m, - } -} - -type pageNotFoundError struct{} - -func (pageNotFoundError) Error() string { - return "page not found" -} - -func (pageNotFoundError) NotFound() {} - -// createMux initializes the main router the server uses. -func (s *Server) createMux() *mux.Router { - m := mux.NewRouter() - - logrus.Debug("Registering routers") - for _, apiRouter := range s.routers { - for _, r := range apiRouter.Routes() { - f := s.makeHTTPHandler(r.Handler()) - - logrus.Debugf("Registering %s, %s", r.Method(), r.Path()) - m.Path(versionMatcher + r.Path()).Methods(r.Method()).Handler(f) - m.Path(r.Path()).Methods(r.Method()).Handler(f) - } - } - - debugRouter := debug.NewRouter() - s.routers = append(s.routers, debugRouter) - for _, r := range debugRouter.Routes() { - f := s.makeHTTPHandler(r.Handler()) - m.Path("/debug" + r.Path()).Handler(f) - } - - notFoundHandler := httputils.MakeErrorHandler(pageNotFoundError{}) - m.HandleFunc(versionMatcher+"/{path:.*}", notFoundHandler) - m.NotFoundHandler = notFoundHandler - - return m -} - -// Wait blocks the server goroutine until it exits. -// It sends an error message if there is any error during -// the API execution. -func (s *Server) Wait(waitChan chan error) { - if err := s.serveAPI(); err != nil { - logrus.Errorf("ServeAPI error: %v", err) - waitChan <- err - return - } - waitChan <- nil -} diff --git a/vendor/github.com/docker/docker/api/templates/server/operation.gotmpl b/vendor/github.com/docker/docker/api/templates/server/operation.gotmpl deleted file mode 100644 index 8bed59d92..000000000 --- a/vendor/github.com/docker/docker/api/templates/server/operation.gotmpl +++ /dev/null @@ -1,26 +0,0 @@ -package {{ .Package }} - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -import ( - "net/http" - - context "golang.org/x/net/context" - - {{ range .DefaultImports }}{{ printf "%q" . }} - {{ end }} - {{ range $key, $value := .Imports }}{{ $key }} {{ printf "%q" $value }} - {{ end }} -) - - -{{ range .ExtraSchemas }} -// {{ .Name }} {{ comment .Description }} -// swagger:model {{ .Name }} -{{ template "schema" . 
}}
-{{ end }}
diff --git a/vendor/github.com/docker/docker/api/types/auth.go b/vendor/github.com/docker/docker/api/types/auth.go
deleted file mode 100644
index ddf15bb18..000000000
--- a/vendor/github.com/docker/docker/api/types/auth.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package types // import "github.com/docker/docker/api/types"
-
-// AuthConfig contains authorization information for connecting to a Registry
-type AuthConfig struct {
-	Username string `json:"username,omitempty"`
-	Password string `json:"password,omitempty"`
-	Auth     string `json:"auth,omitempty"`
-
-	// Email is an optional value associated with the username.
-	// This field is deprecated and will be removed in a later
-	// version of docker.
-	Email string `json:"email,omitempty"`
-
-	ServerAddress string `json:"serveraddress,omitempty"`
-
-	// IdentityToken is used to authenticate the user and get
-	// an access token for the registry.
-	IdentityToken string `json:"identitytoken,omitempty"`
-
-	// RegistryToken is a bearer token to be sent to a registry
-	RegistryToken string `json:"registrytoken,omitempty"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/backend/backend.go b/vendor/github.com/docker/docker/api/types/backend/backend.go
deleted file mode 100644
index ef1e669c3..000000000
--- a/vendor/github.com/docker/docker/api/types/backend/backend.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Package backend includes types to send information to server backends.
-package backend // import "github.com/docker/docker/api/types/backend"
-
-import (
-	"io"
-	"time"
-
-	"github.com/docker/docker/api/types/container"
-)
-
-// ContainerAttachConfig holds the streams to use when connecting to a container to view logs.
-type ContainerAttachConfig struct {
-	GetStreams func() (io.ReadCloser, io.Writer, io.Writer, error)
-	UseStdin   bool
-	UseStdout  bool
-	UseStderr  bool
-	Logs       bool
-	Stream     bool
-	DetachKeys string
-
-	// Used to signify that streams are multiplexed and therefore need a StdWriter to encode stdout/stderr messages accordingly.
-	// TODO @cpuguy83: This shouldn't be needed. It was only added so that http and websocket endpoints can use the same function, and the websocket function was not using a stdwriter prior to this change...
-	// HOWEVER, the websocket endpoint is using a single stream and SHOULD be encoded with stdout/stderr as is done for HTTP since it is still just a single stream.
-	// Since such a change is an API change unrelated to the current changeset we'll keep it as is here and change separately.
-	MuxStreams bool
-}
-
-// PartialLogMetaData provides meta data for a partial log message. Messages
-// exceeding a predefined size are split into chunks with this metadata. The
-// expectation is for the logger endpoints to assemble the chunks using this
-// metadata.
-type PartialLogMetaData struct {
-	Last    bool   // true if this message is the last of a partial group
-	ID      string // identifies the group of messages comprising a single record
-	Ordinal int    // ordering of the message within its partial group
-}
-
-// LogMessage is a data structure that represents a piece of output produced
-// by some container. The Line member is a slice of an array whose contents
-// can be changed after a log driver's Log() method returns.
-// Changes to this struct need to be reflected in the reset method in
-// daemon/logger/logger.go
-type LogMessage struct {
-	Line         []byte
-	Source       string
-	Timestamp    time.Time
-	Attrs        []LogAttr
-	PLogMetaData *PartialLogMetaData
-
-	// Err is an error associated with a message. Completeness of a message
-	// with Err is not expected, though it may be partially complete (fields
-	// may be missing, gibberish, or nil).
-	Err error
-}
-
-// LogAttr is used to hold the extra attributes available in the log message.
-type LogAttr struct {
-	Key   string
-	Value string
-}
-
-// LogSelector is a list of services and tasks that should be returned as part
-// of a log stream. It is similar to swarmapi.LogSelector, with the difference
-// that the names don't have to be resolved to IDs; this is mostly to avoid
-// accidents later where a swarmapi LogSelector might have been incorrectly
-// used verbatim (and to avoid the handler having to import swarmapi types).
-type LogSelector struct {
-	Services []string
-	Tasks    []string
-}
-
-// ContainerStatsConfig holds information for configuring the runtime
-// behavior of a backend.ContainerStats() call.
-type ContainerStatsConfig struct {
-	Stream    bool
-	OutStream io.Writer
-	Version   string
-}
-
-// ExecInspect holds information about a running process started
-// with docker exec.
-type ExecInspect struct {
-	ID            string
-	Running       bool
-	ExitCode      *int
-	ProcessConfig *ExecProcessConfig
-	OpenStdin     bool
-	OpenStderr    bool
-	OpenStdout    bool
-	CanRemove     bool
-	ContainerID   string
-	DetachKeys    []byte
-	Pid           int
-}
-
-// ExecProcessConfig holds information about the exec process
-// running on the host.
-type ExecProcessConfig struct {
-	Tty        bool     `json:"tty"`
-	Entrypoint string   `json:"entrypoint"`
-	Arguments  []string `json:"arguments"`
-	Privileged *bool    `json:"privileged,omitempty"`
-	User       string   `json:"user,omitempty"`
-}
-
-// CreateImageConfig is the configuration for creating an image from a
-// container.
-type CreateImageConfig struct {
-	Repo    string
-	Tag     string
-	Pause   bool
-	Author  string
-	Comment string
-	Config  *container.Config
-	Changes []string
-}
-
-// CommitConfig is the configuration for creating an image as part of a build.
-type CommitConfig struct { - Author string - Comment string - Config *container.Config - ContainerConfig *container.Config - ContainerID string - ContainerMountLabel string - ContainerOS string - ParentImageID string -} diff --git a/vendor/github.com/docker/docker/api/types/backend/build.go b/vendor/github.com/docker/docker/api/types/backend/build.go deleted file mode 100644 index 31e00ec6c..000000000 --- a/vendor/github.com/docker/docker/api/types/backend/build.go +++ /dev/null @@ -1,44 +0,0 @@ -package backend // import "github.com/docker/docker/api/types/backend" - -import ( - "io" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/streamformatter" -) - -// PullOption defines different modes for accessing images -type PullOption int - -const ( - // PullOptionNoPull only returns local images - PullOptionNoPull PullOption = iota - // PullOptionForcePull always tries to pull a ref from the registry first - PullOptionForcePull - // PullOptionPreferLocal uses local image if it exists, otherwise pulls - PullOptionPreferLocal -) - -// ProgressWriter is a data object to transport progress streams to the client -type ProgressWriter struct { - Output io.Writer - StdoutFormatter io.Writer - StderrFormatter io.Writer - AuxFormatter *streamformatter.AuxFormatter - ProgressReaderFunc func(io.ReadCloser) io.ReadCloser -} - -// BuildConfig is the configuration used by a BuildManager to start a build -type BuildConfig struct { - Source io.ReadCloser - ProgressWriter ProgressWriter - Options *types.ImageBuildOptions -} - -// GetImageAndLayerOptions are the options supported by GetImageAndReleasableLayer -type GetImageAndLayerOptions struct { - PullOption PullOption - AuthConfig map[string]types.AuthConfig - Output io.Writer - OS string -} diff --git a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go deleted file mode 100644 index bf3463b90..000000000 --- a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go +++ /dev/null @@ -1,23 +0,0 @@ -package blkiodev // import "github.com/docker/docker/api/types/blkiodev" - -import "fmt" - -// WeightDevice is a structure that holds device:weight pair -type WeightDevice struct { - Path string - Weight uint16 -} - -func (w *WeightDevice) String() string { - return fmt.Sprintf("%s:%d", w.Path, w.Weight) -} - -// ThrottleDevice is a structure that holds device:rate_per_second pair -type ThrottleDevice struct { - Path string - Rate uint64 -} - -func (t *ThrottleDevice) String() string { - return fmt.Sprintf("%s:%d", t.Path, t.Rate) -} diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go deleted file mode 100644 index 3d2e057c9..000000000 --- a/vendor/github.com/docker/docker/api/types/client.go +++ /dev/null @@ -1,390 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "bufio" - "io" - "net" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" - "github.com/docker/go-units" -) - -// CheckpointCreateOptions holds parameters to create a checkpoint from a container -type CheckpointCreateOptions struct { - CheckpointID string - CheckpointDir string - Exit bool -} - -// CheckpointListOptions holds parameters to list checkpoints for a container -type CheckpointListOptions struct { - CheckpointDir string -} - -// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container -type CheckpointDeleteOptions struct { - 
CheckpointID string - CheckpointDir string -} - -// ContainerAttachOptions holds parameters to attach to a container. -type ContainerAttachOptions struct { - Stream bool - Stdin bool - Stdout bool - Stderr bool - DetachKeys string - Logs bool -} - -// ContainerCommitOptions holds parameters to commit changes into a container. -type ContainerCommitOptions struct { - Reference string - Comment string - Author string - Changes []string - Pause bool - Config *container.Config -} - -// ContainerExecInspect holds information returned by exec inspect. -type ContainerExecInspect struct { - ExecID string - ContainerID string - Running bool - ExitCode int - Pid int -} - -// ContainerListOptions holds parameters to list containers with. -type ContainerListOptions struct { - Quiet bool - Size bool - All bool - Latest bool - Since string - Before string - Limit int - Filters filters.Args -} - -// ContainerLogsOptions holds parameters to filter logs with. -type ContainerLogsOptions struct { - ShowStdout bool - ShowStderr bool - Since string - Until string - Timestamps bool - Follow bool - Tail string - Details bool -} - -// ContainerRemoveOptions holds parameters to remove containers. -type ContainerRemoveOptions struct { - RemoveVolumes bool - RemoveLinks bool - Force bool -} - -// ContainerStartOptions holds parameters to start containers. -type ContainerStartOptions struct { - CheckpointID string - CheckpointDir string -} - -// CopyToContainerOptions holds information -// about files to copy into a container -type CopyToContainerOptions struct { - AllowOverwriteDirWithFile bool - CopyUIDGID bool -} - -// EventsOptions holds parameters to filter events with. -type EventsOptions struct { - Since string - Until string - Filters filters.Args -} - -// NetworkListOptions holds parameters to filter the list of networks with. -type NetworkListOptions struct { - Filters filters.Args -} - -// HijackedResponse holds connection information for a hijacked request. -type HijackedResponse struct { - Conn net.Conn - Reader *bufio.Reader -} - -// Close closes the hijacked connection and reader. -func (h *HijackedResponse) Close() { - h.Conn.Close() -} - -// CloseWriter is an interface that implements structs -// that close input streams to prevent from writing. -type CloseWriter interface { - CloseWrite() error -} - -// CloseWrite closes a readWriter for writing. -func (h *HijackedResponse) CloseWrite() error { - if conn, ok := h.Conn.(CloseWriter); ok { - return conn.CloseWrite() - } - return nil -} - -// ImageBuildOptions holds the information -// necessary to build images. -type ImageBuildOptions struct { - Tags []string - SuppressOutput bool - RemoteContext string - NoCache bool - Remove bool - ForceRemove bool - PullParent bool - Isolation container.Isolation - CPUSetCPUs string - CPUSetMems string - CPUShares int64 - CPUQuota int64 - CPUPeriod int64 - Memory int64 - MemorySwap int64 - CgroupParent string - NetworkMode string - ShmSize int64 - Dockerfile string - Ulimits []*units.Ulimit - // BuildArgs needs to be a *string instead of just a string so that - // we can tell the difference between "" (empty string) and no value - // at all (nil). See the parsing of buildArgs in - // api/server/router/build/build_routes.go for even more info. 
- BuildArgs map[string]*string - AuthConfigs map[string]AuthConfig - Context io.Reader - Labels map[string]string - // squash the resulting image's layers to the parent - // preserves the original image and creates a new one from the parent with all - // the changes applied to a single layer - Squash bool - // CacheFrom specifies images that are used for matching cache. Images - // specified here do not need to have a valid parent chain to match cache. - CacheFrom []string - SecurityOpt []string - ExtraHosts []string // List of extra hosts - Target string - SessionID string - Platform string -} - -// ImageBuildResponse holds information -// returned by a server after building -// an image. -type ImageBuildResponse struct { - Body io.ReadCloser - OSType string -} - -// ImageCreateOptions holds information to create images. -type ImageCreateOptions struct { - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry. - Platform string // Platform is the target platform of the image if it needs to be pulled from the registry. -} - -// ImageImportSource holds source information for ImageImport -type ImageImportSource struct { - Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this. - SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute. -} - -// ImageImportOptions holds information to import images from the client host. -type ImageImportOptions struct { - Tag string // Tag is the name to tag this image with. This attribute is deprecated. - Message string // Message is the message to tag the image with - Changes []string // Changes are the raw changes to apply to this image - Platform string // Platform is the target platform of the image -} - -// ImageListOptions holds parameters to filter the list of images with. -type ImageListOptions struct { - All bool - Filters filters.Args -} - -// ImageLoadResponse returns information to the client about a load process. -type ImageLoadResponse struct { - // Body must be closed to avoid a resource leak - Body io.ReadCloser - JSON bool -} - -// ImagePullOptions holds information to pull images. -type ImagePullOptions struct { - All bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - PrivilegeFunc RequestPrivilegeFunc - Platform string -} - -// RequestPrivilegeFunc is a function interface that -// clients can supply to retry operations after -// getting an authorization error. -// This function returns the registry authentication -// header value in base 64 format, or an error -// if the privilege request fails. -type RequestPrivilegeFunc func() (string, error) - -//ImagePushOptions holds information to push images. -type ImagePushOptions ImagePullOptions - -// ImageRemoveOptions holds parameters to remove images. -type ImageRemoveOptions struct { - Force bool - PruneChildren bool -} - -// ImageSearchOptions holds parameters to search images with. -type ImageSearchOptions struct { - RegistryAuth string - PrivilegeFunc RequestPrivilegeFunc - Filters filters.Args - Limit int -} - -// ResizeOptions holds parameters to resize a tty. -// It can be used to resize container ttys and -// exec process ttys too. -type ResizeOptions struct { - Height uint - Width uint -} - -// NodeListOptions holds parameters to list nodes with. -type NodeListOptions struct { - Filters filters.Args -} - -// NodeRemoveOptions holds parameters to remove nodes with. 
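// To illustrate the BuildArgs comment above, a standalone sketch of the
// nil-versus-empty-string distinction (the argument names are invented):

package main

import "fmt"

func main() {
	empty := ""
	buildArgs := map[string]*string{
		"HTTP_PROXY": &empty, // explicitly set to "" by the caller
		"NO_PROXY":   nil,    // declared, but given no value at all
	}
	for k, v := range buildArgs {
		if v == nil {
			fmt.Printf("%s: no value provided\n", k)
		} else {
			fmt.Printf("%s: set to %q\n", k, *v)
		}
	}
}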
-type NodeRemoveOptions struct { - Force bool -} - -// ServiceCreateOptions contains the options to use when creating a service. -type ServiceCreateOptions struct { - // EncodedRegistryAuth is the encoded registry authorization credentials to - // use when updating the service. - // - // This field follows the format of the X-Registry-Auth header. - EncodedRegistryAuth string - - // QueryRegistry indicates whether the service update requires - // contacting a registry. A registry may be contacted to retrieve - // the image digest and manifest, which in turn can be used to update - // platform or other information about the service. - QueryRegistry bool -} - -// ServiceCreateResponse contains the information returned to a client -// on the creation of a new service. -type ServiceCreateResponse struct { - // ID is the ID of the created service. - ID string - // Warnings is a set of non-fatal warning messages to pass on to the user. - Warnings []string `json:",omitempty"` -} - -// Values for RegistryAuthFrom in ServiceUpdateOptions -const ( - RegistryAuthFromSpec = "spec" - RegistryAuthFromPreviousSpec = "previous-spec" -) - -// ServiceUpdateOptions contains the options to be used for updating services. -type ServiceUpdateOptions struct { - // EncodedRegistryAuth is the encoded registry authorization credentials to - // use when updating the service. - // - // This field follows the format of the X-Registry-Auth header. - EncodedRegistryAuth string - - // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate - // into this field. While it does open API users up to racy writes, most - // users may not need that level of consistency in practice. - - // RegistryAuthFrom specifies where to find the registry authorization - // credentials if they are not given in EncodedRegistryAuth. Valid - // values are "spec" and "previous-spec". - RegistryAuthFrom string - - // Rollback indicates whether a server-side rollback should be - // performed. When this is set, the provided spec will be ignored. - // The valid values are "previous" and "none". An empty value is the - // same as "none". - Rollback string - - // QueryRegistry indicates whether the service update requires - // contacting a registry. A registry may be contacted to retrieve - // the image digest and manifest, which in turn can be used to update - // platform or other information about the service. - QueryRegistry bool -} - -// ServiceListOptions holds parameters to list services with. -type ServiceListOptions struct { - Filters filters.Args -} - -// ServiceInspectOptions holds parameters related to the "service inspect" -// operation. -type ServiceInspectOptions struct { - InsertDefaults bool -} - -// TaskListOptions holds parameters to list tasks with. -type TaskListOptions struct { - Filters filters.Args -} - -// PluginRemoveOptions holds parameters to remove plugins. -type PluginRemoveOptions struct { - Force bool -} - -// PluginEnableOptions holds parameters to enable plugins. -type PluginEnableOptions struct { - Timeout int -} - -// PluginDisableOptions holds parameters to disable plugins. -type PluginDisableOptions struct { - Force bool -} - -// PluginInstallOptions holds parameters to install a plugin. 
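// A hedged sketch of a RequestPrivilegeFunc (declared earlier in this file),
// since the PluginInstallOptions below accept one; the credentials and their
// JSON shape here are stand-ins, not the daemon's actual wire format.

package main

import (
	"encoding/base64"
	"fmt"
)

// privilegeFunc returns a registry auth header value in base64; a client
// retries the failed operation with this value after an authorization error.
func privilegeFunc() (string, error) {
	creds := []byte(`{"username":"user","password":"secret"}`)
	return base64.URLEncoding.EncodeToString(creds), nil
}

func main() {
	auth, err := privilegeFunc()
	if err != nil {
		panic(err)
	}
	fmt.Println("X-Registry-Auth:", auth)
}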
-type PluginInstallOptions struct { - Disabled bool - AcceptAllPermissions bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - RemoteRef string // RemoteRef is the plugin name on the registry - PrivilegeFunc RequestPrivilegeFunc - AcceptPermissionsFunc func(PluginPrivileges) (bool, error) - Args []string -} - -// SwarmUnlockKeyResponse contains the response for Engine API: -// GET /swarm/unlockkey -type SwarmUnlockKeyResponse struct { - // UnlockKey is the unlock key in ASCII-armored format. - UnlockKey string -} - -// PluginCreateOptions hold all options to plugin create. -type PluginCreateOptions struct { - RepoName string -} diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go deleted file mode 100644 index f6537a27f..000000000 --- a/vendor/github.com/docker/docker/api/types/configs.go +++ /dev/null @@ -1,57 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/network" -) - -// configs holds structs used for internal communication between the -// frontend (such as an http server) and the backend (such as the -// docker daemon). - -// ContainerCreateConfig is the parameter set to ContainerCreate() -type ContainerCreateConfig struct { - Name string - Config *container.Config - HostConfig *container.HostConfig - NetworkingConfig *network.NetworkingConfig - AdjustCPUShares bool -} - -// ContainerRmConfig holds arguments for the container remove -// operation. This struct is used to tell the backend what operations -// to perform. -type ContainerRmConfig struct { - ForceRemove, RemoveVolume, RemoveLink bool -} - -// ExecConfig is a small subset of the Config struct that holds the configuration -// for the exec feature of docker. -type ExecConfig struct { - User string // User that will run the command - Privileged bool // Is the container in privileged mode - Tty bool // Attach standard streams to a tty. - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStderr bool // Attach the standard error - AttachStdout bool // Attach the standard output - Detach bool // Execute in detach mode - DetachKeys string // Escape keys for detach - Env []string // Environment variables - WorkingDir string // Working directory - Cmd []string // Execution commands and args -} - -// PluginRmConfig holds arguments for plugin remove. -type PluginRmConfig struct { - ForceRemove bool -} - -// PluginEnableConfig holds arguments for plugin enable -type PluginEnableConfig struct { - Timeout int -} - -// PluginDisableConfig holds arguments for plugin disable. -type PluginDisableConfig struct { - ForceDisable bool -} diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go deleted file mode 100644 index 89ad08c23..000000000 --- a/vendor/github.com/docker/docker/api/types/container/config.go +++ /dev/null @@ -1,69 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -import ( - "time" - - "github.com/docker/docker/api/types/strslice" - "github.com/docker/go-connections/nat" -) - -// MinimumDuration puts a minimum on user configured duration. -// This is to prevent API error on time unit. For example, API may -// set 3 as healthcheck interval with intention of 3 seconds, but -// Docker interprets it as 3 nanoseconds. 
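// Illustrating the comment above: a bare integer decodes as nanoseconds, so a
// guard like the MinimumDuration constant below catches a "3" that was meant
// as seconds. Standalone sketch; the local constant mirrors the one below.

package main

import (
	"fmt"
	"time"
)

const minimumDuration = 1 * time.Millisecond

func main() {
	interval := time.Duration(3) // 3 nanoseconds, not 3 seconds
	if interval < minimumDuration {
		fmt.Printf("interval %v too small; did you mean %v?\n",
			interval, 3*time.Second)
	}
}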
-const MinimumDuration = 1 * time.Millisecond - -// HealthConfig holds configuration settings for the HEALTHCHECK feature. -type HealthConfig struct { - // Test is the test to perform to check that the container is healthy. - // An empty slice means to inherit the default. - // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:",omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. - Retries int `json:",omitempty"` -} - -// Config contains the configuration data about a container. -// It should hold only portable information about the container. -// Here, "portable" means "independent from the host we are running on". -// Non-portable information *should* appear in HostConfig. -// All fields added to this struct must be marked `omitempty` to keep getting -// predictable hashes from the old `v1Compatibility` configuration. -type Config struct { - Hostname string // Hostname - Domainname string // Domainname - User string // User that will run the command(s) inside the container, also support user:group - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStdout bool // Attach the standard output - AttachStderr bool // Attach the standard error - ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports - Tty bool // Attach standard streams to a tty, including stdin if it is not closed. - OpenStdin bool // Open stdin - StdinOnce bool // If true, close stdin after the 1 attached client disconnects. - Env []string // List of environment variable to set in the container - Cmd strslice.StrSlice // Command to run when starting the container - Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) - Image string // Name of the image as it was passed by the operator (e.g. 
could be symbolic) - Volumes map[string]struct{} // List of volumes (mounts) used for the container - WorkingDir string // Current directory (PWD) in the command will be launched - Entrypoint strslice.StrSlice // Entrypoint to run when starting the container - NetworkDisabled bool `json:",omitempty"` // Is network disabled - MacAddress string `json:",omitempty"` // Mac Address of the container - OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile - Labels map[string]string // List of labels set to this container - StopSignal string `json:",omitempty"` // Signal to stop a container - StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container - Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT -} diff --git a/vendor/github.com/docker/docker/api/types/container/container_changes.go b/vendor/github.com/docker/docker/api/types/container/container_changes.go deleted file mode 100644 index c909d6ca3..000000000 --- a/vendor/github.com/docker/docker/api/types/container/container_changes.go +++ /dev/null @@ -1,21 +0,0 @@ -package container - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerChangeResponseItem change item in response to ContainerChanges operation -// swagger:model ContainerChangeResponseItem -type ContainerChangeResponseItem struct { - - // Kind of change - // Required: true - Kind uint8 `json:"Kind"` - - // Path to file that has changed - // Required: true - Path string `json:"Path"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/container_create.go b/vendor/github.com/docker/docker/api/types/container/container_create.go deleted file mode 100644 index 49efa0f2c..000000000 --- a/vendor/github.com/docker/docker/api/types/container/container_create.go +++ /dev/null @@ -1,21 +0,0 @@ -package container - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerCreateCreatedBody OK response to ContainerCreate operation -// swagger:model ContainerCreateCreatedBody -type ContainerCreateCreatedBody struct { - - // The ID of the created container - // Required: true - ID string `json:"Id"` - - // Warnings encountered when creating the container - // Required: true - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/container_top.go b/vendor/github.com/docker/docker/api/types/container/container_top.go deleted file mode 100644 index ba41edcf3..000000000 --- a/vendor/github.com/docker/docker/api/types/container/container_top.go +++ /dev/null @@ -1,21 +0,0 @@ -package container - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerTopOKBody OK response to ContainerTop operation -// swagger:model ContainerTopOKBody -type ContainerTopOKBody struct { - - // Each process running in the container, 
where each process is an array of values corresponding to the titles
-	// Required: true
-	Processes [][]string `json:"Processes"`
-
-	// The ps column titles
-	// Required: true
-	Titles []string `json:"Titles"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/container/container_update.go b/vendor/github.com/docker/docker/api/types/container/container_update.go
deleted file mode 100644
index 7630ae54c..000000000
--- a/vendor/github.com/docker/docker/api/types/container/container_update.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package container
-
-// ----------------------------------------------------------------------------
-// DO NOT EDIT THIS FILE
-// This file was generated by `swagger generate operation`
-//
-// See hack/generate-swagger-api.sh
-// ----------------------------------------------------------------------------
-
-// ContainerUpdateOKBody OK response to ContainerUpdate operation
-// swagger:model ContainerUpdateOKBody
-type ContainerUpdateOKBody struct {
-
-	// warnings
-	// Required: true
-	Warnings []string `json:"Warnings"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/container/container_wait.go b/vendor/github.com/docker/docker/api/types/container/container_wait.go
deleted file mode 100644
index 9e3910a6b..000000000
--- a/vendor/github.com/docker/docker/api/types/container/container_wait.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package container
-
-// ----------------------------------------------------------------------------
-// DO NOT EDIT THIS FILE
-// This file was generated by `swagger generate operation`
-//
-// See hack/generate-swagger-api.sh
-// ----------------------------------------------------------------------------
-
-// ContainerWaitOKBodyError container waiting error, if any
-// swagger:model ContainerWaitOKBodyError
-type ContainerWaitOKBodyError struct {
-
-	// Details of an error
-	Message string `json:"Message,omitempty"`
-}
-
-// ContainerWaitOKBody OK response to ContainerWait operation
-// swagger:model ContainerWaitOKBody
-type ContainerWaitOKBody struct {
-
-	// error
-	// Required: true
-	Error *ContainerWaitOKBodyError `json:"Error"`
-
-	// Exit code of the container
-	// Required: true
-	StatusCode int64 `json:"StatusCode"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go
deleted file mode 100644
index 4ef26fa6c..000000000
--- a/vendor/github.com/docker/docker/api/types/container/host_config.go
+++ /dev/null
@@ -1,412 +0,0 @@
-package container // import "github.com/docker/docker/api/types/container"
-
-import (
-	"strings"
-
-	"github.com/docker/docker/api/types/blkiodev"
-	"github.com/docker/docker/api/types/mount"
-	"github.com/docker/docker/api/types/strslice"
-	"github.com/docker/go-connections/nat"
-	"github.com/docker/go-units"
-)
-
-// Isolation represents the isolation technology of a container. The supported
-// values are platform specific.
-type Isolation string
-
-// IsDefault indicates the default isolation technology of a container. On Linux this
-// is the native driver. On Windows, this is a Windows Server Container.
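// A standalone sketch of the Isolation helpers defined next; the local
// function mirrors IsDefault below and is not the vendored implementation.

package main

import (
	"fmt"
	"strings"
)

type isolation string

func isDefault(i isolation) bool {
	// Empty or "default" (case-insensitively) selects the platform's
	// native isolation technology.
	return strings.ToLower(string(i)) == "default" || string(i) == ""
}

func main() {
	for _, i := range []isolation{"", "default", "Process", "hyperv"} {
		fmt.Printf("%q default=%v\n", i, isDefault(i))
	}
}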
-func (i Isolation) IsDefault() bool { - return strings.ToLower(string(i)) == "default" || string(i) == "" -} - -// IsHyperV indicates the use of a Hyper-V partition for isolation -func (i Isolation) IsHyperV() bool { - return strings.ToLower(string(i)) == "hyperv" -} - -// IsProcess indicates the use of process isolation -func (i Isolation) IsProcess() bool { - return strings.ToLower(string(i)) == "process" -} - -const ( - // IsolationEmpty is unspecified (same behavior as default) - IsolationEmpty = Isolation("") - // IsolationDefault is the default isolation mode on current daemon - IsolationDefault = Isolation("default") - // IsolationProcess is process isolation mode - IsolationProcess = Isolation("process") - // IsolationHyperV is HyperV isolation mode - IsolationHyperV = Isolation("hyperv") -) - -// IpcMode represents the container ipc stack. -type IpcMode string - -// IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared. -func (n IpcMode) IsPrivate() bool { - return n == "private" -} - -// IsHost indicates whether the container shares the host's ipc namespace. -func (n IpcMode) IsHost() bool { - return n == "host" -} - -// IsShareable indicates whether the container's ipc namespace can be shared with another container. -func (n IpcMode) IsShareable() bool { - return n == "shareable" -} - -// IsContainer indicates whether the container uses another container's ipc namespace. -func (n IpcMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// IsNone indicates whether container IpcMode is set to "none". -func (n IpcMode) IsNone() bool { - return n == "none" -} - -// IsEmpty indicates whether container IpcMode is empty -func (n IpcMode) IsEmpty() bool { - return n == "" -} - -// Valid indicates whether the ipc mode is valid. -func (n IpcMode) Valid() bool { - return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer() -} - -// Container returns the name of the container ipc stack is going to be used. -func (n IpcMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 && parts[0] == "container" { - return parts[1] - } - return "" -} - -// NetworkMode represents the container network stack. -type NetworkMode string - -// IsNone indicates whether container isn't using a network stack. -func (n NetworkMode) IsNone() bool { - return n == "none" -} - -// IsDefault indicates whether container uses the default network stack. -func (n NetworkMode) IsDefault() bool { - return n == "default" -} - -// IsPrivate indicates whether container uses its private network stack. -func (n NetworkMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsContainer indicates whether container uses a container network stack. -func (n NetworkMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// ConnectedContainer is the id of the container which network this container is connected to. -func (n NetworkMode) ConnectedContainer() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -//UserDefined indicates user-created network -func (n NetworkMode) UserDefined() string { - if n.IsUserDefined() { - return string(n) - } - return "" -} - -// UsernsMode represents userns mode in the container. 
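// The "container:<name|id>" convention parsed by IpcMode.Container above can
// be sketched in isolation like this (the helper name is invented):

package main

import (
	"fmt"
	"strings"
)

func sharedContainer(mode string) string {
	parts := strings.SplitN(mode, ":", 2)
	if len(parts) > 1 && parts[0] == "container" {
		return parts[1] // the referenced container's name or ID
	}
	return ""
}

func main() {
	fmt.Println(sharedContainer("container:web")) // "web"
	fmt.Println(sharedContainer("host"))          // ""
}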
-type UsernsMode string
-
-// IsHost indicates whether the container uses the host's userns.
-func (n UsernsMode) IsHost() bool {
-	return n == "host"
-}
-
-// IsPrivate indicates whether the container uses a private userns.
-func (n UsernsMode) IsPrivate() bool {
-	return !(n.IsHost())
-}
-
-// Valid indicates whether the userns is valid.
-func (n UsernsMode) Valid() bool {
-	parts := strings.Split(string(n), ":")
-	switch mode := parts[0]; mode {
-	case "", "host":
-	default:
-		return false
-	}
-	return true
-}
-
-// CgroupSpec represents the cgroup to use for the container.
-type CgroupSpec string
-
-// IsContainer indicates whether the container is using another container's cgroup.
-func (c CgroupSpec) IsContainer() bool {
-	parts := strings.SplitN(string(c), ":", 2)
-	return len(parts) > 1 && parts[0] == "container"
-}
-
-// Valid indicates whether the cgroup spec is valid.
-func (c CgroupSpec) Valid() bool {
-	return c.IsContainer() || c == ""
-}
-
-// Container returns the name of the container whose cgroup will be used.
-func (c CgroupSpec) Container() string {
-	parts := strings.SplitN(string(c), ":", 2)
-	if len(parts) > 1 {
-		return parts[1]
-	}
-	return ""
-}
-
-// UTSMode represents the UTS namespace of the container.
-type UTSMode string
-
-// IsPrivate indicates whether the container uses its private UTS namespace.
-func (n UTSMode) IsPrivate() bool {
-	return !(n.IsHost())
-}
-
-// IsHost indicates whether the container uses the host's UTS namespace.
-func (n UTSMode) IsHost() bool {
-	return n == "host"
-}
-
-// Valid indicates whether the UTS namespace is valid.
-func (n UTSMode) Valid() bool {
-	parts := strings.Split(string(n), ":")
-	switch mode := parts[0]; mode {
-	case "", "host":
-	default:
-		return false
-	}
-	return true
-}
-
-// PidMode represents the pid namespace of the container.
-type PidMode string
-
-// IsPrivate indicates whether the container uses its own new pid namespace.
-func (n PidMode) IsPrivate() bool {
-	return !(n.IsHost() || n.IsContainer())
-}
-
-// IsHost indicates whether the container uses the host's pid namespace.
-func (n PidMode) IsHost() bool {
-	return n == "host"
-}
-
-// IsContainer indicates whether the container uses a container's pid namespace.
-func (n PidMode) IsContainer() bool {
-	parts := strings.SplitN(string(n), ":", 2)
-	return len(parts) > 1 && parts[0] == "container"
-}
-
-// Valid indicates whether the pid namespace is valid.
-func (n PidMode) Valid() bool {
-	parts := strings.Split(string(n), ":")
-	switch mode := parts[0]; mode {
-	case "", "host":
-	case "container":
-		if len(parts) != 2 || parts[1] == "" {
-			return false
-		}
-	default:
-		return false
-	}
-	return true
-}
-
-// Container returns the name of the container whose pid namespace is going to be used.
-func (n PidMode) Container() string {
-	parts := strings.SplitN(string(n), ":", 2)
-	if len(parts) > 1 {
-		return parts[1]
-	}
-	return ""
-}
-
-// DeviceMapping represents the device mapping between the host and the container.
-type DeviceMapping struct {
-	PathOnHost        string
-	PathInContainer   string
-	CgroupPermissions string
-}
-
-// RestartPolicy represents the restart policies of the container.
-type RestartPolicy struct {
-	Name              string
-	MaximumRetryCount int
-}
-
-// IsNone indicates whether the container has the "no" restart policy.
-// This means the container will not automatically restart when exiting.
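// Per the comment above, an unset policy name behaves like the explicit "no"
// policy; a standalone sketch of that check (mirroring IsNone below):

package main

import "fmt"

type restartPolicy struct {
	Name              string
	MaximumRetryCount int
}

func isNone(rp restartPolicy) bool {
	return rp.Name == "no" || rp.Name == ""
}

func main() {
	fmt.Println(isNone(restartPolicy{}))                   // true
	fmt.Println(isNone(restartPolicy{Name: "on-failure"})) // false
}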
-func (rp *RestartPolicy) IsNone() bool {
-	return rp.Name == "no" || rp.Name == ""
-}
-
-// IsAlways indicates whether the container has the "always" restart policy.
-// This means the container will automatically restart regardless of the exit status.
-func (rp *RestartPolicy) IsAlways() bool {
-	return rp.Name == "always"
-}
-
-// IsOnFailure indicates whether the container has the "on-failure" restart policy.
-// This means the container will automatically restart if it exits with a non-zero exit status.
-func (rp *RestartPolicy) IsOnFailure() bool {
-	return rp.Name == "on-failure"
-}
-
-// IsUnlessStopped indicates whether the container has the
-// "unless-stopped" restart policy. This means the container will
-// automatically restart unless the user has put it into a stopped state.
-func (rp *RestartPolicy) IsUnlessStopped() bool {
-	return rp.Name == "unless-stopped"
-}
-
-// IsSame compares two RestartPolicy values to see if they are the same.
-func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool {
-	return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount
-}
-
-// LogMode is a type to define the available modes for logging.
-// These modes affect how logs are handled when log messages start piling up.
-type LogMode string
-
-// Available logging modes
-const (
-	LogModeUnset            = ""
-	LogModeBlocking LogMode = "blocking"
-	LogModeNonBlock LogMode = "non-blocking"
-)
-
-// LogConfig represents the logging configuration of the container.
-type LogConfig struct {
-	Type   string
-	Config map[string]string
-}
-
-// Resources contains container's resources (cgroups config, ulimits...)
-type Resources struct {
-	// Applicable to all platforms
-	CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers)
-	Memory    int64 // Memory limit (in bytes)
-	NanoCPUs  int64 `json:"NanoCpus"` // CPU quota in units of 10^-9 CPUs.
-
-	// Applicable to UNIX platforms
-	CgroupParent string // Parent cgroup.
-	BlkioWeight  uint16 // Block IO weight (relative weight vs.
other containers) - BlkioWeightDevice []*blkiodev.WeightDevice - BlkioDeviceReadBps []*blkiodev.ThrottleDevice - BlkioDeviceWriteBps []*blkiodev.ThrottleDevice - BlkioDeviceReadIOps []*blkiodev.ThrottleDevice - BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice - CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period - CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota - CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period - CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime - CpusetCpus string // CpusetCpus 0-2, 0,1 - CpusetMems string // CpusetMems 0-2, 0,1 - Devices []DeviceMapping // List of devices to map inside the container - DeviceCgroupRules []string // List of rule to be added to the device cgroup - DiskQuota int64 // Disk limit (in bytes) - KernelMemory int64 // Kernel memory limit (in bytes) - MemoryReservation int64 // Memory soft limit (in bytes) - MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap - MemorySwappiness *int64 // Tuning container memory swappiness behaviour - OomKillDisable *bool // Whether to disable OOM Killer or not - PidsLimit int64 // Setting pids limit for a container - Ulimits []*units.Ulimit // List of ulimits to be set in the container - - // Applicable to Windows - CPUCount int64 `json:"CpuCount"` // CPU count - CPUPercent int64 `json:"CpuPercent"` // CPU percent - IOMaximumIOps uint64 // Maximum IOps for the container system drive - IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive -} - -// UpdateConfig holds the mutable attributes of a Container. -// Those attributes can be updated at runtime. -type UpdateConfig struct { - // Contains container's resources (cgroups, ulimits) - Resources - RestartPolicy RestartPolicy -} - -// HostConfig the non-portable Config structure of a container. -// Here, "non-portable" means "dependent of the host we are running on". -// Portable information *should* appear in Config. 
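// NanoCPUs above is expressed in billionths of a CPU, so a fractional CPU
// limit converts as in this sketch (equivalent in spirit to --cpus=0.5):

package main

import "fmt"

func main() {
	cpus := 0.5
	nanoCPUs := int64(cpus * 1e9) // 10^-9 CPU units
	fmt.Println(nanoCPUs)         // 500000000
}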
-type HostConfig struct {
-	// Applicable to all platforms
-	Binds           []string      // List of volume bindings for this container
-	ContainerIDFile string        // File (path) where the containerId is written
-	LogConfig       LogConfig     // Configuration of the logs for this container
-	NetworkMode     NetworkMode   // Network mode to use for the container
-	PortBindings    nat.PortMap   // Port mapping between the exposed port (container) and the host
-	RestartPolicy   RestartPolicy // Restart policy to be used for the container
-	AutoRemove      bool          // Automatically remove the container when it exits
-	VolumeDriver    string        // Name of the volume driver used to mount volumes
-	VolumesFrom     []string      // List of volumes to take from other containers
-
-	// Applicable to UNIX platforms
-	CapAdd          strslice.StrSlice // List of kernel capabilities to add to the container
-	CapDrop         strslice.StrSlice // List of kernel capabilities to remove from the container
-	DNS             []string          `json:"Dns"`        // List of DNS servers to look up
-	DNSOptions      []string          `json:"DnsOptions"` // List of DNS options to look for
-	DNSSearch       []string          `json:"DnsSearch"`  // List of DNS search domains to look for
-	ExtraHosts      []string          // List of extra hosts
-	GroupAdd        []string          // List of additional groups that the container process will run as
-	IpcMode         IpcMode           // IPC namespace to use for the container
-	Cgroup          CgroupSpec        // Cgroup to use for the container
-	Links           []string          // List of links (in the name:alias form)
-	OomScoreAdj     int               // Container preference for OOM-killing
-	PidMode         PidMode           // PID namespace to use for the container
-	Privileged      bool              // Is the container in privileged mode
-	PublishAllPorts bool              // Should docker publish all exposed ports for the container
-	ReadonlyRootfs  bool              // Is the container root filesystem in read-only mode
-	SecurityOpt     []string          // List of string values to customize labels for MLS systems, such as SELinux.
-	StorageOpt      map[string]string `json:",omitempty"` // Storage driver options per container.
-	Tmpfs           map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container
-	UTSMode         UTSMode           // UTS namespace to use for the container
-	UsernsMode      UsernsMode        // The user namespace to use for the container
-	ShmSize         int64             // Total shm memory usage
-	Sysctls         map[string]string `json:",omitempty"` // List of namespaced sysctls used for the container
-	Runtime         string            `json:",omitempty"` // Runtime to use with this container
-
-	// Applicable to Windows
-	ConsoleSize [2]uint   // Initial console size (height,width)
-	Isolation   Isolation // Isolation technology of the container (e.g.
default, hyperv) - - // Contains container's resources (cgroups, ulimits) - Resources - - // Mounts specs used by the container - Mounts []mount.Mount `json:",omitempty"` - - // MaskedPaths is the list of paths to be masked inside the container (this overrides the default set of paths) - MaskedPaths []string - - // ReadonlyPaths is the list of paths to be set as read-only inside the container (this overrides the default set of paths) - ReadonlyPaths []string - - // Run a custom init inside the container, if null, use the daemon's configured settings - Init *bool `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go deleted file mode 100644 index cf6fdf440..000000000 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build !windows - -package container // import "github.com/docker/docker/api/types/container" - -// IsValid indicates if an isolation technology is valid -func (i Isolation) IsValid() bool { - return i.IsDefault() -} - -// NetworkName returns the name of the network stack. -func (n NetworkMode) NetworkName() string { - if n.IsBridge() { - return "bridge" - } else if n.IsHost() { - return "host" - } else if n.IsContainer() { - return "container" - } else if n.IsNone() { - return "none" - } else if n.IsDefault() { - return "default" - } else if n.IsUserDefined() { - return n.UserDefined() - } - return "" -} - -// IsBridge indicates whether container uses the bridge network stack -func (n NetworkMode) IsBridge() bool { - return n == "bridge" -} - -// IsHost indicates whether container uses the host network stack. -func (n NetworkMode) IsHost() bool { - return n == "host" -} - -// IsUserDefined indicates user-created network -func (n NetworkMode) IsUserDefined() bool { - return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() -} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go deleted file mode 100644 index 99f803a5b..000000000 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go +++ /dev/null @@ -1,40 +0,0 @@ -package container // import "github.com/docker/docker/api/types/container" - -// IsBridge indicates whether container uses the bridge network stack -// in windows it is given the name NAT -func (n NetworkMode) IsBridge() bool { - return n == "nat" -} - -// IsHost indicates whether container uses the host network stack. -// returns false as this is not supported by windows -func (n NetworkMode) IsHost() bool { - return false -} - -// IsUserDefined indicates user-created network -func (n NetworkMode) IsUserDefined() bool { - return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer() -} - -// IsValid indicates if an isolation technology is valid -func (i Isolation) IsValid() bool { - return i.IsDefault() || i.IsHyperV() || i.IsProcess() -} - -// NetworkName returns the name of the network stack. 
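// Comparing the Unix NetworkName above with the Windows variant below: the
// bridge-style stack is reported as "bridge" on Unix but "nat" on Windows.
// A rough, platform-agnostic sketch of that mapping (not the vendored code):

package main

import "fmt"

func networkName(mode string, windows bool) string {
	bridge := "bridge"
	if windows {
		bridge = "nat" // Windows calls the bridge stack NAT
	}
	if mode == "bridge" || mode == "nat" {
		return bridge
	}
	return mode
}

func main() {
	fmt.Println(networkName("bridge", false)) // "bridge"
	fmt.Println(networkName("nat", true))     // "nat"
}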
-func (n NetworkMode) NetworkName() string {
- if n.IsDefault() {
- return "default"
- } else if n.IsBridge() {
- return "nat"
- } else if n.IsNone() {
- return "none"
- } else if n.IsContainer() {
- return "container"
- } else if n.IsUserDefined() {
- return n.UserDefined()
- }
-
- return ""
-}
diff --git a/vendor/github.com/docker/docker/api/types/container/waitcondition.go b/vendor/github.com/docker/docker/api/types/container/waitcondition.go
deleted file mode 100644
index cd8311f99..000000000
--- a/vendor/github.com/docker/docker/api/types/container/waitcondition.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package container // import "github.com/docker/docker/api/types/container"
-
-// WaitCondition is a type used to specify a container state for which
-// to wait.
-type WaitCondition string
-
-// Possible WaitCondition Values.
-//
-// WaitConditionNotRunning (default) is used to wait for any of the non-running
-// states: "created", "exited", "dead", "removing", or "removed".
-//
-// WaitConditionNextExit is used to wait for the next time the state changes
-// to a non-running state. If the state is currently "created" or "exited",
-// this would cause Wait() to block until either the container runs and exits
-// or is removed.
-//
-// WaitConditionRemoved is used to wait for the container to be removed.
-const (
- WaitConditionNotRunning WaitCondition = "not-running"
- WaitConditionNextExit WaitCondition = "next-exit"
- WaitConditionRemoved WaitCondition = "removed"
-)
diff --git a/vendor/github.com/docker/docker/api/types/error_response.go b/vendor/github.com/docker/docker/api/types/error_response.go
deleted file mode 100644
index dc942d9d9..000000000
--- a/vendor/github.com/docker/docker/api/types/error_response.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package types
-
-// This file was generated by the swagger tool.
-// Editing this file might prove futile when you re-run the swagger generate command
-
-// ErrorResponse Represents an error.
-// swagger:model ErrorResponse
-type ErrorResponse struct {
-
- // The error message.
- // Required: true
- Message string `json:"message"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go
deleted file mode 100644
index 027c6edb7..000000000
--- a/vendor/github.com/docker/docker/api/types/events/events.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package events // import "github.com/docker/docker/api/types/events"
-
-const (
- // ContainerEventType is the event type that containers generate
- ContainerEventType = "container"
- // DaemonEventType is the event type that the daemon generates
- DaemonEventType = "daemon"
- // ImageEventType is the event type that images generate
- ImageEventType = "image"
- // NetworkEventType is the event type that networks generate
- NetworkEventType = "network"
- // PluginEventType is the event type that plugins generate
- PluginEventType = "plugin"
- // VolumeEventType is the event type that volumes generate
- VolumeEventType = "volume"
- // ServiceEventType is the event type that services generate
- ServiceEventType = "service"
- // NodeEventType is the event type that nodes generate
- NodeEventType = "node"
- // SecretEventType is the event type that secrets generate
- SecretEventType = "secret"
- // ConfigEventType is the event type that configs generate
- ConfigEventType = "config"
-)
-
-// Actor describes something that generates events,
-// like a container, or a network, or a volume.
-// It has a defined name and a set of attributes.
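For reference, the WaitCondition constants above were consumed through the Engine API client roughly as in the sketch below. This assumes the github.com/docker/docker/client package from the same vendored tree; the helper name and the printing are illustrative only, not part of this patch.

package example

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

// waitForExit blocks until the container leaves the running state, using the
// WaitConditionNotRunning constant defined above (a sketch, not patch content).
func waitForExit(ctx context.Context, cli *client.Client, id string) error {
	statusCh, errCh := cli.ContainerWait(ctx, id, container.WaitConditionNotRunning)
	select {
	case err := <-errCh:
		return err
	case status := <-statusCh:
		fmt.Printf("container %s exited with status %d\n", id, status.StatusCode)
		return nil
	}
}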
-// The container attributes are its labels, other actors -// can generate these attributes from other properties. -type Actor struct { - ID string - Attributes map[string]string -} - -// Message represents the information an event contains -type Message struct { - // Deprecated information from JSONMessage. - // With data only in container events. - Status string `json:"status,omitempty"` - ID string `json:"id,omitempty"` - From string `json:"from,omitempty"` - - Type string - Action string - Actor Actor - // Engine events are local scope. Cluster events are swarm scope. - Scope string `json:"scope,omitempty"` - - Time int64 `json:"time,omitempty"` - TimeNano int64 `json:"timeNano,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go deleted file mode 100644 index a41e3d8d9..000000000 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ /dev/null @@ -1,350 +0,0 @@ -/*Package filters provides tools for encoding a mapping of keys to a set of -multiple values. -*/ -package filters // import "github.com/docker/docker/api/types/filters" - -import ( - "encoding/json" - "errors" - "regexp" - "strings" - - "github.com/docker/docker/api/types/versions" -) - -// Args stores a mapping of keys to a set of multiple values. -type Args struct { - fields map[string]map[string]bool -} - -// KeyValuePair are used to initialize a new Args -type KeyValuePair struct { - Key string - Value string -} - -// Arg creates a new KeyValuePair for initializing Args -func Arg(key, value string) KeyValuePair { - return KeyValuePair{Key: key, Value: value} -} - -// NewArgs returns a new Args populated with the initial args -func NewArgs(initialArgs ...KeyValuePair) Args { - args := Args{fields: map[string]map[string]bool{}} - for _, arg := range initialArgs { - args.Add(arg.Key, arg.Value) - } - return args -} - -// ParseFlag parses a key=value string and adds it to an Args. -// -// Deprecated: Use Args.Add() -func ParseFlag(arg string, prev Args) (Args, error) { - filters := prev - if len(arg) == 0 { - return filters, nil - } - - if !strings.Contains(arg, "=") { - return filters, ErrBadFormat - } - - f := strings.SplitN(arg, "=", 2) - - name := strings.ToLower(strings.TrimSpace(f[0])) - value := strings.TrimSpace(f[1]) - - filters.Add(name, value) - - return filters, nil -} - -// ErrBadFormat is an error returned when a filter is not in the form key=value -// -// Deprecated: this error will be removed in a future version -var ErrBadFormat = errors.New("bad format of filter (expected name=value)") - -// ToParam encodes the Args as args JSON encoded string -// -// Deprecated: use ToJSON -func ToParam(a Args) (string, error) { - return ToJSON(a) -} - -// MarshalJSON returns a JSON byte representation of the Args -func (args Args) MarshalJSON() ([]byte, error) { - if len(args.fields) == 0 { - return []byte{}, nil - } - return json.Marshal(args.fields) -} - -// ToJSON returns the Args as a JSON encoded string -func ToJSON(a Args) (string, error) { - if a.Len() == 0 { - return "", nil - } - buf, err := json.Marshal(a) - return string(buf), err -} - -// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22 -// then the encoded format will use an older legacy format where the values are a -// list of strings, instead of a set. 
-// -// Deprecated: Use ToJSON -func ToParamWithVersion(version string, a Args) (string, error) { - if a.Len() == 0 { - return "", nil - } - - if version != "" && versions.LessThan(version, "1.22") { - buf, err := json.Marshal(convertArgsToSlice(a.fields)) - return string(buf), err - } - - return ToJSON(a) -} - -// FromParam decodes a JSON encoded string into Args -// -// Deprecated: use FromJSON -func FromParam(p string) (Args, error) { - return FromJSON(p) -} - -// FromJSON decodes a JSON encoded string into Args -func FromJSON(p string) (Args, error) { - args := NewArgs() - - if p == "" { - return args, nil - } - - raw := []byte(p) - err := json.Unmarshal(raw, &args) - if err == nil { - return args, nil - } - - // Fallback to parsing arguments in the legacy slice format - deprecated := map[string][]string{} - if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil { - return args, err - } - - args.fields = deprecatedArgs(deprecated) - return args, nil -} - -// UnmarshalJSON populates the Args from JSON encode bytes -func (args Args) UnmarshalJSON(raw []byte) error { - if len(raw) == 0 { - return nil - } - return json.Unmarshal(raw, &args.fields) -} - -// Get returns the list of values associated with the key -func (args Args) Get(key string) []string { - values := args.fields[key] - if values == nil { - return make([]string, 0) - } - slice := make([]string, 0, len(values)) - for key := range values { - slice = append(slice, key) - } - return slice -} - -// Add a new value to the set of values -func (args Args) Add(key, value string) { - if _, ok := args.fields[key]; ok { - args.fields[key][value] = true - } else { - args.fields[key] = map[string]bool{value: true} - } -} - -// Del removes a value from the set -func (args Args) Del(key, value string) { - if _, ok := args.fields[key]; ok { - delete(args.fields[key], value) - if len(args.fields[key]) == 0 { - delete(args.fields, key) - } - } -} - -// Len returns the number of keys in the mapping -func (args Args) Len() int { - return len(args.fields) -} - -// MatchKVList returns true if all the pairs in sources exist as key=value -// pairs in the mapping at key, or if there are no values at key. -func (args Args) MatchKVList(key string, sources map[string]string) bool { - fieldValues := args.fields[key] - - //do not filter if there is no filter set or cannot determine filter - if len(fieldValues) == 0 { - return true - } - - if len(sources) == 0 { - return false - } - - for value := range fieldValues { - testKV := strings.SplitN(value, "=", 2) - - v, ok := sources[testKV[0]] - if !ok { - return false - } - if len(testKV) == 2 && testKV[1] != v { - return false - } - } - - return true -} - -// Match returns true if any of the values at key match the source string -func (args Args) Match(field, source string) bool { - if args.ExactMatch(field, source) { - return true - } - - fieldValues := args.fields[field] - for name2match := range fieldValues { - match, err := regexp.MatchString(name2match, source) - if err != nil { - continue - } - if match { - return true - } - } - return false -} - -// ExactMatch returns true if the source matches exactly one of the values. 
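Since the filters package above is the part of this API that other components link against most often, here is a minimal sketch of its intended use, built only from the functions defined in this file; the label and status values are made-up examples and the helper name is hypothetical.

package example

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func encodeFilters() (string, error) {
	// Equivalent to: --filter label=env=prod --filter status=running
	args := filters.NewArgs(
		filters.Arg("label", "env=prod"),
		filters.Arg("status", "running"),
	)
	args.Add("status", "paused")

	// Get returns the set of values stored for one key (order not guaranteed).
	fmt.Println(args.Get("status")) // e.g. [running paused]

	// ToJSON yields the wire form sent to the API in a query parameter, e.g.
	// {"label":{"env=prod":true},"status":{"paused":true,"running":true}}
	return filters.ToJSON(args)
}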
-func (args Args) ExactMatch(key, source string) bool {
- fieldValues, ok := args.fields[key]
- // do not filter if there is no filter set or we cannot determine the filter
- if !ok || len(fieldValues) == 0 {
- return true
- }
-
- // try to match full name value to avoid O(N) regular expression matching
- return fieldValues[source]
-}
-
-// UniqueExactMatch returns true if there is only one value and the source
-// matches exactly the value.
-func (args Args) UniqueExactMatch(key, source string) bool {
- fieldValues := args.fields[key]
- // do not filter if there is no filter set or we cannot determine the filter
- if len(fieldValues) == 0 {
- return true
- }
- if len(args.fields[key]) != 1 {
- return false
- }
-
- // try to match full name value to avoid O(N) regular expression matching
- return fieldValues[source]
-}
-
-// FuzzyMatch returns true if the source matches exactly one value, or the
-// source has one of the values as a prefix.
-func (args Args) FuzzyMatch(key, source string) bool {
- if args.ExactMatch(key, source) {
- return true
- }
-
- fieldValues := args.fields[key]
- for prefix := range fieldValues {
- if strings.HasPrefix(source, prefix) {
- return true
- }
- }
- return false
-}
-
-// Include returns true if the key exists in the mapping
-//
-// Deprecated: use Contains
-func (args Args) Include(field string) bool {
- _, ok := args.fields[field]
- return ok
-}
-
-// Contains returns true if the key exists in the mapping
-func (args Args) Contains(field string) bool {
- _, ok := args.fields[field]
- return ok
-}
-
-type invalidFilter string
-
-func (e invalidFilter) Error() string {
- return "Invalid filter '" + string(e) + "'"
-}
-
-func (invalidFilter) InvalidParameter() {}
-
-// Validate compares the set of accepted keys against the keys in the mapping.
-// An error is returned if any mapping keys are not in the accepted set.
-func (args Args) Validate(accepted map[string]bool) error {
- for name := range args.fields {
- if !accepted[name] {
- return invalidFilter(name)
- }
- }
- return nil
-}
-
-// WalkValues iterates over the list of values for a key in the mapping and calls
-// op() for each value. If op returns an error the iteration stops and the
-// error is returned.
-func (args Args) WalkValues(field string, op func(value string) error) error {
- if _, ok := args.fields[field]; !ok {
- return nil
- }
- for v := range args.fields[field] {
- if err := op(v); err != nil {
- return err
- }
- }
- return nil
-}
-
-func deprecatedArgs(d map[string][]string) map[string]map[string]bool {
- m := map[string]map[string]bool{}
- for k, v := range d {
- values := map[string]bool{}
- for _, vv := range v {
- values[vv] = true
- }
- m[k] = values
- }
- return m
-}
-
-func convertArgsToSlice(f map[string]map[string]bool) map[string][]string {
- m := map[string][]string{}
- for k, v := range f {
- values := []string{}
- for kk := range v {
- if v[kk] {
- values = append(values, kk)
- }
- }
- m[k] = values
- }
- return m
-}
diff --git a/vendor/github.com/docker/docker/api/types/graph_driver_data.go b/vendor/github.com/docker/docker/api/types/graph_driver_data.go
deleted file mode 100644
index 4d9bf1c62..000000000
--- a/vendor/github.com/docker/docker/api/types/graph_driver_data.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package types
-
-// This file was generated by the swagger tool.
-// Editing this file might prove futile when you re-run the swagger generate command
-
-// GraphDriverData Information about a container's graph driver.
-// swagger:model GraphDriverData -type GraphDriverData struct { - - // data - // Required: true - Data map[string]string `json:"Data"` - - // name - // Required: true - Name string `json:"Name"` -} diff --git a/vendor/github.com/docker/docker/api/types/id_response.go b/vendor/github.com/docker/docker/api/types/id_response.go deleted file mode 100644 index 7592d2f8b..000000000 --- a/vendor/github.com/docker/docker/api/types/id_response.go +++ /dev/null @@ -1,13 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// IDResponse Response to an API call that returns just an Id -// swagger:model IdResponse -type IDResponse struct { - - // The id of the newly created object. - // Required: true - ID string `json:"Id"` -} diff --git a/vendor/github.com/docker/docker/api/types/image/image_history.go b/vendor/github.com/docker/docker/api/types/image/image_history.go deleted file mode 100644 index d6b354bcd..000000000 --- a/vendor/github.com/docker/docker/api/types/image/image_history.go +++ /dev/null @@ -1,37 +0,0 @@ -package image - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// HistoryResponseItem individual image layer information in response to ImageHistory operation -// swagger:model HistoryResponseItem -type HistoryResponseItem struct { - - // comment - // Required: true - Comment string `json:"Comment"` - - // created - // Required: true - Created int64 `json:"Created"` - - // created by - // Required: true - CreatedBy string `json:"CreatedBy"` - - // Id - // Required: true - ID string `json:"Id"` - - // size - // Required: true - Size int64 `json:"Size"` - - // tags - // Required: true - Tags []string `json:"Tags"` -} diff --git a/vendor/github.com/docker/docker/api/types/image_delete_response_item.go b/vendor/github.com/docker/docker/api/types/image_delete_response_item.go deleted file mode 100644 index b9a65a0d8..000000000 --- a/vendor/github.com/docker/docker/api/types/image_delete_response_item.go +++ /dev/null @@ -1,15 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ImageDeleteResponseItem image delete response item -// swagger:model ImageDeleteResponseItem -type ImageDeleteResponseItem struct { - - // The image ID of an image that was deleted - Deleted string `json:"Deleted,omitempty"` - - // The image ID of an image that was untagged - Untagged string `json:"Untagged,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/image_summary.go b/vendor/github.com/docker/docker/api/types/image_summary.go deleted file mode 100644 index e145b3dcf..000000000 --- a/vendor/github.com/docker/docker/api/types/image_summary.go +++ /dev/null @@ -1,49 +0,0 @@ -package types - -// This file was generated by the swagger tool. 
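As an illustration of the swagger-generated model above, an /images/{name}/history response body decodes directly into the struct; the JSON shape follows from the struct tags, and the helper below is hypothetical.

package example

import (
	"encoding/json"

	"github.com/docker/docker/api/types/image"
)

// parseHistory decodes a response such as:
// [{"Comment":"","Created":1624700000,"CreatedBy":"/bin/sh -c #(nop) CMD [\"sh\"]",
//   "Id":"sha256:...","Size":0,"Tags":["busybox:latest"]}]
func parseHistory(body []byte) ([]image.HistoryResponseItem, error) {
	var hist []image.HistoryResponseItem
	if err := json.Unmarshal(body, &hist); err != nil {
		return nil, err
	}
	return hist, nil
}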
-// Editing this file might prove futile when you re-run the swagger generate command - -// ImageSummary image summary -// swagger:model ImageSummary -type ImageSummary struct { - - // containers - // Required: true - Containers int64 `json:"Containers"` - - // created - // Required: true - Created int64 `json:"Created"` - - // Id - // Required: true - ID string `json:"Id"` - - // labels - // Required: true - Labels map[string]string `json:"Labels"` - - // parent Id - // Required: true - ParentID string `json:"ParentId"` - - // repo digests - // Required: true - RepoDigests []string `json:"RepoDigests"` - - // repo tags - // Required: true - RepoTags []string `json:"RepoTags"` - - // shared size - // Required: true - SharedSize int64 `json:"SharedSize"` - - // size - // Required: true - Size int64 `json:"Size"` - - // virtual size - // Required: true - VirtualSize int64 `json:"VirtualSize"` -} diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go deleted file mode 100644 index 3fef974df..000000000 --- a/vendor/github.com/docker/docker/api/types/mount/mount.go +++ /dev/null @@ -1,130 +0,0 @@ -package mount // import "github.com/docker/docker/api/types/mount" - -import ( - "os" -) - -// Type represents the type of a mount. -type Type string - -// Type constants -const ( - // TypeBind is the type for mounting host dir - TypeBind Type = "bind" - // TypeVolume is the type for remote storage volumes - TypeVolume Type = "volume" - // TypeTmpfs is the type for mounting tmpfs - TypeTmpfs Type = "tmpfs" - // TypeNamedPipe is the type for mounting Windows named pipes - TypeNamedPipe Type = "npipe" -) - -// Mount represents a mount (volume). -type Mount struct { - Type Type `json:",omitempty"` - // Source specifies the name of the mount. Depending on mount type, this - // may be a volume name or a host path, or even ignored. - // Source is not supported for tmpfs (must be an empty value) - Source string `json:",omitempty"` - Target string `json:",omitempty"` - ReadOnly bool `json:",omitempty"` - Consistency Consistency `json:",omitempty"` - - BindOptions *BindOptions `json:",omitempty"` - VolumeOptions *VolumeOptions `json:",omitempty"` - TmpfsOptions *TmpfsOptions `json:",omitempty"` -} - -// Propagation represents the propagation of a mount. -type Propagation string - -const ( - // PropagationRPrivate RPRIVATE - PropagationRPrivate Propagation = "rprivate" - // PropagationPrivate PRIVATE - PropagationPrivate Propagation = "private" - // PropagationRShared RSHARED - PropagationRShared Propagation = "rshared" - // PropagationShared SHARED - PropagationShared Propagation = "shared" - // PropagationRSlave RSLAVE - PropagationRSlave Propagation = "rslave" - // PropagationSlave SLAVE - PropagationSlave Propagation = "slave" -) - -// Propagations is the list of all valid mount propagations -var Propagations = []Propagation{ - PropagationRPrivate, - PropagationPrivate, - PropagationRShared, - PropagationShared, - PropagationRSlave, - PropagationSlave, -} - -// Consistency represents the consistency requirements of a mount. 
-type Consistency string - -const ( - // ConsistencyFull guarantees bind mount-like consistency - ConsistencyFull Consistency = "consistent" - // ConsistencyCached mounts can cache read data and FS structure - ConsistencyCached Consistency = "cached" - // ConsistencyDelegated mounts can cache read and written data and structure - ConsistencyDelegated Consistency = "delegated" - // ConsistencyDefault provides "consistent" behavior unless overridden - ConsistencyDefault Consistency = "default" -) - -// BindOptions defines options specific to mounts of type "bind". -type BindOptions struct { - Propagation Propagation `json:",omitempty"` -} - -// VolumeOptions represents the options for a mount of type volume. -type VolumeOptions struct { - NoCopy bool `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - DriverConfig *Driver `json:",omitempty"` -} - -// Driver represents a volume driver. -type Driver struct { - Name string `json:",omitempty"` - Options map[string]string `json:",omitempty"` -} - -// TmpfsOptions defines options specific to mounts of type "tmpfs". -type TmpfsOptions struct { - // Size sets the size of the tmpfs, in bytes. - // - // This will be converted to an operating system specific value - // depending on the host. For example, on linux, it will be converted to - // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with - // docker, uses a straight byte value. - // - // Percentages are not supported. - SizeBytes int64 `json:",omitempty"` - // Mode of the tmpfs upon creation - Mode os.FileMode `json:",omitempty"` - - // TODO(stevvooe): There are several more tmpfs flags, specified in the - // daemon, that are accepted. Only the most basic are added for now. - // - // From docker/docker/pkg/mount/flags.go: - // - // var validFlags = map[string]bool{ - // "": true, - // "size": true, X - // "mode": true, X - // "uid": true, - // "gid": true, - // "nr_inodes": true, - // "nr_blocks": true, - // "mpol": true, - // } - // - // Some of these may be straightforward to add, but others, such as - // uid/gid have implications in a clustered system. -} diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go deleted file mode 100644 index 761d0b34f..000000000 --- a/vendor/github.com/docker/docker/api/types/network/network.go +++ /dev/null @@ -1,108 +0,0 @@ -package network // import "github.com/docker/docker/api/types/network" - -// Address represents an IP address -type Address struct { - Addr string - PrefixLen int -} - -// IPAM represents IP Address Management -type IPAM struct { - Driver string - Options map[string]string //Per network IPAM driver options - Config []IPAMConfig -} - -// IPAMConfig represents IPAM configurations -type IPAMConfig struct { - Subnet string `json:",omitempty"` - IPRange string `json:",omitempty"` - Gateway string `json:",omitempty"` - AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` -} - -// EndpointIPAMConfig represents IPAM configurations for the endpoint -type EndpointIPAMConfig struct { - IPv4Address string `json:",omitempty"` - IPv6Address string `json:",omitempty"` - LinkLocalIPs []string `json:",omitempty"` -} - -// Copy makes a copy of the endpoint ipam config -func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { - cfgCopy := *cfg - cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs)) - cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...) 
- return &cfgCopy -} - -// PeerInfo represents one peer of an overlay network -type PeerInfo struct { - Name string - IP string -} - -// EndpointSettings stores the network endpoint details -type EndpointSettings struct { - // Configurations - IPAMConfig *EndpointIPAMConfig - Links []string - Aliases []string - // Operational data - NetworkID string - EndpointID string - Gateway string - IPAddress string - IPPrefixLen int - IPv6Gateway string - GlobalIPv6Address string - GlobalIPv6PrefixLen int - MacAddress string - DriverOpts map[string]string -} - -// Task carries the information about one backend task -type Task struct { - Name string - EndpointID string - EndpointIP string - Info map[string]string -} - -// ServiceInfo represents service parameters with the list of service's tasks -type ServiceInfo struct { - VIP string - Ports []string - LocalLBIndex int - Tasks []Task -} - -// Copy makes a deep copy of `EndpointSettings` -func (es *EndpointSettings) Copy() *EndpointSettings { - epCopy := *es - if es.IPAMConfig != nil { - epCopy.IPAMConfig = es.IPAMConfig.Copy() - } - - if es.Links != nil { - links := make([]string, 0, len(es.Links)) - epCopy.Links = append(links, es.Links...) - } - - if es.Aliases != nil { - aliases := make([]string, 0, len(es.Aliases)) - epCopy.Aliases = append(aliases, es.Aliases...) - } - return &epCopy -} - -// NetworkingConfig represents the container's networking configuration for each of its interfaces -// Carries the networking configs specified in the `docker run` and `docker network connect` commands -type NetworkingConfig struct { - EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network -} - -// ConfigReference specifies the source which provides a network's configuration -type ConfigReference struct { - Network string -} diff --git a/vendor/github.com/docker/docker/api/types/plugin.go b/vendor/github.com/docker/docker/api/types/plugin.go deleted file mode 100644 index abae48b9a..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin.go +++ /dev/null @@ -1,203 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// Plugin A plugin for the Engine API -// swagger:model Plugin -type Plugin struct { - - // config - // Required: true - Config PluginConfig `json:"Config"` - - // True if the plugin is running. False if the plugin is not running, only installed. - // Required: true - Enabled bool `json:"Enabled"` - - // Id - ID string `json:"Id,omitempty"` - - // name - // Required: true - Name string `json:"Name"` - - // plugin remote reference used to push/pull the plugin - PluginReference string `json:"PluginReference,omitempty"` - - // settings - // Required: true - Settings PluginSettings `json:"Settings"` -} - -// PluginConfig The config of a plugin. 
-// swagger:model PluginConfig -type PluginConfig struct { - - // args - // Required: true - Args PluginConfigArgs `json:"Args"` - - // description - // Required: true - Description string `json:"Description"` - - // Docker Version used to create the plugin - DockerVersion string `json:"DockerVersion,omitempty"` - - // documentation - // Required: true - Documentation string `json:"Documentation"` - - // entrypoint - // Required: true - Entrypoint []string `json:"Entrypoint"` - - // env - // Required: true - Env []PluginEnv `json:"Env"` - - // interface - // Required: true - Interface PluginConfigInterface `json:"Interface"` - - // ipc host - // Required: true - IpcHost bool `json:"IpcHost"` - - // linux - // Required: true - Linux PluginConfigLinux `json:"Linux"` - - // mounts - // Required: true - Mounts []PluginMount `json:"Mounts"` - - // network - // Required: true - Network PluginConfigNetwork `json:"Network"` - - // pid host - // Required: true - PidHost bool `json:"PidHost"` - - // propagated mount - // Required: true - PropagatedMount string `json:"PropagatedMount"` - - // user - User PluginConfigUser `json:"User,omitempty"` - - // work dir - // Required: true - WorkDir string `json:"WorkDir"` - - // rootfs - Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"` -} - -// PluginConfigArgs plugin config args -// swagger:model PluginConfigArgs -type PluginConfigArgs struct { - - // description - // Required: true - Description string `json:"Description"` - - // name - // Required: true - Name string `json:"Name"` - - // settable - // Required: true - Settable []string `json:"Settable"` - - // value - // Required: true - Value []string `json:"Value"` -} - -// PluginConfigInterface The interface between Docker and the plugin -// swagger:model PluginConfigInterface -type PluginConfigInterface struct { - - // Protocol to use for clients connecting to the plugin. - ProtocolScheme string `json:"ProtocolScheme,omitempty"` - - // socket - // Required: true - Socket string `json:"Socket"` - - // types - // Required: true - Types []PluginInterfaceType `json:"Types"` -} - -// PluginConfigLinux plugin config linux -// swagger:model PluginConfigLinux -type PluginConfigLinux struct { - - // allow all devices - // Required: true - AllowAllDevices bool `json:"AllowAllDevices"` - - // capabilities - // Required: true - Capabilities []string `json:"Capabilities"` - - // devices - // Required: true - Devices []PluginDevice `json:"Devices"` -} - -// PluginConfigNetwork plugin config network -// swagger:model PluginConfigNetwork -type PluginConfigNetwork struct { - - // type - // Required: true - Type string `json:"Type"` -} - -// PluginConfigRootfs plugin config rootfs -// swagger:model PluginConfigRootfs -type PluginConfigRootfs struct { - - // diff ids - DiffIds []string `json:"diff_ids"` - - // type - Type string `json:"type,omitempty"` -} - -// PluginConfigUser plugin config user -// swagger:model PluginConfigUser -type PluginConfigUser struct { - - // g ID - GID uint32 `json:"GID,omitempty"` - - // UID - UID uint32 `json:"UID,omitempty"` -} - -// PluginSettings Settings that can be modified by users. 
-// swagger:model PluginSettings -type PluginSettings struct { - - // args - // Required: true - Args []string `json:"Args"` - - // devices - // Required: true - Devices []PluginDevice `json:"Devices"` - - // env - // Required: true - Env []string `json:"Env"` - - // mounts - // Required: true - Mounts []PluginMount `json:"Mounts"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_device.go b/vendor/github.com/docker/docker/api/types/plugin_device.go deleted file mode 100644 index 569901067..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_device.go +++ /dev/null @@ -1,25 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// PluginDevice plugin device -// swagger:model PluginDevice -type PluginDevice struct { - - // description - // Required: true - Description string `json:"Description"` - - // name - // Required: true - Name string `json:"Name"` - - // path - // Required: true - Path *string `json:"Path"` - - // settable - // Required: true - Settable []string `json:"Settable"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_env.go b/vendor/github.com/docker/docker/api/types/plugin_env.go deleted file mode 100644 index 32962dc2e..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_env.go +++ /dev/null @@ -1,25 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// PluginEnv plugin env -// swagger:model PluginEnv -type PluginEnv struct { - - // description - // Required: true - Description string `json:"Description"` - - // name - // Required: true - Name string `json:"Name"` - - // settable - // Required: true - Settable []string `json:"Settable"` - - // value - // Required: true - Value *string `json:"Value"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go deleted file mode 100644 index c82f204e8..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go +++ /dev/null @@ -1,21 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// PluginInterfaceType plugin interface type -// swagger:model PluginInterfaceType -type PluginInterfaceType struct { - - // capability - // Required: true - Capability string `json:"Capability"` - - // prefix - // Required: true - Prefix string `json:"Prefix"` - - // version - // Required: true - Version string `json:"Version"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_mount.go b/vendor/github.com/docker/docker/api/types/plugin_mount.go deleted file mode 100644 index 5c031cf8b..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_mount.go +++ /dev/null @@ -1,37 +0,0 @@ -package types - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -// PluginMount plugin mount -// swagger:model PluginMount -type PluginMount struct { - - // description - // Required: true - Description string `json:"Description"` - - // destination - // Required: true - Destination string `json:"Destination"` - - // name - // Required: true - Name string `json:"Name"` - - // options - // Required: true - Options []string `json:"Options"` - - // settable - // Required: true - Settable []string `json:"Settable"` - - // source - // Required: true - Source *string `json:"Source"` - - // type - // Required: true - Type string `json:"Type"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugin_responses.go b/vendor/github.com/docker/docker/api/types/plugin_responses.go deleted file mode 100644 index 60d1fb5ad..000000000 --- a/vendor/github.com/docker/docker/api/types/plugin_responses.go +++ /dev/null @@ -1,71 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -import ( - "encoding/json" - "fmt" - "sort" -) - -// PluginsListResponse contains the response for the Engine API -type PluginsListResponse []*Plugin - -// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType -func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { - versionIndex := len(p) - prefixIndex := 0 - if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { - return fmt.Errorf("%q is not a plugin interface type", p) - } - p = p[1 : len(p)-1] -loop: - for i, b := range p { - switch b { - case '.': - prefixIndex = i - case '/': - versionIndex = i - break loop - } - } - t.Prefix = string(p[:prefixIndex]) - t.Capability = string(p[prefixIndex+1 : versionIndex]) - if versionIndex < len(p) { - t.Version = string(p[versionIndex+1:]) - } - return nil -} - -// MarshalJSON implements json.Marshaler for PluginInterfaceType -func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { - return json.Marshal(t.String()) -} - -// String implements fmt.Stringer for PluginInterfaceType -func (t PluginInterfaceType) String() string { - return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. -type PluginPrivilege struct { - Name string - Description string - Value []string -} - -// PluginPrivileges is a list of PluginPrivilege -type PluginPrivileges []PluginPrivilege - -func (s PluginPrivileges) Len() int { - return len(s) -} - -func (s PluginPrivileges) Less(i, j int) bool { - return s[i].Name < s[j].Name -} - -func (s PluginPrivileges) Swap(i, j int) { - sort.Strings(s[i].Value) - sort.Strings(s[j].Value) - s[i], s[j] = s[j], s[i] -} diff --git a/vendor/github.com/docker/docker/api/types/plugins/logdriver/entry.pb.go b/vendor/github.com/docker/docker/api/types/plugins/logdriver/entry.pb.go deleted file mode 100644 index 5d7d8b4c4..000000000 --- a/vendor/github.com/docker/docker/api/types/plugins/logdriver/entry.pb.go +++ /dev/null @@ -1,449 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: entry.proto -// DO NOT EDIT! - -/* - Package logdriver is a generated protocol buffer package. - - It is generated from these files: - entry.proto - - It has these top-level messages: - LogEntry -*/ -package logdriver - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. 
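The custom JSON methods above flatten a PluginInterfaceType to a single "prefix.capability/version" string. A quick round-trip sketch, where the docker.volumedriver/1.0 value is only an example:

package example

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
)

func pluginTypeRoundTrip() error {
	var t types.PluginInterfaceType
	// Parses into Prefix="docker", Capability="volumedriver", Version="1.0".
	if err := json.Unmarshal([]byte(`"docker.volumedriver/1.0"`), &t); err != nil {
		return err
	}
	out, err := json.Marshal(&t)
	if err != nil {
		return err
	}
	fmt.Println(string(out)) // "docker.volumedriver/1.0"
	return nil
}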
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -type LogEntry struct { - Source string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` - TimeNano int64 `protobuf:"varint,2,opt,name=time_nano,json=timeNano,proto3" json:"time_nano,omitempty"` - Line []byte `protobuf:"bytes,3,opt,name=line,proto3" json:"line,omitempty"` - Partial bool `protobuf:"varint,4,opt,name=partial,proto3" json:"partial,omitempty"` -} - -func (m *LogEntry) Reset() { *m = LogEntry{} } -func (m *LogEntry) String() string { return proto.CompactTextString(m) } -func (*LogEntry) ProtoMessage() {} -func (*LogEntry) Descriptor() ([]byte, []int) { return fileDescriptorEntry, []int{0} } - -func (m *LogEntry) GetSource() string { - if m != nil { - return m.Source - } - return "" -} - -func (m *LogEntry) GetTimeNano() int64 { - if m != nil { - return m.TimeNano - } - return 0 -} - -func (m *LogEntry) GetLine() []byte { - if m != nil { - return m.Line - } - return nil -} - -func (m *LogEntry) GetPartial() bool { - if m != nil { - return m.Partial - } - return false -} - -func init() { - proto.RegisterType((*LogEntry)(nil), "LogEntry") -} -func (m *LogEntry) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LogEntry) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Source) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintEntry(dAtA, i, uint64(len(m.Source))) - i += copy(dAtA[i:], m.Source) - } - if m.TimeNano != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintEntry(dAtA, i, uint64(m.TimeNano)) - } - if len(m.Line) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintEntry(dAtA, i, uint64(len(m.Line))) - i += copy(dAtA[i:], m.Line) - } - if m.Partial { - dAtA[i] = 0x20 - i++ - if m.Partial { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil -} - -func encodeFixed64Entry(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Entry(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintEntry(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *LogEntry) Size() (n int) { - var l int - _ = l - l = len(m.Source) - if l > 0 { - n += 1 + l + sovEntry(uint64(l)) - } - if m.TimeNano != 0 { - n += 1 + sovEntry(uint64(m.TimeNano)) - } - l = len(m.Line) - if l > 0 { - n += 1 + l + sovEntry(uint64(l)) - } - if m.Partial { - n += 2 - } - return n -} - -func sovEntry(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozEntry(x uint64) (n int) { - return sovEntry(uint64((x << 1) ^ uint64((int64(x) 
>> 63)))) -} -func (m *LogEntry) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEntry - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LogEntry: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LogEntry: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEntry - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEntry - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Source = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeNano", wireType) - } - m.TimeNano = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEntry - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TimeNano |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEntry - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthEntry - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Line = append(m.Line[:0], dAtA[iNdEx:postIndex]...) 
- if m.Line == nil { - m.Line = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Partial", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEntry - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Partial = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipEntry(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthEntry - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipEntry(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEntry - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEntry - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEntry - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthEntry - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEntry - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipEntry(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthEntry = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowEntry = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("entry.proto", fileDescriptorEntry) } - -var fileDescriptorEntry = []byte{ - // 149 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x4e, 0xcd, 0x2b, 0x29, - 0xaa, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0xca, 0xe5, 0xe2, 0xf0, 0xc9, 0x4f, 0x77, 0x05, - 0x89, 0x08, 0x89, 0x71, 0xb1, 0x15, 0xe7, 0x97, 0x16, 0x25, 0xa7, 0x4a, 0x30, 0x2a, 0x30, 0x6a, - 0x70, 0x06, 0x41, 0x79, 0x42, 0xd2, 0x5c, 0x9c, 0x25, 0x99, 0xb9, 0xa9, 0xf1, 0x79, 0x89, 0x79, - 0xf9, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x1c, 0x20, 0x01, 0xbf, 0xc4, 0xbc, 0x7c, 0x21, - 0x21, 0x2e, 0x96, 0x9c, 0xcc, 0xbc, 0x54, 0x09, 0x66, 0x05, 0x46, 0x0d, 0x9e, 0x20, 0x30, 0x5b, - 0x48, 0x82, 0x8b, 0xbd, 0x20, 0xb1, 0xa8, 0x24, 0x33, 0x31, 0x47, 0x82, 0x45, 0x81, 0x51, 0x83, - 0x23, 0x08, 0xc6, 0x75, 
0xe2, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, - 0xe4, 0x18, 0x93, 0xd8, 0xc0, 0x6e, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x2d, 0x24, 0x5a, - 0xd4, 0x92, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/docker/docker/api/types/plugins/logdriver/gen.go b/vendor/github.com/docker/docker/api/types/plugins/logdriver/gen.go deleted file mode 100644 index e5f10b5e0..000000000 --- a/vendor/github.com/docker/docker/api/types/plugins/logdriver/gen.go +++ /dev/null @@ -1,3 +0,0 @@ -//go:generate protoc --gogofast_out=import_path=github.com/docker/docker/api/types/plugins/logdriver:. entry.proto - -package logdriver // import "github.com/docker/docker/api/types/plugins/logdriver" diff --git a/vendor/github.com/docker/docker/api/types/plugins/logdriver/io.go b/vendor/github.com/docker/docker/api/types/plugins/logdriver/io.go deleted file mode 100644 index 9081b3b45..000000000 --- a/vendor/github.com/docker/docker/api/types/plugins/logdriver/io.go +++ /dev/null @@ -1,87 +0,0 @@ -package logdriver // import "github.com/docker/docker/api/types/plugins/logdriver" - -import ( - "encoding/binary" - "io" -) - -const binaryEncodeLen = 4 - -// LogEntryEncoder encodes a LogEntry to a protobuf stream -// The stream should look like: -// -// [uint32 binary encoded message size][protobuf message] -// -// To decode an entry, read the first 4 bytes to get the size of the entry, -// then read `size` bytes from the stream. -type LogEntryEncoder interface { - Encode(*LogEntry) error -} - -// NewLogEntryEncoder creates a protobuf stream encoder for log entries. -// This is used to write out log entries to a stream. -func NewLogEntryEncoder(w io.Writer) LogEntryEncoder { - return &logEntryEncoder{ - w: w, - buf: make([]byte, 1024), - } -} - -type logEntryEncoder struct { - buf []byte - w io.Writer -} - -func (e *logEntryEncoder) Encode(l *LogEntry) error { - n := l.Size() - - total := n + binaryEncodeLen - if total > len(e.buf) { - e.buf = make([]byte, total) - } - binary.BigEndian.PutUint32(e.buf, uint32(n)) - - if _, err := l.MarshalTo(e.buf[binaryEncodeLen:]); err != nil { - return err - } - _, err := e.w.Write(e.buf[:total]) - return err -} - -// LogEntryDecoder decodes log entries from a stream -// It is expected that the wire format is as defined by LogEntryEncoder. -type LogEntryDecoder interface { - Decode(*LogEntry) error -} - -// NewLogEntryDecoder creates a new stream decoder for log entries -func NewLogEntryDecoder(r io.Reader) LogEntryDecoder { - return &logEntryDecoder{ - lenBuf: make([]byte, binaryEncodeLen), - buf: make([]byte, 1024), - r: r, - } -} - -type logEntryDecoder struct { - r io.Reader - lenBuf []byte - buf []byte -} - -func (d *logEntryDecoder) Decode(l *LogEntry) error { - _, err := io.ReadFull(d.r, d.lenBuf) - if err != nil { - return err - } - - size := int(binary.BigEndian.Uint32(d.lenBuf)) - if len(d.buf) < size { - d.buf = make([]byte, size) - } - - if _, err := io.ReadFull(d.r, d.buf[:size]); err != nil { - return err - } - return l.Unmarshal(d.buf[:size]) -} diff --git a/vendor/github.com/docker/docker/api/types/port.go b/vendor/github.com/docker/docker/api/types/port.go deleted file mode 100644 index d91234744..000000000 --- a/vendor/github.com/docker/docker/api/types/port.go +++ /dev/null @@ -1,23 +0,0 @@ -package types - -// This file was generated by the swagger tool. 
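The encoder and decoder above implement the framing spelled out in their comments: a 4-byte big-endian length followed by the protobuf-encoded LogEntry. A minimal round trip over an in-memory buffer, using only the logdriver package as removed here:

package example

import (
	"bytes"
	"fmt"
	"time"

	"github.com/docker/docker/api/types/plugins/logdriver"
)

func frameRoundTrip() error {
	var buf bytes.Buffer

	// Write one length-prefixed entry into the buffer.
	enc := logdriver.NewLogEntryEncoder(&buf)
	if err := enc.Encode(&logdriver.LogEntry{
		Source:   "stdout",
		TimeNano: time.Now().UnixNano(),
		Line:     []byte("hello"),
	}); err != nil {
		return err
	}

	// Read it back: the decoder first reads the 4-byte size, then the message.
	var e logdriver.LogEntry
	dec := logdriver.NewLogEntryDecoder(&buf)
	if err := dec.Decode(&e); err != nil {
		return err
	}
	fmt.Printf("%s: %s\n", e.Source, e.Line)
	return nil
}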
-// Editing this file might prove futile when you re-run the swagger generate command - -// Port An open port on a container -// swagger:model Port -type Port struct { - - // Host IP address that the container's port is mapped to - IP string `json:"IP,omitempty"` - - // Port on the container - // Required: true - PrivatePort uint16 `json:"PrivatePort"` - - // Port exposed on the host - PublicPort uint16 `json:"PublicPort,omitempty"` - - // type - // Required: true - Type string `json:"Type"` -} diff --git a/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/vendor/github.com/docker/docker/api/types/registry/authenticate.go deleted file mode 100644 index f0a2113e4..000000000 --- a/vendor/github.com/docker/docker/api/types/registry/authenticate.go +++ /dev/null @@ -1,21 +0,0 @@ -package registry // import "github.com/docker/docker/api/types/registry" - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// AuthenticateOKBody authenticate o k body -// swagger:model AuthenticateOKBody -type AuthenticateOKBody struct { - - // An opaque token used to authenticate a user after a successful login - // Required: true - IdentityToken string `json:"IdentityToken"` - - // The status of the authentication - // Required: true - Status string `json:"Status"` -} diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go deleted file mode 100644 index 8789ad3b3..000000000 --- a/vendor/github.com/docker/docker/api/types/registry/registry.go +++ /dev/null @@ -1,119 +0,0 @@ -package registry // import "github.com/docker/docker/api/types/registry" - -import ( - "encoding/json" - "net" - - "github.com/opencontainers/image-spec/specs-go/v1" -) - -// ServiceConfig stores daemon registry services configuration. 
-type ServiceConfig struct { - AllowNondistributableArtifactsCIDRs []*NetIPNet - AllowNondistributableArtifactsHostnames []string - InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` - IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` - Mirrors []string -} - -// NetIPNet is the net.IPNet type, which can be marshalled and -// unmarshalled to JSON -type NetIPNet net.IPNet - -// String returns the CIDR notation of ipnet -func (ipnet *NetIPNet) String() string { - return (*net.IPNet)(ipnet).String() -} - -// MarshalJSON returns the JSON representation of the IPNet -func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { - return json.Marshal((*net.IPNet)(ipnet).String()) -} - -// UnmarshalJSON sets the IPNet from a byte array of JSON -func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { - var ipnetStr string - if err = json.Unmarshal(b, &ipnetStr); err == nil { - var cidr *net.IPNet - if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { - *ipnet = NetIPNet(*cidr) - } - } - return -} - -// IndexInfo contains information about a registry -// -// RepositoryInfo Examples: -// { -// "Index" : { -// "Name" : "docker.io", -// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], -// "Secure" : true, -// "Official" : true, -// }, -// "RemoteName" : "library/debian", -// "LocalName" : "debian", -// "CanonicalName" : "docker.io/debian" -// "Official" : true, -// } -// -// { -// "Index" : { -// "Name" : "127.0.0.1:5000", -// "Mirrors" : [], -// "Secure" : false, -// "Official" : false, -// }, -// "RemoteName" : "user/repo", -// "LocalName" : "127.0.0.1:5000/user/repo", -// "CanonicalName" : "127.0.0.1:5000/user/repo", -// "Official" : false, -// } -type IndexInfo struct { - // Name is the name of the registry, such as "docker.io" - Name string - // Mirrors is a list of mirrors, expressed as URIs - Mirrors []string - // Secure is set to false if the registry is part of the list of - // insecure registries. Insecure registries accept HTTP and/or accept - // HTTPS with certificates from unknown CAs. - Secure bool - // Official indicates whether this is an official registry - Official bool -} - -// SearchResult describes a search result returned from a registry -type SearchResult struct { - // StarCount indicates the number of stars this repository has - StarCount int `json:"star_count"` - // IsOfficial is true if the result is from an official repository. 
- IsOfficial bool `json:"is_official"` - // Name is the name of the repository - Name string `json:"name"` - // IsAutomated indicates whether the result is automated - IsAutomated bool `json:"is_automated"` - // Description is a textual description of the repository - Description string `json:"description"` -} - -// SearchResults lists a collection search results returned from a registry -type SearchResults struct { - // Query contains the query string that generated the search results - Query string `json:"query"` - // NumResults indicates the number of results the query returned - NumResults int `json:"num_results"` - // Results is a slice containing the actual results for the search - Results []SearchResult `json:"results"` -} - -// DistributionInspect describes the result obtained from contacting the -// registry to retrieve image metadata -type DistributionInspect struct { - // Descriptor contains information about the manifest, including - // the content addressable digest - Descriptor v1.Descriptor - // Platforms contains the list of platforms supported by the image, - // obtained by parsing the manifest - Platforms []v1.Platform -} diff --git a/vendor/github.com/docker/docker/api/types/seccomp.go b/vendor/github.com/docker/docker/api/types/seccomp.go deleted file mode 100644 index 67a41e1a8..000000000 --- a/vendor/github.com/docker/docker/api/types/seccomp.go +++ /dev/null @@ -1,93 +0,0 @@ -package types // import "github.com/docker/docker/api/types" - -// Seccomp represents the config for a seccomp profile for syscall restriction. -type Seccomp struct { - DefaultAction Action `json:"defaultAction"` - // Architectures is kept to maintain backward compatibility with the old - // seccomp profile. - Architectures []Arch `json:"architectures,omitempty"` - ArchMap []Architecture `json:"archMap,omitempty"` - Syscalls []*Syscall `json:"syscalls"` -} - -// Architecture is used to represent a specific architecture -// and its sub-architectures -type Architecture struct { - Arch Arch `json:"architecture"` - SubArches []Arch `json:"subArchitectures"` -} - -// Arch used for architectures -type Arch string - -// Additional architectures permitted to be used for system calls -// By default only the native architecture of the kernel is permitted -const ( - ArchX86 Arch = "SCMP_ARCH_X86" - ArchX86_64 Arch = "SCMP_ARCH_X86_64" - ArchX32 Arch = "SCMP_ARCH_X32" - ArchARM Arch = "SCMP_ARCH_ARM" - ArchAARCH64 Arch = "SCMP_ARCH_AARCH64" - ArchMIPS Arch = "SCMP_ARCH_MIPS" - ArchMIPS64 Arch = "SCMP_ARCH_MIPS64" - ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32" - ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL" - ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64" - ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32" - ArchPPC Arch = "SCMP_ARCH_PPC" - ArchPPC64 Arch = "SCMP_ARCH_PPC64" - ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE" - ArchS390 Arch = "SCMP_ARCH_S390" - ArchS390X Arch = "SCMP_ARCH_S390X" -) - -// Action taken upon Seccomp rule match -type Action string - -// Define actions for Seccomp rules -const ( - ActKill Action = "SCMP_ACT_KILL" - ActTrap Action = "SCMP_ACT_TRAP" - ActErrno Action = "SCMP_ACT_ERRNO" - ActTrace Action = "SCMP_ACT_TRACE" - ActAllow Action = "SCMP_ACT_ALLOW" -) - -// Operator used to match syscall arguments in Seccomp -type Operator string - -// Define operators for syscall arguments in Seccomp -const ( - OpNotEqual Operator = "SCMP_CMP_NE" - OpLessThan Operator = "SCMP_CMP_LT" - OpLessEqual Operator = "SCMP_CMP_LE" - OpEqualTo Operator = "SCMP_CMP_EQ" - OpGreaterEqual Operator = "SCMP_CMP_GE" - OpGreaterThan 
Operator = "SCMP_CMP_GT" - OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ" -) - -// Arg used for matching specific syscall arguments in Seccomp -type Arg struct { - Index uint `json:"index"` - Value uint64 `json:"value"` - ValueTwo uint64 `json:"valueTwo"` - Op Operator `json:"op"` -} - -// Filter is used to conditionally apply Seccomp rules -type Filter struct { - Caps []string `json:"caps,omitempty"` - Arches []string `json:"arches,omitempty"` -} - -// Syscall is used to match a group of syscalls in Seccomp -type Syscall struct { - Name string `json:"name,omitempty"` - Names []string `json:"names,omitempty"` - Action Action `json:"action"` - Args []*Arg `json:"args"` - Comment string `json:"comment"` - Includes Filter `json:"includes"` - Excludes Filter `json:"excludes"` -} diff --git a/vendor/github.com/docker/docker/api/types/service_update_response.go b/vendor/github.com/docker/docker/api/types/service_update_response.go deleted file mode 100644 index 74ea64b1b..000000000 --- a/vendor/github.com/docker/docker/api/types/service_update_response.go +++ /dev/null @@ -1,12 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// ServiceUpdateResponse service update response -// swagger:model ServiceUpdateResponse -type ServiceUpdateResponse struct { - - // Optional warning messages - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/api/types/stats.go b/vendor/github.com/docker/docker/api/types/stats.go deleted file mode 100644 index 60175c061..000000000 --- a/vendor/github.com/docker/docker/api/types/stats.go +++ /dev/null @@ -1,181 +0,0 @@ -// Package types is used for API stability in the types and response to the -// consumers of the API stats endpoint. -package types // import "github.com/docker/docker/api/types" - -import "time" - -// ThrottlingData stores CPU throttling stats of one running container. -// Not used on Windows. -type ThrottlingData struct { - // Number of periods with throttling active - Periods uint64 `json:"periods"` - // Number of periods when the container hits its throttling limit. - ThrottledPeriods uint64 `json:"throttled_periods"` - // Aggregate time the container was throttled for in nanoseconds. - ThrottledTime uint64 `json:"throttled_time"` -} - -// CPUUsage stores All CPU stats aggregated since container inception. -type CPUUsage struct { - // Total CPU time consumed. - // Units: nanoseconds (Linux) - // Units: 100's of nanoseconds (Windows) - TotalUsage uint64 `json:"total_usage"` - - // Total CPU time consumed per core (Linux). Not used on Windows. - // Units: nanoseconds. - PercpuUsage []uint64 `json:"percpu_usage,omitempty"` - - // Time spent by tasks of the cgroup in kernel mode (Linux). - // Time spent by all container processes in kernel mode (Windows). - // Units: nanoseconds (Linux). - // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers. - UsageInKernelmode uint64 `json:"usage_in_kernelmode"` - - // Time spent by tasks of the cgroup in user mode (Linux). - // Time spent by all container processes in user mode (Windows). - // Units: nanoseconds (Linux). - // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers - UsageInUsermode uint64 `json:"usage_in_usermode"` -} - -// CPUStats aggregates and wraps all CPU related info of container -type CPUStats struct { - // CPU Usage. Linux and Windows. - CPUUsage CPUUsage `json:"cpu_usage"` - - // System Usage. Linux only. 
- SystemUsage uint64 `json:"system_cpu_usage,omitempty"` - - // Online CPUs. Linux only. - OnlineCPUs uint32 `json:"online_cpus,omitempty"` - - // Throttling Data. Linux only. - ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` -} - -// MemoryStats aggregates all memory stats since container inception on Linux. -// Windows returns stats for commit and private working set only. -type MemoryStats struct { - // Linux Memory Stats - - // current res_counter usage for memory - Usage uint64 `json:"usage,omitempty"` - // maximum usage ever recorded. - MaxUsage uint64 `json:"max_usage,omitempty"` - // TODO(vishh): Export these as stronger types. - // all the stats exported via memory.stat. - Stats map[string]uint64 `json:"stats,omitempty"` - // number of times memory usage hits limits. - Failcnt uint64 `json:"failcnt,omitempty"` - Limit uint64 `json:"limit,omitempty"` - - // Windows Memory Stats - // See https://technet.microsoft.com/en-us/magazine/ff382715.aspx - - // committed bytes - Commit uint64 `json:"commitbytes,omitempty"` - // peak committed bytes - CommitPeak uint64 `json:"commitpeakbytes,omitempty"` - // private working set - PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"` -} - -// BlkioStatEntry is one small entity to store a piece of Blkio stats -// Not used on Windows. -type BlkioStatEntry struct { - Major uint64 `json:"major"` - Minor uint64 `json:"minor"` - Op string `json:"op"` - Value uint64 `json:"value"` -} - -// BlkioStats stores All IO service stats for data read and write. -// This is a Linux specific structure as the differences between expressing -// block I/O on Windows and Linux are sufficiently significant to make -// little sense attempting to morph into a combined structure. -type BlkioStats struct { - // number of bytes transferred to and from the block device - IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` - IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` - IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` - IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` - IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` - IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` - IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` - SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` -} - -// StorageStats is the disk I/O stats for read/write on Windows. -type StorageStats struct { - ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"` - ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"` - WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"` - WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"` -} - -// NetworkStats aggregates the network stats of one container -type NetworkStats struct { - // Bytes received. Windows and Linux. - RxBytes uint64 `json:"rx_bytes"` - // Packets received. Windows and Linux. - RxPackets uint64 `json:"rx_packets"` - // Received errors. Not used on Windows. Note that we dont `omitempty` this - // field as it is expected in the >=v1.21 API stats structure. - RxErrors uint64 `json:"rx_errors"` - // Incoming packets dropped. Windows and Linux. - RxDropped uint64 `json:"rx_dropped"` - // Bytes sent. Windows and Linux. - TxBytes uint64 `json:"tx_bytes"` - // Packets sent. Windows and Linux. - TxPackets uint64 `json:"tx_packets"` - // Sent errors. Not used on Windows. 
Note that we dont `omitempty` this - // field as it is expected in the >=v1.21 API stats structure. - TxErrors uint64 `json:"tx_errors"` - // Outgoing packets dropped. Windows and Linux. - TxDropped uint64 `json:"tx_dropped"` - // Endpoint ID. Not used on Linux. - EndpointID string `json:"endpoint_id,omitempty"` - // Instance ID. Not used on Linux. - InstanceID string `json:"instance_id,omitempty"` -} - -// PidsStats contains the stats of a container's pids -type PidsStats struct { - // Current is the number of pids in the cgroup - Current uint64 `json:"current,omitempty"` - // Limit is the hard limit on the number of pids in the cgroup. - // A "Limit" of 0 means that there is no limit. - Limit uint64 `json:"limit,omitempty"` -} - -// Stats is the top-level struct aggregating all types of stats of one container -type Stats struct { - // Common stats - Read time.Time `json:"read"` - PreRead time.Time `json:"preread"` - - // Linux specific stats, not populated on Windows. - PidsStats PidsStats `json:"pids_stats,omitempty"` - BlkioStats BlkioStats `json:"blkio_stats,omitempty"` - - // Windows specific stats, not populated on Linux. - NumProcs uint32 `json:"num_procs"` - StorageStats StorageStats `json:"storage_stats,omitempty"` - - // Shared stats - CPUStats CPUStats `json:"cpu_stats,omitempty"` - PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous" - MemoryStats MemoryStats `json:"memory_stats,omitempty"` -} - -// StatsJSON extends Stats with the container name and ID, plus per-interface network stats -type StatsJSON struct { - Stats - - Name string `json:"name,omitempty"` - ID string `json:"id,omitempty"` - - // Networks request version >=1.21 - Networks map[string]NetworkStats `json:"networks,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/strslice/strslice.go b/vendor/github.com/docker/docker/api/types/strslice/strslice.go deleted file mode 100644 index 82921cebc..000000000 --- a/vendor/github.com/docker/docker/api/types/strslice/strslice.go +++ /dev/null @@ -1,30 +0,0 @@ -package strslice // import "github.com/docker/docker/api/types/strslice" - -import "encoding/json" - -// StrSlice represents a string or an array of strings. -// We need to override the json decoder to accept both options. -type StrSlice []string - -// UnmarshalJSON decodes the byte slice whether it's a string or an array of -// strings. This method is needed to implement json.Unmarshaler. -func (e *StrSlice) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - // With no input, we preserve the existing value by returning nil and - // leaving the target alone. This allows defining default values for - // the type. - return nil - } - - p := make([]string, 0, 1) - if err := json.Unmarshal(b, &p); err != nil { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - p = append(p, s) - } - - *e = p - return nil -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/docker/docker/api/types/swarm/common.go deleted file mode 100644 index ef020f458..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/common.go +++ /dev/null @@ -1,40 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "time" - -// Version represents the internal object version. -type Version struct { - Index uint64 `json:",omitempty"` -} - -// Meta is a base object inherited by most of the other ones.
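The PreCPUStats field above ("Pre" = previous) exists so a consumer can derive a CPU percentage from two consecutive samples. Below is a minimal sketch of that delta calculation, assuming the vendored types package is still importable at github.com/docker/docker/api/types; the formula follows the widely used `docker stats` approach and is illustrative rather than the engine's exact code:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
)

// cpuPercent derives a CPU usage percentage from the current and previous
// samples embedded in a stats payload.
func cpuPercent(s *types.StatsJSON) float64 {
	cpuDelta := float64(s.CPUStats.CPUUsage.TotalUsage) - float64(s.PreCPUStats.CPUUsage.TotalUsage)
	sysDelta := float64(s.CPUStats.SystemUsage) - float64(s.PreCPUStats.SystemUsage)
	online := float64(s.CPUStats.OnlineCPUs)
	if online == 0 {
		// Older daemons omit online_cpus; fall back to the per-CPU slice.
		online = float64(len(s.CPUStats.CPUUsage.PercpuUsage))
	}
	if cpuDelta <= 0 || sysDelta <= 0 {
		return 0
	}
	return cpuDelta / sysDelta * online * 100.0
}

func main() {
	var s types.StatsJSON
	s.CPUStats.CPUUsage.TotalUsage = 400000000
	s.PreCPUStats.CPUUsage.TotalUsage = 200000000
	s.CPUStats.SystemUsage = 2000000000
	s.PreCPUStats.SystemUsage = 1000000000
	s.CPUStats.OnlineCPUs = 4
	fmt.Printf("%.1f%%\n", cpuPercent(&s)) // 80.0%
}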
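Likewise, the StrSlice decoder above accepts either JSON shape for the same field. A quick round-trip sketch, assuming the vendored strslice package path:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/strslice"
)

func main() {
	var cmd strslice.StrSlice

	// A bare JSON string decodes to a one-element slice...
	if err := json.Unmarshal([]byte(`"/bin/sh"`), &cmd); err != nil {
		panic(err)
	}
	fmt.Println(cmd) // [/bin/sh]

	// ...and a JSON array decodes element by element.
	if err := json.Unmarshal([]byte(`["/bin/sh", "-c", "echo hi"]`), &cmd); err != nil {
		panic(err)
	}
	fmt.Println(cmd) // [/bin/sh -c echo hi]
}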
-type Meta struct { - Version Version `json:",omitempty"` - CreatedAt time.Time `json:",omitempty"` - UpdatedAt time.Time `json:",omitempty"` -} - -// Annotations represents how to describe an object. -type Annotations struct { - Name string `json:",omitempty"` - Labels map[string]string `json:"Labels"` -} - -// Driver represents a driver (network, logging, secrets backend). -type Driver struct { - Name string `json:",omitempty"` - Options map[string]string `json:",omitempty"` -} - -// TLSInfo represents the TLS information about what CA certificate is trusted, -// and who the issuer for a TLS certificate is -type TLSInfo struct { - // TrustRoot is the trusted CA root certificate in PEM format - TrustRoot string `json:",omitempty"` - - // CertIssuer is the raw subject bytes of the issuer - CertIssuerSubject []byte `json:",omitempty"` - - // CertIssuerPublicKey is the raw public key bytes of the issuer - CertIssuerPublicKey []byte `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/config.go b/vendor/github.com/docker/docker/api/types/swarm/config.go deleted file mode 100644 index a1555cf43..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/config.go +++ /dev/null @@ -1,35 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "os" - -// Config represents a config. -type Config struct { - ID string - Meta - Spec ConfigSpec -} - -// ConfigSpec represents a config specification from a config in swarm -type ConfigSpec struct { - Annotations - Data []byte `json:",omitempty"` - - // Templating controls whether and how to evaluate the config payload as - // a template. If it is not set, no templating is used. - Templating *Driver `json:",omitempty"` -} - -// ConfigReferenceFileTarget is a file target in a config reference -type ConfigReferenceFileTarget struct { - Name string - UID string - GID string - Mode os.FileMode -} - -// ConfigReference is a reference to a config in swarm -type ConfigReference struct { - File *ConfigReferenceFileTarget - ConfigID string - ConfigName string -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go deleted file mode 100644 index 151211ff5..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/container.go +++ /dev/null @@ -1,74 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "time" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/mount" -) - -// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf) -// Detailed documentation is available in: -// http://man7.org/linux/man-pages/man5/resolv.conf.5.html -// `nameserver`, `search`, `options` have been supported. -// TODO: `domain` is not supported yet. -type DNSConfig struct { - // Nameservers specifies the IP addresses of the name servers - Nameservers []string `json:",omitempty"` - // Search specifies the search list for host-name lookup - Search []string `json:",omitempty"` - // Options allows certain internal resolver variables to be modified - Options []string `json:",omitempty"` -} - -// SELinuxContext contains the SELinux labels of the container. 
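The config types deleted above pair a cluster-stored payload (ConfigSpec) with a file target describing how that payload surfaces inside a container (ConfigReference). A hedged sketch, assuming the vendored swarm package; the ID, path, and mode values are placeholders:

package main

import (
	"fmt"

	swarm "github.com/docker/docker/api/types/swarm"
)

func main() {
	// A config whose payload is stored by the cluster.
	spec := swarm.ConfigSpec{
		Annotations: swarm.Annotations{Name: "app-config"},
		Data:        []byte("listen_port: 8080\n"),
	}

	// A reference that asks for the config to be surfaced to a container
	// as a read-only file. ConfigID would normally come back from the
	// create call; "abc123" is a placeholder.
	ref := swarm.ConfigReference{
		ConfigID:   "abc123",
		ConfigName: spec.Name,
		File: &swarm.ConfigReferenceFileTarget{
			Name: "/etc/app/config.yaml",
			UID:  "0",
			GID:  "0",
			Mode: 0444,
		},
	}
	fmt.Println(ref.ConfigName, ref.File.Name)
}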
-type SELinuxContext struct { - Disable bool - - User string - Role string - Type string - Level string -} - -// CredentialSpec for managed service account (Windows only) -type CredentialSpec struct { - File string - Registry string -} - -// Privileges defines the security options for the container. -type Privileges struct { - CredentialSpec *CredentialSpec - SELinuxContext *SELinuxContext -} - -// ContainerSpec represents the spec of a container. -type ContainerSpec struct { - Image string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Command []string `json:",omitempty"` - Args []string `json:",omitempty"` - Hostname string `json:",omitempty"` - Env []string `json:",omitempty"` - Dir string `json:",omitempty"` - User string `json:",omitempty"` - Groups []string `json:",omitempty"` - Privileges *Privileges `json:",omitempty"` - Init *bool `json:",omitempty"` - StopSignal string `json:",omitempty"` - TTY bool `json:",omitempty"` - OpenStdin bool `json:",omitempty"` - ReadOnly bool `json:",omitempty"` - Mounts []mount.Mount `json:",omitempty"` - StopGracePeriod *time.Duration `json:",omitempty"` - Healthcheck *container.HealthConfig `json:",omitempty"` - // The format of extra hosts on swarmkit is specified in: - // http://man7.org/linux/man-pages/man5/hosts.5.html - // IP_address canonical_hostname [aliases...] - Hosts []string `json:",omitempty"` - DNSConfig *DNSConfig `json:",omitempty"` - Secrets []*SecretReference `json:",omitempty"` - Configs []*ConfigReference `json:",omitempty"` - Isolation container.Isolation `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/network.go b/vendor/github.com/docker/docker/api/types/swarm/network.go deleted file mode 100644 index 98ef3284d..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/network.go +++ /dev/null @@ -1,121 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "github.com/docker/docker/api/types/network" -) - -// Endpoint represents an endpoint. -type Endpoint struct { - Spec EndpointSpec `json:",omitempty"` - Ports []PortConfig `json:",omitempty"` - VirtualIPs []EndpointVirtualIP `json:",omitempty"` -} - -// EndpointSpec represents the spec of an endpoint. -type EndpointSpec struct { - Mode ResolutionMode `json:",omitempty"` - Ports []PortConfig `json:",omitempty"` -} - -// ResolutionMode represents a resolution mode. -type ResolutionMode string - -const ( - // ResolutionModeVIP VIP - ResolutionModeVIP ResolutionMode = "vip" - // ResolutionModeDNSRR DNSRR - ResolutionModeDNSRR ResolutionMode = "dnsrr" -) - -// PortConfig represents the config of a port. -type PortConfig struct { - Name string `json:",omitempty"` - Protocol PortConfigProtocol `json:",omitempty"` - // TargetPort is the port inside the container - TargetPort uint32 `json:",omitempty"` - // PublishedPort is the port on the swarm hosts - PublishedPort uint32 `json:",omitempty"` - // PublishMode is the mode in which port is published - PublishMode PortConfigPublishMode `json:",omitempty"` -} - -// PortConfigPublishMode represents the mode in which the port is to -// be published. -type PortConfigPublishMode string - -const ( - // PortConfigPublishModeIngress is used for ports published - // for ingress load balancing using routing mesh. - PortConfigPublishModeIngress PortConfigPublishMode = "ingress" - // PortConfigPublishModeHost is used for ports published - // for direct host level access on the host where the task is running. 
- PortConfigPublishModeHost PortConfigPublishMode = "host" -) - -// PortConfigProtocol represents the protocol of a port. -type PortConfigProtocol string - -const ( - // TODO(stevvooe): These should be used generally, not just for PortConfig. - - // PortConfigProtocolTCP TCP - PortConfigProtocolTCP PortConfigProtocol = "tcp" - // PortConfigProtocolUDP UDP - PortConfigProtocolUDP PortConfigProtocol = "udp" - // PortConfigProtocolSCTP SCTP - PortConfigProtocolSCTP PortConfigProtocol = "sctp" -) - -// EndpointVirtualIP represents the virtual ip of a port. -type EndpointVirtualIP struct { - NetworkID string `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// Network represents a network. -type Network struct { - ID string - Meta - Spec NetworkSpec `json:",omitempty"` - DriverState Driver `json:",omitempty"` - IPAMOptions *IPAMOptions `json:",omitempty"` -} - -// NetworkSpec represents the spec of a network. -type NetworkSpec struct { - Annotations - DriverConfiguration *Driver `json:",omitempty"` - IPv6Enabled bool `json:",omitempty"` - Internal bool `json:",omitempty"` - Attachable bool `json:",omitempty"` - Ingress bool `json:",omitempty"` - IPAMOptions *IPAMOptions `json:",omitempty"` - ConfigFrom *network.ConfigReference `json:",omitempty"` - Scope string `json:",omitempty"` -} - -// NetworkAttachmentConfig represents the configuration of a network attachment. -type NetworkAttachmentConfig struct { - Target string `json:",omitempty"` - Aliases []string `json:",omitempty"` - DriverOpts map[string]string `json:",omitempty"` -} - -// NetworkAttachment represents a network attachment. -type NetworkAttachment struct { - Network Network `json:",omitempty"` - Addresses []string `json:",omitempty"` -} - -// IPAMOptions represents ipam options. -type IPAMOptions struct { - Driver Driver `json:",omitempty"` - Configs []IPAMConfig `json:",omitempty"` -} - -// IPAMConfig represents ipam configuration. -type IPAMConfig struct { - Subnet string `json:",omitempty"` - Range string `json:",omitempty"` - Gateway string `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go deleted file mode 100644 index 1e30f5fa1..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/node.go +++ /dev/null @@ -1,115 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -// Node represents a node. -type Node struct { - ID string - Meta - // Spec defines the desired state of the node as specified by the user. - // The system will honor this and will *never* modify it. - Spec NodeSpec `json:",omitempty"` - // Description encapsulates the properties of the Node as reported by the - // agent. - Description NodeDescription `json:",omitempty"` - // Status provides the current status of the node, as seen by the manager. - Status NodeStatus `json:",omitempty"` - // ManagerStatus provides the current status of the node's manager - // component, if the node is a manager. - ManagerStatus *ManagerStatus `json:",omitempty"` -} - -// NodeSpec represents the spec of a node. -type NodeSpec struct { - Annotations - Role NodeRole `json:",omitempty"` - Availability NodeAvailability `json:",omitempty"` -} - -// NodeRole represents the role of a node. -type NodeRole string - -const ( - // NodeRoleWorker WORKER - NodeRoleWorker NodeRole = "worker" - // NodeRoleManager MANAGER - NodeRoleManager NodeRole = "manager" -) - -// NodeAvailability represents the availability of a node. 
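The network and port types removed above compose as follows. A sketch with illustrative addresses only; the IPAM driver name "default" is a placeholder, not a guaranteed value:

package main

import (
	"fmt"

	swarm "github.com/docker/docker/api/types/swarm"
)

func main() {
	// An attachable network with a statically configured subnet.
	spec := swarm.NetworkSpec{
		Annotations: swarm.Annotations{Name: "app-net"},
		Attachable:  true,
		IPAMOptions: &swarm.IPAMOptions{
			Driver: swarm.Driver{Name: "default"},
			Configs: []swarm.IPAMConfig{{
				Subnet:  "10.10.0.0/24",
				Gateway: "10.10.0.1",
			}},
		},
	}

	// Publish container port 8080 as swarm port 80 through the routing mesh.
	port := swarm.PortConfig{
		Protocol:      swarm.PortConfigProtocolTCP,
		TargetPort:    8080,
		PublishedPort: 80,
		PublishMode:   swarm.PortConfigPublishModeIngress,
	}
	fmt.Println(spec.Name, port.PublishedPort)
}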
-type NodeAvailability string - -const ( - // NodeAvailabilityActive ACTIVE - NodeAvailabilityActive NodeAvailability = "active" - // NodeAvailabilityPause PAUSE - NodeAvailabilityPause NodeAvailability = "pause" - // NodeAvailabilityDrain DRAIN - NodeAvailabilityDrain NodeAvailability = "drain" -) - -// NodeDescription represents the description of a node. -type NodeDescription struct { - Hostname string `json:",omitempty"` - Platform Platform `json:",omitempty"` - Resources Resources `json:",omitempty"` - Engine EngineDescription `json:",omitempty"` - TLSInfo TLSInfo `json:",omitempty"` -} - -// Platform represents the platform (Arch/OS). -type Platform struct { - Architecture string `json:",omitempty"` - OS string `json:",omitempty"` -} - -// EngineDescription represents the description of an engine. -type EngineDescription struct { - EngineVersion string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Plugins []PluginDescription `json:",omitempty"` -} - -// PluginDescription represents the description of an engine plugin. -type PluginDescription struct { - Type string `json:",omitempty"` - Name string `json:",omitempty"` -} - -// NodeStatus represents the status of a node. -type NodeStatus struct { - State NodeState `json:",omitempty"` - Message string `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// Reachability represents the reachability of a node. -type Reachability string - -const ( - // ReachabilityUnknown UNKNOWN - ReachabilityUnknown Reachability = "unknown" - // ReachabilityUnreachable UNREACHABLE - ReachabilityUnreachable Reachability = "unreachable" - // ReachabilityReachable REACHABLE - ReachabilityReachable Reachability = "reachable" -) - -// ManagerStatus represents the status of a manager. -type ManagerStatus struct { - Leader bool `json:",omitempty"` - Reachability Reachability `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// NodeState represents the state of a node. 
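The node status types above can be combined to ask simple health questions. A sketch of one such check, assuming the vendored swarm package; real quorum handling lives in swarmkit, so treat this as illustrative only:

package main

import (
	"fmt"

	swarm "github.com/docker/docker/api/types/swarm"
)

// reachableManager reports whether a node looks like a healthy manager: it
// must hold the manager role and its manager component must be reachable.
func reachableManager(n swarm.Node) bool {
	if n.Spec.Role != swarm.NodeRoleManager || n.ManagerStatus == nil {
		return false
	}
	return n.ManagerStatus.Reachability == swarm.ReachabilityReachable
}

func main() {
	n := swarm.Node{
		Spec: swarm.NodeSpec{Role: swarm.NodeRoleManager},
		ManagerStatus: &swarm.ManagerStatus{
			Leader:       true,
			Reachability: swarm.ReachabilityReachable,
		},
	}
	fmt.Println(reachableManager(n)) // true
}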
-type NodeState string - -const ( - // NodeStateUnknown UNKNOWN - NodeStateUnknown NodeState = "unknown" - // NodeStateDown DOWN - NodeStateDown NodeState = "down" - // NodeStateReady READY - NodeStateReady NodeState = "ready" - // NodeStateDisconnected DISCONNECTED - NodeStateDisconnected NodeState = "disconnected" -) diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/vendor/github.com/docker/docker/api/types/swarm/runtime.go deleted file mode 100644 index 0c77403cc..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime.go +++ /dev/null @@ -1,27 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -// RuntimeType is the type of runtime used for the TaskSpec -type RuntimeType string - -// RuntimeURL is the proto type url -type RuntimeURL string - -const ( - // RuntimeContainer is the container based runtime - RuntimeContainer RuntimeType = "container" - // RuntimePlugin is the plugin based runtime - RuntimePlugin RuntimeType = "plugin" - // RuntimeNetworkAttachment is the network attachment runtime - RuntimeNetworkAttachment RuntimeType = "attachment" - - // RuntimeURLContainer is the proto url for the container type - RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer" - // RuntimeURLPlugin is the proto url for the plugin type - RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin" -) - -// NetworkAttachmentSpec represents the runtime spec type for network -// attachment tasks -type NetworkAttachmentSpec struct { - ContainerID string -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go deleted file mode 100644 index 98c2806c3..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go +++ /dev/null @@ -1,3 +0,0 @@ -//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto - -package runtime // import "github.com/docker/docker/api/types/swarm/runtime" diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go deleted file mode 100644 index 1fdc9b043..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go +++ /dev/null @@ -1,712 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: plugin.proto -// DO NOT EDIT! - -/* - Package runtime is a generated protocol buffer package. - - It is generated from these files: - plugin.proto - - It has these top-level messages: - PluginSpec - PluginPrivilege -*/ -package runtime - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -// PluginSpec defines the base payload which clients can specify for creating -// a service with the plugin runtime. 
-type PluginSpec struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` - Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"` - Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` -} - -func (m *PluginSpec) Reset() { *m = PluginSpec{} } -func (m *PluginSpec) String() string { return proto.CompactTextString(m) } -func (*PluginSpec) ProtoMessage() {} -func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } - -func (m *PluginSpec) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PluginSpec) GetRemote() string { - if m != nil { - return m.Remote - } - return "" -} - -func (m *PluginSpec) GetPrivileges() []*PluginPrivilege { - if m != nil { - return m.Privileges - } - return nil -} - -func (m *PluginSpec) GetDisabled() bool { - if m != nil { - return m.Disabled - } - return false -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. -type PluginPrivilege struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"` -} - -func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } -func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } -func (*PluginPrivilege) ProtoMessage() {} -func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } - -func (m *PluginPrivilege) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PluginPrivilege) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *PluginPrivilege) GetValue() []string { - if m != nil { - return m.Value - } - return nil -} - -func init() { - proto.RegisterType((*PluginSpec)(nil), "PluginSpec") - proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") -} -func (m *PluginSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Remote) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) - i += copy(dAtA[i:], m.Remote) - } - if len(m.Privileges) > 0 { - for _, msg := range m.Privileges { - dAtA[i] = 0x1a - i++ - i = encodeVarintPlugin(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.Disabled { - dAtA[i] = 0x20 - i++ - if m.Disabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil -} - -func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i += 
copy(dAtA[i:], m.Name) - } - if len(m.Description) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - } - if len(m.Value) > 0 { - for _, s := range m.Value { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func encodeFixed64Plugin(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Plugin(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *PluginSpec) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - l = len(m.Remote) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - if len(m.Privileges) > 0 { - for _, e := range m.Privileges { - l = e.Size() - n += 1 + l + sovPlugin(uint64(l)) - } - } - if m.Disabled { - n += 2 - } - return n -} - -func (m *PluginPrivilege) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - l = len(m.Description) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - if len(m.Value) > 0 { - for _, s := range m.Value { - l = len(s) - n += 1 + l + sovPlugin(uint64(l)) - } - } - return n -} - -func sovPlugin(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozPlugin(x uint64) (n int) { - return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *PluginSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) - } - var stringLen uint64 - 
for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Remote = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Privileges = append(m.Privileges, &PluginPrivilege{}) - if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Disabled = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipPlugin(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthPlugin - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 
0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPlugin(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthPlugin - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipPlugin(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthPlugin - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipPlugin(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } - -var fileDescriptorPlugin = []byte{ - // 196 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d, - 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x6a, 0x63, 0xe4, 0xe2, 0x0a, 0x00, 0x0b, - 0x04, 
0x17, 0xa4, 0x26, 0x0b, 0x09, 0x71, 0xb1, 0xe4, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, - 0x6a, 0x70, 0x06, 0x81, 0xd9, 0x42, 0x62, 0x5c, 0x6c, 0x45, 0xa9, 0xb9, 0xf9, 0x25, 0xa9, 0x12, - 0x4c, 0x60, 0x51, 0x28, 0x4f, 0xc8, 0x80, 0x8b, 0xab, 0xa0, 0x28, 0xb3, 0x2c, 0x33, 0x27, 0x35, - 0x3d, 0xb5, 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x40, 0x0f, 0x62, 0x58, 0x00, 0x4c, - 0x22, 0x08, 0x49, 0x8d, 0x90, 0x14, 0x17, 0x47, 0x4a, 0x66, 0x71, 0x62, 0x52, 0x4e, 0x6a, 0x8a, - 0x04, 0x8b, 0x02, 0xa3, 0x06, 0x47, 0x10, 0x9c, 0xaf, 0x14, 0xcb, 0xc5, 0x8f, 0xa6, 0x15, 0xab, - 0x63, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0, - 0x2e, 0x42, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x88, 0x33, - 0x08, 0xc2, 0x71, 0xe2, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, - 0x18, 0x93, 0xd8, 0xc0, 0x9e, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x84, 0xad, 0x79, - 0x0c, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/docker/docker/api/types/swarm/secret.go deleted file mode 100644 index d5213ec98..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/secret.go +++ /dev/null @@ -1,36 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "os" - -// Secret represents a secret. -type Secret struct { - ID string - Meta - Spec SecretSpec -} - -// SecretSpec represents a secret specification from a secret in swarm -type SecretSpec struct { - Annotations - Data []byte `json:",omitempty"` - Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store - - // Templating controls whether and how to evaluate the secret payload as - // a template. If it is not set, no templating is used. - Templating *Driver `json:",omitempty"` -} - -// SecretReferenceFileTarget is a file target in a secret reference -type SecretReferenceFileTarget struct { - Name string - UID string - GID string - Mode os.FileMode -} - -// SecretReference is a reference to a secret in swarm -type SecretReference struct { - File *SecretReferenceFileTarget - SecretID string - SecretName string -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go deleted file mode 100644 index abf192e75..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/service.go +++ /dev/null @@ -1,124 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "time" - -// Service represents a service. -type Service struct { - ID string - Meta - Spec ServiceSpec `json:",omitempty"` - PreviousSpec *ServiceSpec `json:",omitempty"` - Endpoint Endpoint `json:",omitempty"` - UpdateStatus *UpdateStatus `json:",omitempty"` -} - -// ServiceSpec represents the spec of a service. -type ServiceSpec struct { - Annotations - - // TaskTemplate defines how the service should construct new tasks when - // orchestrating this service. - TaskTemplate TaskSpec `json:",omitempty"` - Mode ServiceMode `json:",omitempty"` - UpdateConfig *UpdateConfig `json:",omitempty"` - RollbackConfig *UpdateConfig `json:",omitempty"` - - // Networks field in ServiceSpec is deprecated. The - // same field in TaskSpec should be used instead. - // This field will be removed in a future release. 
- Networks []NetworkAttachmentConfig `json:",omitempty"` - EndpointSpec *EndpointSpec `json:",omitempty"` -} - -// ServiceMode represents the mode of a service. -type ServiceMode struct { - Replicated *ReplicatedService `json:",omitempty"` - Global *GlobalService `json:",omitempty"` -} - -// UpdateState is the state of a service update. -type UpdateState string - -const ( - // UpdateStateUpdating is the updating state. - UpdateStateUpdating UpdateState = "updating" - // UpdateStatePaused is the paused state. - UpdateStatePaused UpdateState = "paused" - // UpdateStateCompleted is the completed state. - UpdateStateCompleted UpdateState = "completed" - // UpdateStateRollbackStarted is the state with a rollback in progress. - UpdateStateRollbackStarted UpdateState = "rollback_started" - // UpdateStateRollbackPaused is the state with a rollback paused. - UpdateStateRollbackPaused UpdateState = "rollback_paused" - // UpdateStateRollbackCompleted is the state with a rollback completed. - UpdateStateRollbackCompleted UpdateState = "rollback_completed" -) - -// UpdateStatus reports the status of a service update. -type UpdateStatus struct { - State UpdateState `json:",omitempty"` - StartedAt *time.Time `json:",omitempty"` - CompletedAt *time.Time `json:",omitempty"` - Message string `json:",omitempty"` -} - -// ReplicatedService is a kind of ServiceMode. -type ReplicatedService struct { - Replicas *uint64 `json:",omitempty"` -} - -// GlobalService is a kind of ServiceMode. -type GlobalService struct{} - -const ( - // UpdateFailureActionPause PAUSE - UpdateFailureActionPause = "pause" - // UpdateFailureActionContinue CONTINUE - UpdateFailureActionContinue = "continue" - // UpdateFailureActionRollback ROLLBACK - UpdateFailureActionRollback = "rollback" - - // UpdateOrderStopFirst STOP_FIRST - UpdateOrderStopFirst = "stop-first" - // UpdateOrderStartFirst START_FIRST - UpdateOrderStartFirst = "start-first" -) - -// UpdateConfig represents the update configuration. -type UpdateConfig struct { - // Maximum number of tasks to be updated in one iteration. - // 0 means unlimited parallelism. - Parallelism uint64 - - // Amount of time between updates. - Delay time.Duration `json:",omitempty"` - - // FailureAction is the action to take when an update fails. - FailureAction string `json:",omitempty"` - - // Monitor indicates how long to monitor a task for failure after it is - // created. If the task fails by ending up in one of the states - // REJECTED, COMPLETED, or FAILED, within Monitor from its creation, - // this counts as a failure. If it fails after Monitor, it does not - // count as a failure. If Monitor is unspecified, a default value will - // be used. - Monitor time.Duration `json:",omitempty"` - - // MaxFailureRatio is the fraction of tasks that may fail during - // an update before the failure action is invoked. Any task created by - // the current update which ends up in one of the states REJECTED, - // COMPLETED or FAILED within Monitor from its creation counts as a - // failure. The number of failures is divided by the number of tasks - // being updated, and if this fraction is greater than - // MaxFailureRatio, the failure action is invoked. - // - // If the failure action is CONTINUE, there is no effect. - // If the failure action is PAUSE, no more tasks will be updated until - // another update is started. - MaxFailureRatio float32 - - // Order indicates the order of operations when rolling out an updated - // task.
Either the old task is shut down before the new task is - started, or the new task is started before the old task is shut down. - Order string -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go deleted file mode 100644 index 1b111d725..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ /dev/null @@ -1,217 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import "time" - -// ClusterInfo represents info about the cluster for outputting in "info" -// it contains the same information as "Swarm", but without the JoinTokens -type ClusterInfo struct { - ID string - Meta - Spec Spec - TLSInfo TLSInfo - RootRotationInProgress bool -} - -// Swarm represents a swarm. -type Swarm struct { - ClusterInfo - JoinTokens JoinTokens -} - -// JoinTokens contains the tokens workers and managers need to join the swarm. -type JoinTokens struct { - // Worker is the join token workers may use to join the swarm. - Worker string - // Manager is the join token managers may use to join the swarm. - Manager string -} - -// Spec represents the spec of a swarm. -type Spec struct { - Annotations - - Orchestration OrchestrationConfig `json:",omitempty"` - Raft RaftConfig `json:",omitempty"` - Dispatcher DispatcherConfig `json:",omitempty"` - CAConfig CAConfig `json:",omitempty"` - TaskDefaults TaskDefaults `json:",omitempty"` - EncryptionConfig EncryptionConfig `json:",omitempty"` -} - -// OrchestrationConfig represents orchestration configuration. -type OrchestrationConfig struct { - // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or - // node. If negative, never remove completed or failed tasks. - TaskHistoryRetentionLimit *int64 `json:",omitempty"` -} - -// TaskDefaults parameterizes cluster-level task creation with default values. -type TaskDefaults struct { - // LogDriver selects the log driver to use for tasks created in the - // orchestrator if unspecified by a service. - // - // Updating this value will only have an effect on new tasks. Old tasks - // will continue to use their previously configured log driver until - // recreated. - LogDriver *Driver `json:",omitempty"` -} - -// EncryptionConfig controls at-rest encryption of data and keys. -type EncryptionConfig struct { - // AutoLockManagers specifies whether or not managers' TLS keys and raft data - // should be encrypted at rest in such a way that they must be unlocked - // before the manager node starts up again. - AutoLockManagers bool -} - -// RaftConfig represents raft configuration. -type RaftConfig struct { - // SnapshotInterval is the number of log entries between snapshots. - SnapshotInterval uint64 `json:",omitempty"` - - // KeepOldSnapshots is the number of snapshots to keep beyond the - // current snapshot. - KeepOldSnapshots *uint64 `json:",omitempty"` - - // LogEntriesForSlowFollowers is the number of log entries to keep - // around to sync up slow followers after a snapshot is created. - LogEntriesForSlowFollowers uint64 `json:",omitempty"` - - // ElectionTick is the number of ticks that a follower will wait for a message - // from the leader before becoming a candidate and starting an election. - // ElectionTick must be greater than HeartbeatTick. - // - // A tick currently defaults to one second, so these translate directly to - // seconds currently, but this is NOT guaranteed. - ElectionTick int - - // HeartbeatTick is the number of ticks between heartbeats.
Every - // HeartbeatTick ticks, the leader will send a heartbeat to the - // followers. - // - // A tick currently defaults to one second, so these translate directly to - // seconds currently, but this is NOT guaranteed. - HeartbeatTick int -} - -// DispatcherConfig represents dispatcher configuration. -type DispatcherConfig struct { - // HeartbeatPeriod defines how often the agent should send heartbeats to - // the dispatcher. - HeartbeatPeriod time.Duration `json:",omitempty"` -} - -// CAConfig represents CA configuration. -type CAConfig struct { - // NodeCertExpiry is the duration certificates should be issued for - NodeCertExpiry time.Duration `json:",omitempty"` - - // ExternalCAs is a list of CAs to which a manager node will make - // certificate signing requests for node certificates. - ExternalCAs []*ExternalCA `json:",omitempty"` - - // SigningCACert and SigningCAKey specify the desired signing root CA and - // root CA key for the swarm. When inspecting the cluster, the key will - // be redacted. - SigningCACert string `json:",omitempty"` - SigningCAKey string `json:",omitempty"` - - // If this value changes, and there is no specified signing cert and key, - // then the swarm is forced to generate a new root certificate and key. - ForceRotate uint64 `json:",omitempty"` -} - -// ExternalCAProtocol represents the type of external CA. -type ExternalCAProtocol string - -// ExternalCAProtocolCFSSL CFSSL -const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl" - -// ExternalCA defines an external CA to be used by the cluster. -type ExternalCA struct { - // Protocol is the protocol used by this external CA. - Protocol ExternalCAProtocol - - // URL is the URL where the external CA can be reached. - URL string - - // Options is a set of additional key/value pairs whose interpretation - // depends on the specified CA type. - Options map[string]string `json:",omitempty"` - - // CACert specifies which root CA is used by this external CA. This certificate must - // be in PEM format. - CACert string -} - -// InitRequest is the request used to init a swarm. -type InitRequest struct { - ListenAddr string - AdvertiseAddr string - DataPathAddr string - ForceNewCluster bool - Spec Spec - AutoLockManagers bool - Availability NodeAvailability -} - -// JoinRequest is the request used to join a swarm. -type JoinRequest struct { - ListenAddr string - AdvertiseAddr string - DataPathAddr string - RemoteAddrs []string - JoinToken string // accept by secret - Availability NodeAvailability -} - -// UnlockRequest is the request used to unlock a swarm. -type UnlockRequest struct { - // UnlockKey is the unlock key in ASCII-armored format. - UnlockKey string -} - -// LocalNodeState represents the state of the local node. -type LocalNodeState string - -const ( - // LocalNodeStateInactive INACTIVE - LocalNodeStateInactive LocalNodeState = "inactive" - // LocalNodeStatePending PENDING - LocalNodeStatePending LocalNodeState = "pending" - // LocalNodeStateActive ACTIVE - LocalNodeStateActive LocalNodeState = "active" - // LocalNodeStateError ERROR - LocalNodeStateError LocalNodeState = "error" - // LocalNodeStateLocked LOCKED - LocalNodeStateLocked LocalNodeState = "locked" -) - -// Info represents generic information about swarm. -type Info struct { - NodeID string - NodeAddr string - - LocalNodeState LocalNodeState - ControlAvailable bool - Error string - - RemoteManagers []Peer - Nodes int `json:",omitempty"` - Managers int `json:",omitempty"` - - Cluster *ClusterInfo `json:",omitempty"` -} - -// Peer represents a peer.
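The request types deleted above are what clients send when bootstrapping a cluster. A sketch of an init payload with placeholder addresses; AutoLockManagers here ties back to EncryptionConfig further up:

package main

import (
	"fmt"

	swarm "github.com/docker/docker/api/types/swarm"
)

func main() {
	// Spec is left at its zero value, which the daemon fills in with
	// defaults on a real init call; the addresses are placeholders.
	req := swarm.InitRequest{
		ListenAddr:       "0.0.0.0:2377",
		AdvertiseAddr:    "192.168.1.10:2377",
		ForceNewCluster:  false,
		AutoLockManagers: true, // manager keys must be unlocked after a restart
		Availability:     swarm.NodeAvailabilityActive,
	}
	fmt.Println(req.ListenAddr, req.AutoLockManagers)
}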
-type Peer struct { - NodeID string - Addr string -} - -// UpdateFlags contains flags for SwarmUpdate. -type UpdateFlags struct { - RotateWorkerToken bool - RotateManagerToken bool - RotateManagerUnlockKey bool -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go deleted file mode 100644 index b35605d12..000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/task.go +++ /dev/null @@ -1,191 +0,0 @@ -package swarm // import "github.com/docker/docker/api/types/swarm" - -import ( - "time" - - "github.com/docker/docker/api/types/swarm/runtime" -) - -// TaskState represents the state of a task. -type TaskState string - -const ( - // TaskStateNew NEW - TaskStateNew TaskState = "new" - // TaskStateAllocated ALLOCATED - TaskStateAllocated TaskState = "allocated" - // TaskStatePending PENDING - TaskStatePending TaskState = "pending" - // TaskStateAssigned ASSIGNED - TaskStateAssigned TaskState = "assigned" - // TaskStateAccepted ACCEPTED - TaskStateAccepted TaskState = "accepted" - // TaskStatePreparing PREPARING - TaskStatePreparing TaskState = "preparing" - // TaskStateReady READY - TaskStateReady TaskState = "ready" - // TaskStateStarting STARTING - TaskStateStarting TaskState = "starting" - // TaskStateRunning RUNNING - TaskStateRunning TaskState = "running" - // TaskStateComplete COMPLETE - TaskStateComplete TaskState = "complete" - // TaskStateShutdown SHUTDOWN - TaskStateShutdown TaskState = "shutdown" - // TaskStateFailed FAILED - TaskStateFailed TaskState = "failed" - // TaskStateRejected REJECTED - TaskStateRejected TaskState = "rejected" - // TaskStateRemove REMOVE - TaskStateRemove TaskState = "remove" - // TaskStateOrphaned ORPHANED - TaskStateOrphaned TaskState = "orphaned" -) - -// Task represents a task. -type Task struct { - ID string - Meta - Annotations - - Spec TaskSpec `json:",omitempty"` - ServiceID string `json:",omitempty"` - Slot int `json:",omitempty"` - NodeID string `json:",omitempty"` - Status TaskStatus `json:",omitempty"` - DesiredState TaskState `json:",omitempty"` - NetworksAttachments []NetworkAttachment `json:",omitempty"` - GenericResources []GenericResource `json:",omitempty"` -} - -// TaskSpec represents the spec of a task. -type TaskSpec struct { - // ContainerSpec, NetworkAttachmentSpec, and PluginSpec are mutually exclusive. - // PluginSpec is only used when the `Runtime` field is set to `plugin` - // NetworkAttachmentSpec is used if the `Runtime` field is set to - // `attachment`. - ContainerSpec *ContainerSpec `json:",omitempty"` - PluginSpec *runtime.PluginSpec `json:",omitempty"` - NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"` - - Resources *ResourceRequirements `json:",omitempty"` - RestartPolicy *RestartPolicy `json:",omitempty"` - Placement *Placement `json:",omitempty"` - Networks []NetworkAttachmentConfig `json:",omitempty"` - - // LogDriver specifies the LogDriver to use for tasks created from this - // spec. If not present, the one on cluster default on swarm.Spec will be - // used, finally falling back to the engine default if not specified. - LogDriver *Driver `json:",omitempty"` - - // ForceUpdate is a counter that triggers an update even if no relevant - // parameters have been changed. - ForceUpdate uint64 - - Runtime RuntimeType `json:",omitempty"` -} - -// Resources represents resources (CPU/Memory). 
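TaskSpec's comment above notes that ContainerSpec, PluginSpec, and NetworkAttachmentSpec are mutually exclusive, with Runtime flagging which one applies. A sketch of the two common shapes, with placeholder image and plugin names:

package main

import (
	"fmt"

	swarm "github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/api/types/swarm/runtime"
)

func main() {
	// A container task: ContainerSpec set, Runtime left as the default.
	containerTask := swarm.TaskSpec{
		ContainerSpec: &swarm.ContainerSpec{Image: "nginx:alpine"},
	}

	// A plugin task: PluginSpec set instead, with Runtime flagged
	// accordingly. Only one of the spec fields may be populated.
	pluginTask := swarm.TaskSpec{
		Runtime: swarm.RuntimePlugin,
		PluginSpec: &runtime.PluginSpec{
			Name:   "sample-plugin",
			Remote: "example/sample-plugin",
		},
	}
	fmt.Println(containerTask.ContainerSpec.Image, pluginTask.Runtime)
}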
-type Resources struct { - NanoCPUs int64 `json:",omitempty"` - MemoryBytes int64 `json:",omitempty"` - GenericResources []GenericResource `json:",omitempty"` -} - -// GenericResource represents a "user defined" resource which can -// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1) -type GenericResource struct { - NamedResourceSpec *NamedGenericResource `json:",omitempty"` - DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"` -} - -// NamedGenericResource represents a "user defined" resource which is defined -// as a string. -// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) -// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...) -type NamedGenericResource struct { - Kind string `json:",omitempty"` - Value string `json:",omitempty"` -} - -// DiscreteGenericResource represents a "user defined" resource which is defined -// as an integer -// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) -// Value is used to count the resource (SSD=5, HDD=3, ...) -type DiscreteGenericResource struct { - Kind string `json:",omitempty"` - Value int64 `json:",omitempty"` -} - -// ResourceRequirements represents resources requirements. -type ResourceRequirements struct { - Limits *Resources `json:",omitempty"` - Reservations *Resources `json:",omitempty"` -} - -// Placement represents orchestration parameters. -type Placement struct { - Constraints []string `json:",omitempty"` - Preferences []PlacementPreference `json:",omitempty"` - - // Platforms stores all the platforms that the image can run on. - // This field is used in the platform filter for scheduling. If empty, - // then the platform filter is off, meaning there are no scheduling restrictions. - Platforms []Platform `json:",omitempty"` -} - -// PlacementPreference provides a way to make the scheduler aware of factors -// such as topology. -type PlacementPreference struct { - Spread *SpreadOver -} - -// SpreadOver is a scheduling preference that instructs the scheduler to spread -// tasks evenly over groups of nodes identified by labels. -type SpreadOver struct { - // label descriptor, such as engine.labels.az - SpreadDescriptor string -} - -// RestartPolicy represents the restart policy. -type RestartPolicy struct { - Condition RestartPolicyCondition `json:",omitempty"` - Delay *time.Duration `json:",omitempty"` - MaxAttempts *uint64 `json:",omitempty"` - Window *time.Duration `json:",omitempty"` -} - -// RestartPolicyCondition represents when to restart. -type RestartPolicyCondition string - -const ( - // RestartPolicyConditionNone NONE - RestartPolicyConditionNone RestartPolicyCondition = "none" - // RestartPolicyConditionOnFailure ON_FAILURE - RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure" - // RestartPolicyConditionAny ANY - RestartPolicyConditionAny RestartPolicyCondition = "any" -) - -// TaskStatus represents the status of a task. -type TaskStatus struct { - Timestamp time.Time `json:",omitempty"` - State TaskState `json:",omitempty"` - Message string `json:",omitempty"` - Err string `json:",omitempty"` - ContainerStatus *ContainerStatus `json:",omitempty"` - PortStatus PortStatus `json:",omitempty"` -} - -// ContainerStatus represents the status of a container. 
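The generic-resource types above distinguish counted resources from named ones. A sketch with example kinds (GPU, SSD); these are user-defined strings, not reserved values:

package main

import (
	"fmt"

	swarm "github.com/docker/docker/api/types/swarm"
)

func main() {
	// A discrete resource is counted (4 GPUs); a named resource points at
	// a specific unit (one SSD device). All values are illustrative.
	gpus := swarm.GenericResource{
		DiscreteResourceSpec: &swarm.DiscreteGenericResource{Kind: "GPU", Value: 4},
	}
	ssd := swarm.GenericResource{
		NamedResourceSpec: &swarm.NamedGenericResource{Kind: "SSD", Value: "/dev/sdb1"},
	}

	reservations := &swarm.Resources{
		NanoCPUs:         2000000000, // 2 CPUs, in billionths of a CPU
		MemoryBytes:      512 * 1024 * 1024,
		GenericResources: []swarm.GenericResource{gpus, ssd},
	}
	fmt.Println(len(reservations.GenericResources))
}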
-type ContainerStatus struct {
-	ContainerID string
-	PID int
-	ExitCode int
-}
-
-// PortStatus represents the port status of a task's host ports whose
-// service has published host ports
-type PortStatus struct {
-	Ports []PortConfig `json:",omitempty"`
-}
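For reviewers tracing what this deletion removes: consumers keep using these task types from the upstream module rather than this vendored copy. A minimal, illustrative sketch (not part of the patch) that assembles a swarm.TaskSpec from the fields defined in the file above:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	delay := 5 * time.Second
	attempts := uint64(3)
	spec := swarm.TaskSpec{
		// Only one of ContainerSpec/PluginSpec/NetworkAttachmentSpec may be set.
		ContainerSpec: &swarm.ContainerSpec{Image: "nginx:alpine"},
		RestartPolicy: &swarm.RestartPolicy{
			Condition:   swarm.RestartPolicyConditionOnFailure,
			Delay:       &delay,
			MaxAttempts: &attempts,
		},
		Placement: &swarm.Placement{
			// Spread tasks evenly across a node-label group, per SpreadOver above.
			Preferences: []swarm.PlacementPreference{
				{Spread: &swarm.SpreadOver{SpreadDescriptor: "engine.labels.az"}},
			},
		},
	}
	out, _ := json.MarshalIndent(spec, "", "  ")
	fmt.Println(string(out)) // fields with `json:",omitempty"` drop out when unset
}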
diff --git a/vendor/github.com/docker/docker/api/types/time/duration_convert.go b/vendor/github.com/docker/docker/api/types/time/duration_convert.go
deleted file mode 100644
index 84b6f0732..000000000
--- a/vendor/github.com/docker/docker/api/types/time/duration_convert.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package time // import "github.com/docker/docker/api/types/time"
-
-import (
-	"strconv"
-	"time"
-)
-
-// DurationToSecondsString converts the specified duration to the number
-// seconds it represents, formatted as a string.
-func DurationToSecondsString(duration time.Duration) string {
-	return strconv.FormatFloat(duration.Seconds(), 'f', 0, 64)
-}
diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/docker/docker/api/types/time/timestamp.go
deleted file mode 100644
index ea3495efe..000000000
--- a/vendor/github.com/docker/docker/api/types/time/timestamp.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package time // import "github.com/docker/docker/api/types/time"
-
-import (
-	"fmt"
-	"math"
-	"strconv"
-	"strings"
-	"time"
-)
-
-// These are additional predefined layouts for use in Time.Format and Time.Parse
-// with --since and --until parameters for `docker logs` and `docker events`
-const (
-	rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone
-	rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone
-	dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00
-	dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00
-)
-
-// GetTimestamp tries to parse given string as golang duration,
-// then RFC3339 time and finally as a Unix timestamp. If
-// any of these were successful, it returns a Unix timestamp
-// as string otherwise returns the given value back.
-// In case of duration input, the returned timestamp is computed
-// as the given reference time minus the amount of the duration.
-func GetTimestamp(value string, reference time.Time) (string, error) {
-	if d, err := time.ParseDuration(value); value != "0" && err == nil {
-		return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil
-	}
-
-	var format string
-	// if the string has a Z or a + or three dashes use parse otherwise use parseinlocation
-	parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
-
-	if strings.Contains(value, ".") {
-		if parseInLocation {
-			format = rFC3339NanoLocal
-		} else {
-			format = time.RFC3339Nano
-		}
-	} else if strings.Contains(value, "T") {
-		// we want the number of colons in the T portion of the timestamp
-		tcolons := strings.Count(value, ":")
-		// if parseInLocation is off and we have a +/- zone offset (not Z) then
-		// there will be an extra colon in the input for the tz offset subtract that
-		// colon from the tcolons count
-		if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 {
-			tcolons--
-		}
-		if parseInLocation {
-			switch tcolons {
-			case 0:
-				format = "2006-01-02T15"
-			case 1:
-				format = "2006-01-02T15:04"
-			default:
-				format = rFC3339Local
-			}
-		} else {
-			switch tcolons {
-			case 0:
-				format = "2006-01-02T15Z07:00"
-			case 1:
-				format = "2006-01-02T15:04Z07:00"
-			default:
-				format = time.RFC3339
-			}
-		}
-	} else if parseInLocation {
-		format = dateLocal
-	} else {
-		format = dateWithZone
-	}
-
-	var t time.Time
-	var err error
-
-	if parseInLocation {
-		t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone()))
-	} else {
-		t, err = time.Parse(format, value)
-	}
-
-	if err != nil {
-		// if there is a `-` then it's an RFC3339 like timestamp
-		if strings.Contains(value, "-") {
-			return "", err // was probably an RFC3339 like timestamp but the parser failed with an error
-		}
-		if _, _, err := parseTimestamp(value); err != nil {
-			return "", fmt.Errorf("failed to parse value as time or duration: %q", value)
-		}
-		return value, nil // unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server)
-	}
-
-	return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil
-}
-
-// ParseTimestamps returns seconds and nanoseconds from a timestamp that has the
-// format "%d.%09d", time.Unix(), int64(time.Nanosecond()))
-// if the incoming nanosecond portion is longer or shorter than 9 digits it is
-// converted to nanoseconds. The expectation is that the seconds and
-// seconds will be used to create a time variable. For example:
-//     seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0)
-//     if err == nil since := time.Unix(seconds, nanoseconds)
-// returns seconds as def(aultSeconds) if value == ""
-func ParseTimestamps(value string, def int64) (int64, int64, error) {
-	if value == "" {
-		return def, 0, nil
-	}
-	return parseTimestamp(value)
-}
-
-func parseTimestamp(value string) (int64, int64, error) {
-	sa := strings.SplitN(value, ".", 2)
-	s, err := strconv.ParseInt(sa[0], 10, 64)
-	if err != nil {
-		return s, 0, err
-	}
-	if len(sa) != 2 {
-		return s, 0, nil
-	}
-	n, err := strconv.ParseInt(sa[1], 10, 64)
-	if err != nil {
-		return s, n, err
-	}
-	// should already be in nanoseconds but just in case convert n to nanoseconds
-	n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1]))))
-	return s, n, nil
-}
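The dual behaviour documented on GetTimestamp above is easiest to see side by side. A minimal sketch (illustrative only, not part of the patch; the apitime alias is mine) against the upstream package this vendored copy mirrors:

package main

import (
	"fmt"
	"time"

	apitime "github.com/docker/docker/api/types/time"
)

func main() {
	ref := time.Date(2021, 6, 26, 22, 0, 0, 0, time.UTC)

	// Duration input: resolved as reference minus the duration (whole seconds).
	ts, err := apitime.GetTimestamp("10m", ref)
	fmt.Println(ts, err) // Unix seconds for 21:50:00 UTC

	// Absolute date input: parsed via one of the RFC3339-style layouts above.
	ts, err = apitime.GetTimestamp("2021-06-26T22:00:00Z", ref)
	fmt.Println(ts, err) // "1624744800.000000000"

	// Round-trip through ParseTimestamps to recover seconds and nanoseconds.
	secs, nanos, err := apitime.ParseTimestamps(ts, 0)
	fmt.Println(time.Unix(secs, nanos).UTC(), err)
}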
diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go
deleted file mode 100644
index 729f4eb6c..000000000
--- a/vendor/github.com/docker/docker/api/types/types.go
+++ /dev/null
@@ -1,587 +0,0 @@
-package types // import "github.com/docker/docker/api/types"
-
-import (
-	"errors"
-	"fmt"
-	"io"
-	"os"
-	"strings"
-	"time"
-
-	"github.com/docker/docker/api/types/container"
-	"github.com/docker/docker/api/types/filters"
-	"github.com/docker/docker/api/types/mount"
-	"github.com/docker/docker/api/types/network"
-	"github.com/docker/docker/api/types/registry"
-	"github.com/docker/docker/api/types/swarm"
-	"github.com/docker/go-connections/nat"
-)
-
-// RootFS returns Image's RootFS description including the layer IDs.
-type RootFS struct {
-	Type string
-	Layers []string `json:",omitempty"`
-	BaseLayer string `json:",omitempty"`
-}
-
-// ImageInspect contains response of Engine API:
-// GET "/images/{name:.*}/json"
-type ImageInspect struct {
-	ID string `json:"Id"`
-	RepoTags []string
-	RepoDigests []string
-	Parent string
-	Comment string
-	Created string
-	Container string
-	ContainerConfig *container.Config
-	DockerVersion string
-	Author string
-	Config *container.Config
-	Architecture string
-	Os string
-	OsVersion string `json:",omitempty"`
-	Size int64
-	VirtualSize int64
-	GraphDriver GraphDriverData
-	RootFS RootFS
-	Metadata ImageMetadata
-}
-
-// ImageMetadata contains engine-local data about the image
-type ImageMetadata struct {
-	LastTagTime time.Time `json:",omitempty"`
-}
-
-// Container contains response of Engine API:
-// GET "/containers/json"
-type Container struct {
-	ID string `json:"Id"`
-	Names []string
-	Image string
-	ImageID string
-	Command string
-	Created int64
-	Ports []Port
-	SizeRw int64 `json:",omitempty"`
-	SizeRootFs int64 `json:",omitempty"`
-	Labels map[string]string
-	State string
-	Status string
-	HostConfig struct {
-		NetworkMode string `json:",omitempty"`
-	}
-	NetworkSettings *SummaryNetworkSettings
-	Mounts []MountPoint
-}
-
-// CopyConfig contains request body of Engine API:
-// POST "/containers/"+containerID+"/copy"
-type CopyConfig struct {
-	Resource string
-}
-
-// ContainerPathStat is used to encode the header from
-// GET "/containers/{name:.*}/archive"
-// "Name" is the file or directory name.
-type ContainerPathStat struct {
-	Name string `json:"name"`
-	Size int64 `json:"size"`
-	Mode os.FileMode `json:"mode"`
-	Mtime time.Time `json:"mtime"`
-	LinkTarget string `json:"linkTarget"`
-}
-
-// ContainerStats contains response of Engine API:
-// GET "/stats"
-type ContainerStats struct {
-	Body io.ReadCloser `json:"body"`
-	OSType string `json:"ostype"`
-}
-
-// Ping contains response of Engine API:
-// GET "/_ping"
-type Ping struct {
-	APIVersion string
-	OSType string
-	Experimental bool
-}
-
-// ComponentVersion describes the version information for a specific component.
-type ComponentVersion struct {
-	Name string
-	Version string
-	Details map[string]string `json:",omitempty"`
-}
-
-// Version contains response of Engine API:
-// GET "/version"
-type Version struct {
-	Platform struct{ Name string } `json:",omitempty"`
-	Components []ComponentVersion `json:",omitempty"`
-
-	// The following fields are deprecated, they relate to the Engine component and are kept for backwards compatibility
-
-	Version string
-	APIVersion string `json:"ApiVersion"`
-	MinAPIVersion string `json:"MinAPIVersion,omitempty"`
-	GitCommit string
-	GoVersion string
-	Os string
-	Arch string
-	KernelVersion string `json:",omitempty"`
-	Experimental bool `json:",omitempty"`
-	BuildTime string `json:",omitempty"`
-}
-
-// Commit holds the Git-commit (SHA1) that a binary was built from, as reported
-// in the version-string of external tools, such as containerd, or runC.
-type Commit struct {
-	ID string // ID is the actual commit ID of external tool.
-	Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time.
-}
-
-// Info contains response of Engine API:
-// GET "/info"
-type Info struct {
-	ID string
-	Containers int
-	ContainersRunning int
-	ContainersPaused int
-	ContainersStopped int
-	Images int
-	Driver string
-	DriverStatus [][2]string
-	SystemStatus [][2]string
-	Plugins PluginsInfo
-	MemoryLimit bool
-	SwapLimit bool
-	KernelMemory bool
-	CPUCfsPeriod bool `json:"CpuCfsPeriod"`
-	CPUCfsQuota bool `json:"CpuCfsQuota"`
-	CPUShares bool
-	CPUSet bool
-	IPv4Forwarding bool
-	BridgeNfIptables bool
-	BridgeNfIP6tables bool `json:"BridgeNfIp6tables"`
-	Debug bool
-	NFd int
-	OomKillDisable bool
-	NGoroutines int
-	SystemTime string
-	LoggingDriver string
-	CgroupDriver string
-	NEventsListener int
-	KernelVersion string
-	OperatingSystem string
-	OSType string
-	Architecture string
-	IndexServerAddress string
-	RegistryConfig *registry.ServiceConfig
-	NCPU int
-	MemTotal int64
-	GenericResources []swarm.GenericResource
-	DockerRootDir string
-	HTTPProxy string `json:"HttpProxy"`
-	HTTPSProxy string `json:"HttpsProxy"`
-	NoProxy string
-	Name string
-	Labels []string
-	ExperimentalBuild bool
-	ServerVersion string
-	ClusterStore string
-	ClusterAdvertise string
-	Runtimes map[string]Runtime
-	DefaultRuntime string
-	Swarm swarm.Info
-	// LiveRestoreEnabled determines whether containers should be kept
-	// running when the daemon is shutdown or upon daemon start if
-	// running containers are detected
-	LiveRestoreEnabled bool
-	Isolation container.Isolation
-	InitBinary string
-	ContainerdCommit Commit
-	RuncCommit Commit
-	InitCommit Commit
-	SecurityOptions []string
-}
-
-// KeyValue holds a key/value pair
-type KeyValue struct {
-	Key, Value string
-}
-
-// SecurityOpt contains the name and options of a security option
-type SecurityOpt struct {
-	Name string
-	Options []KeyValue
-}
-
-// DecodeSecurityOptions decodes a security options string slice to a type safe
-// SecurityOpt
-func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) {
-	so := []SecurityOpt{}
-	for _, opt := range opts {
-		// support output from a < 1.13 docker daemon
-		if !strings.Contains(opt, "=") {
-			so = append(so, SecurityOpt{Name: opt})
-			continue
-		}
-		secopt := SecurityOpt{}
-		split := strings.Split(opt, ",")
-		for _, s := range split {
-			kv := strings.SplitN(s, "=", 2)
-			if len(kv) != 2 {
-				return nil, fmt.Errorf("invalid security option %q", s)
-			}
-			if kv[0] == "" || kv[1] == "" {
-				return nil, errors.New("invalid empty security option")
-			}
-			if kv[0] == "name" {
-				secopt.Name = kv[1]
-				continue
-			}
-			secopt.Options = append(secopt.Options, KeyValue{Key: kv[0], Value: kv[1]})
-		}
-		so = append(so, secopt)
-	}
-	return so, nil
-}
-
-// PluginsInfo is a temp struct holding Plugins name
-// registered with docker daemon. It is used by Info struct
-type PluginsInfo struct {
-	// List of Volume plugins registered
-	Volume []string
-	// List of Network plugins registered
-	Network []string
-	// List of Authorization plugins registered
-	Authorization []string
-	// List of Log plugins registered
-	Log []string
-}
-
-// ExecStartCheck is a temp struct used by execStart
-// Config fields is part of ExecConfig in runconfig package
-type ExecStartCheck struct {
-	// ExecStart will first check if it's detached
-	Detach bool
-	// Check if there's a tty
-	Tty bool
-}
-
-// HealthcheckResult stores information about a single run of a healthcheck probe
-type HealthcheckResult struct {
-	Start time.Time // Start is the time this check started
-	End time.Time // End is the time this check ended
-	ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe
-	Output string // Output from last check
-}
-
-// Health states
-const (
-	NoHealthcheck = "none" // Indicates there is no healthcheck
-	Starting = "starting" // Starting indicates that the container is not yet ready
-	Healthy = "healthy" // Healthy indicates that the container is running correctly
-	Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem
-)
-
-// Health stores information about the container's healthcheck results
-type Health struct {
-	Status string // Status is one of Starting, Healthy or Unhealthy
-	FailingStreak int // FailingStreak is the number of consecutive failures
-	Log []*HealthcheckResult // Log contains the last few results (oldest first)
-}
-
-// ContainerState stores container's running state
-// it's part of ContainerJSONBase and will return by "inspect" command
-type ContainerState struct {
-	Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead"
-	Running bool
-	Paused bool
-	Restarting bool
-	OOMKilled bool
-	Dead bool
-	Pid int
-	ExitCode int
-	Error string
-	StartedAt string
-	FinishedAt string
-	Health *Health `json:",omitempty"`
-}
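A sketch (illustrative only, not part of the patch) of the decoding DecodeSecurityOptions performs on the daemon's SecurityOptions strings; the sample inputs are hypothetical values of the kind a daemon reports:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	raw := []string{"name=seccomp,profile=default", "name=apparmor"}
	opts, err := types.DecodeSecurityOptions(raw)
	if err != nil {
		panic(err)
	}
	for _, o := range opts {
		// "name" becomes Name; every other k=v pair lands in Options.
		fmt.Println(o.Name, o.Options) // seccomp [{profile default}] / apparmor []
	}
}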
-
-// ContainerNode stores information about the node that a container
-// is running on. It's only available in Docker Swarm
-type ContainerNode struct {
-	ID string
-	IPAddress string `json:"IP"`
-	Addr string
-	Name string
-	Cpus int
-	Memory int64
-	Labels map[string]string
-}
-
-// ContainerJSONBase contains response of Engine API:
-// GET "/containers/{name:.*}/json"
-type ContainerJSONBase struct {
-	ID string `json:"Id"`
-	Created string
-	Path string
-	Args []string
-	State *ContainerState
-	Image string
-	ResolvConfPath string
-	HostnamePath string
-	HostsPath string
-	LogPath string
-	Node *ContainerNode `json:",omitempty"`
-	Name string
-	RestartCount int
-	Driver string
-	Platform string
-	MountLabel string
-	ProcessLabel string
-	AppArmorProfile string
-	ExecIDs []string
-	HostConfig *container.HostConfig
-	GraphDriver GraphDriverData
-	SizeRw *int64 `json:",omitempty"`
-	SizeRootFs *int64 `json:",omitempty"`
-}
-
-// ContainerJSON is newly used struct along with MountPoint
-type ContainerJSON struct {
-	*ContainerJSONBase
-	Mounts []MountPoint
-	Config *container.Config
-	NetworkSettings *NetworkSettings
-}
-
-// NetworkSettings exposes the network settings in the api
-type NetworkSettings struct {
-	NetworkSettingsBase
-	DefaultNetworkSettings
-	Networks map[string]*network.EndpointSettings
-}
-
-// SummaryNetworkSettings provides a summary of container's networks
-// in /containers/json
-type SummaryNetworkSettings struct {
-	Networks map[string]*network.EndpointSettings
-}
-
-// NetworkSettingsBase holds basic information about networks
-type NetworkSettingsBase struct {
-	Bridge string // Bridge is the Bridge name the network uses(e.g. `docker0`)
-	SandboxID string // SandboxID uniquely represents a container's network stack
-	HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface
-	LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix
-	LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address
-	Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port
-	SandboxKey string // SandboxKey identifies the sandbox
-	SecondaryIPAddresses []network.Address
-	SecondaryIPv6Addresses []network.Address
-}
-
-// DefaultNetworkSettings holds network information
-// during the 2 release deprecation period.
-// It will be removed in Docker 1.11.
-type DefaultNetworkSettings struct {
-	EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox
-	Gateway string // Gateway holds the gateway address for the network
-	GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address
-	GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address
-	IPAddress string // IPAddress holds the IPv4 address for the network
-	IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address
-	IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6
-	MacAddress string // MacAddress holds the MAC address for the network
-}
-
-// MountPoint represents a mount point configuration inside the container.
-// This is used for reporting the mountpoints in use by a container.
-type MountPoint struct {
-	Type mount.Type `json:",omitempty"`
-	Name string `json:",omitempty"`
-	Source string
-	Destination string
-	Driver string `json:",omitempty"`
-	Mode string
-	RW bool
-	Propagation mount.Propagation
-}
-
-// NetworkResource is the body of the "get network" http response message
-type NetworkResource struct {
-	Name string // Name is the requested name of the network
-	ID string `json:"Id"` // ID uniquely identifies a network on a single machine
-	Created time.Time // Created is the time the network created
-	Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level)
-	Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
-	EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6
-	IPAM network.IPAM // IPAM is the network's IP Address Management
-	Internal bool // Internal represents if the network is used internal only
-	Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
-	Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
-	ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network.
-	ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
-	Containers map[string]EndpointResource // Containers contains endpoints belonging to the network
-	Options map[string]string // Options holds the network specific options to use for when creating the network
-	Labels map[string]string // Labels holds metadata specific to the network being created
-	Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network
-	Services map[string]network.ServiceInfo `json:",omitempty"`
-}
-
-// EndpointResource contains network resources allocated and used for a container in a network
-type EndpointResource struct {
-	Name string
-	EndpointID string
-	MacAddress string
-	IPv4Address string
-	IPv6Address string
-}
-
-// NetworkCreate is the expected body of the "create network" http request message
-type NetworkCreate struct {
-	// Check for networks with duplicate names.
-	// Network is primarily keyed based on a random ID and not on the name.
-	// Network name is strictly a user-friendly alias to the network
-	// which is uniquely identified using ID.
-	// And there is no guaranteed way to check for duplicates.
-	// Option CheckDuplicate is there to provide a best effort checking of any networks
-	// which has the same name but it is not guaranteed to catch all name collisions.
-	CheckDuplicate bool
-	Driver string
-	Scope string
-	EnableIPv6 bool
-	IPAM *network.IPAM
-	Internal bool
-	Attachable bool
-	Ingress bool
-	ConfigOnly bool
-	ConfigFrom *network.ConfigReference
-	Options map[string]string
-	Labels map[string]string
-}
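A sketch (illustrative only, not part of the patch; names and values are hypothetical) of the body a client sends for POST /networks/create, built from NetworkCreate above wrapped in the NetworkCreateRequest defined just below:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/network"
)

func main() {
	req := types.NetworkCreateRequest{
		Name: "app-net",
		NetworkCreate: types.NetworkCreate{
			CheckDuplicate: true, // best-effort name-collision check only, per the comment above
			Driver:         "overlay",
			Attachable:     true,
			IPAM: &network.IPAM{
				Driver: "default",
				Config: []network.IPAMConfig{{Subnet: "10.10.0.0/24"}},
			},
			Labels: map[string]string{"env": "dev"},
		},
	}
	body, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println(string(body))
}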
-
-// NetworkCreateRequest is the request message sent to the server for network create call.
-type NetworkCreateRequest struct {
-	NetworkCreate
-	Name string
-}
-
-// NetworkCreateResponse is the response message sent by the server for network create call
-type NetworkCreateResponse struct {
-	ID string `json:"Id"`
-	Warning string
-}
-
-// NetworkConnect represents the data to be used to connect a container to the network
-type NetworkConnect struct {
-	Container string
-	EndpointConfig *network.EndpointSettings `json:",omitempty"`
-}
-
-// NetworkDisconnect represents the data to be used to disconnect a container from the network
-type NetworkDisconnect struct {
-	Container string
-	Force bool
-}
-
-// NetworkInspectOptions holds parameters to inspect network
-type NetworkInspectOptions struct {
-	Scope string
-	Verbose bool
-}
-
-// Checkpoint represents the details of a checkpoint
-type Checkpoint struct {
-	Name string // Name is the name of the checkpoint
-}
-
-// Runtime describes an OCI runtime
-type Runtime struct {
-	Path string `json:"path"`
-	Args []string `json:"runtimeArgs,omitempty"`
-}
-
-// DiskUsage contains response of Engine API:
-// GET "/system/df"
-type DiskUsage struct {
-	LayersSize int64
-	Images []*ImageSummary
-	Containers []*Container
-	Volumes []*Volume
-	BuilderSize int64
-}
-
-// ContainersPruneReport contains the response for Engine API:
-// POST "/containers/prune"
-type ContainersPruneReport struct {
-	ContainersDeleted []string
-	SpaceReclaimed uint64
-}
-
-// VolumesPruneReport contains the response for Engine API:
-// POST "/volumes/prune"
-type VolumesPruneReport struct {
-	VolumesDeleted []string
-	SpaceReclaimed uint64
-}
-
-// ImagesPruneReport contains the response for Engine API:
-// POST "/images/prune"
-type ImagesPruneReport struct {
-	ImagesDeleted []ImageDeleteResponseItem
-	SpaceReclaimed uint64
-}
-
-// BuildCachePruneReport contains the response for Engine API:
-// POST "/build/prune"
-type BuildCachePruneReport struct {
-	SpaceReclaimed uint64
-}
-
-// NetworksPruneReport contains the response for Engine API:
-// POST "/networks/prune"
-type NetworksPruneReport struct {
-	NetworksDeleted []string
-}
-
-// SecretCreateResponse contains the information returned to a client
-// on the creation of a new secret.
-type SecretCreateResponse struct {
-	// ID is the id of the created secret.
-	ID string
-}
-
-// SecretListOptions holds parameters to list secrets
-type SecretListOptions struct {
-	Filters filters.Args
-}
-
-// ConfigCreateResponse contains the information returned to a client
-// on the creation of a new config.
-type ConfigCreateResponse struct {
-	// ID is the id of the created config.
-	ID string
-}
-
-// ConfigListOptions holds parameters to list configs
-type ConfigListOptions struct {
-	Filters filters.Args
-}
-
-// PushResult contains the tag, manifest digest, and manifest size from the
-// push. It's used to signal this information to the trust code in the client
-// so it can sign the manifest if necessary.
-type PushResult struct {
-	Tag string
-	Digest string
-	Size int
-}
-
-// BuildResult contains the image id of a successful build
-type BuildResult struct {
-	ID string
-}
diff --git a/vendor/github.com/docker/docker/api/types/versions/compare.go b/vendor/github.com/docker/docker/api/types/versions/compare.go
deleted file mode 100644
index 8ccb0aa92..000000000
--- a/vendor/github.com/docker/docker/api/types/versions/compare.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package versions // import "github.com/docker/docker/api/types/versions"
-
-import (
-	"strconv"
-	"strings"
-)
-
-// compare compares two version strings
-// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise.
-func compare(v1, v2 string) int {
-	var (
-		currTab = strings.Split(v1, ".")
-		otherTab = strings.Split(v2, ".")
-	)
-
-	max := len(currTab)
-	if len(otherTab) > max {
-		max = len(otherTab)
-	}
-	for i := 0; i < max; i++ {
-		var currInt, otherInt int
-
-		if len(currTab) > i {
-			currInt, _ = strconv.Atoi(currTab[i])
-		}
-		if len(otherTab) > i {
-			otherInt, _ = strconv.Atoi(otherTab[i])
-		}
-		if currInt > otherInt {
-			return 1
-		}
-		if otherInt > currInt {
-			return -1
-		}
-	}
-	return 0
-}
-
-// LessThan checks if a version is less than another
-func LessThan(v, other string) bool {
-	return compare(v, other) == -1
-}
-
-// LessThanOrEqualTo checks if a version is less than or equal to another
-func LessThanOrEqualTo(v, other string) bool {
-	return compare(v, other) <= 0
-}
-
-// GreaterThan checks if a version is greater than another
-func GreaterThan(v, other string) bool {
-	return compare(v, other) == 1
-}
-
-// GreaterThanOrEqualTo checks if a version is greater than or equal to another
-func GreaterThanOrEqualTo(v, other string) bool {
-	return compare(v, other) >= 0
-}
-
-// Equal checks if a version is equal to another
-func Equal(v, other string) bool {
-	return compare(v, other) == 0
-}
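compare above is a plain numeric, segment-wise comparison (so "1.9" sorts before "1.10", unlike a string compare), which is what makes API-version gates read naturally. A small illustrative sketch, not part of the patch:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/versions"
)

func main() {
	fmt.Println(versions.LessThan("1.9", "1.10")) // true: 9 < 10 segment-wise

	// Typical guard before using a field introduced in a newer API version.
	clientAPI := "1.24"
	if versions.LessThan(clientAPI, "1.25") {
		fmt.Println("omitting fields introduced in API v1.25")
	}
}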
diff --git a/vendor/github.com/docker/docker/api/types/versions/v1p19/types.go b/vendor/github.com/docker/docker/api/types/versions/v1p19/types.go
deleted file mode 100644
index 58afe32da..000000000
--- a/vendor/github.com/docker/docker/api/types/versions/v1p19/types.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Package v1p19 provides specific API types for the API version 1, patch 19.
-package v1p19 // import "github.com/docker/docker/api/types/versions/v1p19"
-
-import (
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/container"
-	"github.com/docker/docker/api/types/versions/v1p20"
-	"github.com/docker/go-connections/nat"
-)
-
-// ContainerJSON is a backcompatibility struct for APIs prior to 1.20.
-// Note this is not used by the Windows daemon.
-type ContainerJSON struct {
-	*types.ContainerJSONBase
-	Volumes map[string]string
-	VolumesRW map[string]bool
-	Config *ContainerConfig
-	NetworkSettings *v1p20.NetworkSettings
-}
-
-// ContainerConfig is a backcompatibility struct for APIs prior to 1.20.
-type ContainerConfig struct {
-	*container.Config
-
-	MacAddress string
-	NetworkDisabled bool
-	ExposedPorts map[nat.Port]struct{}
-
-	// backward compatibility, they now live in HostConfig
-	VolumeDriver string
-	Memory int64
-	MemorySwap int64
-	CPUShares int64 `json:"CpuShares"`
-	CPUSet string `json:"Cpuset"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/versions/v1p20/types.go b/vendor/github.com/docker/docker/api/types/versions/v1p20/types.go
deleted file mode 100644
index cc7277b1b..000000000
--- a/vendor/github.com/docker/docker/api/types/versions/v1p20/types.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Package v1p20 provides specific API types for the API version 1, patch 20.
-package v1p20 // import "github.com/docker/docker/api/types/versions/v1p20"
-
-import (
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/container"
-	"github.com/docker/go-connections/nat"
-)
-
-// ContainerJSON is a backcompatibility struct for the API 1.20
-type ContainerJSON struct {
-	*types.ContainerJSONBase
-	Mounts []types.MountPoint
-	Config *ContainerConfig
-	NetworkSettings *NetworkSettings
-}
-
-// ContainerConfig is a backcompatibility struct used in ContainerJSON for the API 1.20
-type ContainerConfig struct {
-	*container.Config
-
-	MacAddress string
-	NetworkDisabled bool
-	ExposedPorts map[nat.Port]struct{}
-
-	// backward compatibility, they now live in HostConfig
-	VolumeDriver string
-}
-
-// StatsJSON is a backcompatibility struct used in Stats for APIs prior to 1.21
-type StatsJSON struct {
-	types.Stats
-	Network types.NetworkStats `json:"network,omitempty"`
-}
-
-// NetworkSettings is a backward compatible struct for APIs prior to 1.21
-type NetworkSettings struct {
-	types.NetworkSettingsBase
-	types.DefaultNetworkSettings
-}
diff --git a/vendor/github.com/docker/docker/api/types/volume.go b/vendor/github.com/docker/docker/api/types/volume.go
deleted file mode 100644
index b5ee96a50..000000000
--- a/vendor/github.com/docker/docker/api/types/volume.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package types
-
-// This file was generated by the swagger tool.
-// Editing this file might prove futile when you re-run the swagger generate command
-
-// Volume volume
-// swagger:model Volume
-type Volume struct {
-
-	// Date/Time the volume was created.
-	CreatedAt string `json:"CreatedAt,omitempty"`
-
-	// Name of the volume driver used by the volume.
-	// Required: true
-	Driver string `json:"Driver"`
-
-	// User-defined key/value metadata.
-	// Required: true
-	Labels map[string]string `json:"Labels"`
-
-	// Mount path of the volume on the host.
-	// Required: true
-	Mountpoint string `json:"Mountpoint"`
-
-	// Name of the volume.
-	// Required: true
-	Name string `json:"Name"`
-
-	// The driver specific options used when creating the volume.
-	// Required: true
-	Options map[string]string `json:"Options"`
-
-	// The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level.
-	// Required: true
-	Scope string `json:"Scope"`
-
-	// Low-level details about the volume, provided by the volume driver.
-	// Details are returned as a map with key/value pairs:
-	// `{"key":"value","key2":"value2"}`.
-	//
-	// The `Status` field is optional, and is omitted if the volume driver
-	// does not support this feature.
-	//
-	Status map[string]interface{} `json:"Status,omitempty"`
-
-	// usage data
-	UsageData *VolumeUsageData `json:"UsageData,omitempty"`
-}
-
-// VolumeUsageData Usage details about the volume. This information is used by the
-// `GET /system/df` endpoint, and omitted in other endpoints.
-//
-// swagger:model VolumeUsageData
-type VolumeUsageData struct {
-
-	// The number of containers referencing this volume. This field
-	// is set to `-1` if the reference-count is not available.
-	//
-	// Required: true
-	RefCount int64 `json:"RefCount"`
-
-	// Amount of disk space used by the volume (in bytes). This information
-	// is only available for volumes created with the `"local"` volume
-	// driver. For volumes created with other volume drivers, this field
-	// is set to `-1` ("not available")
-	//
-	// Required: true
-	Size int64 `json:"Size"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_create.go b/vendor/github.com/docker/docker/api/types/volume/volume_create.go
deleted file mode 100644
index 539e9b97d..000000000
--- a/vendor/github.com/docker/docker/api/types/volume/volume_create.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package volume
-
-// ----------------------------------------------------------------------------
-// DO NOT EDIT THIS FILE
-// This file was generated by `swagger generate operation`
-//
-// See hack/generate-swagger-api.sh
-// ----------------------------------------------------------------------------
-
-// VolumeCreateBody
-// swagger:model VolumeCreateBody
-type VolumeCreateBody struct {
-
-	// Name of the volume driver to use.
-	// Required: true
-	Driver string `json:"Driver"`
-
-	// A mapping of driver options and values. These options are passed directly to the driver and are driver specific.
-	// Required: true
-	DriverOpts map[string]string `json:"DriverOpts"`
-
-	// User-defined key/value metadata.
-	// Required: true
-	Labels map[string]string `json:"Labels"`
-
-	// The new volume's name. If not specified, Docker generates a name.
-	// Required: true
-	Name string `json:"Name"`
-}
diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_list.go b/vendor/github.com/docker/docker/api/types/volume/volume_list.go
deleted file mode 100644
index 1bb279dbb..000000000
--- a/vendor/github.com/docker/docker/api/types/volume/volume_list.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package volume
-
-// ----------------------------------------------------------------------------
-// DO NOT EDIT THIS FILE
-// This file was generated by `swagger generate operation`
-//
-// See hack/generate-swagger-api.sh
-// ----------------------------------------------------------------------------
-
-import "github.com/docker/docker/api/types"
-
-// VolumeListOKBody
-// swagger:model VolumeListOKBody
-type VolumeListOKBody struct {
-
-	// List of volumes
-	// Required: true
-	Volumes []*types.Volume `json:"Volumes"`
-
-	// Warnings that occurred when fetching the list of volumes
-	// Required: true
-	Warnings []string `json:"Warnings"`
-}
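A sketch (illustrative only, not part of the patch; the driver options shown are hypothetical) of the POST /volumes/create body that the swagger-generated VolumeCreateBody above models:

package main

import (
	"encoding/json"
	"fmt"

	volumetypes "github.com/docker/docker/api/types/volume"
)

func main() {
	body := volumetypes.VolumeCreateBody{
		Name:   "pgdata",
		Driver: "local",
		DriverOpts: map[string]string{
			"type":   "tmpfs",
			"device": "tmpfs",
		},
		Labels: map[string]string{"app": "db"},
	}
	buf, _ := json.MarshalIndent(body, "", "  ")
	fmt.Println(string(buf)) // keys match the API schema: Name, Driver, DriverOpts, Labels
}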
diff --git a/vendor/github.com/docker/docker/builder/builder.go b/vendor/github.com/docker/docker/builder/builder.go
deleted file mode 100644
index 3eb034141..000000000
--- a/vendor/github.com/docker/docker/builder/builder.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Package builder defines interfaces for any Docker builder to implement.
-//
-// Historically, only server-side Dockerfile interpreters existed.
-// This package allows for other implementations of Docker builders.
-package builder // import "github.com/docker/docker/builder"
-
-import (
-	"context"
-	"io"
-
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/backend"
-	"github.com/docker/docker/api/types/container"
-	containerpkg "github.com/docker/docker/container"
-	"github.com/docker/docker/image"
-	"github.com/docker/docker/layer"
-	"github.com/docker/docker/pkg/containerfs"
-)
-
-const (
-	// DefaultDockerfileName is the Default filename with Docker commands, read by docker build
-	DefaultDockerfileName = "Dockerfile"
-)
-
-// Source defines a location that can be used as a source for the ADD/COPY
-// instructions in the builder.
-type Source interface {
-	// Root returns root path for accessing source
-	Root() containerfs.ContainerFS
-	// Close allows to signal that the filesystem tree won't be used anymore.
-	// For Context implementations using a temporary directory, it is recommended to
-	// delete the temporary directory in Close().
-	Close() error
-	// Hash returns a checksum for a file
-	Hash(path string) (string, error)
-}
-
-// Backend abstracts calls to a Docker Daemon.
-type Backend interface {
-	ImageBackend
-	ExecBackend
-
-	// CommitBuildStep creates a new Docker image from the config generated by
-	// a build step.
-	CommitBuildStep(backend.CommitConfig) (image.ID, error)
-	// ContainerCreateWorkdir creates the workdir
-	ContainerCreateWorkdir(containerID string) error
-
-	CreateImage(config []byte, parent string) (Image, error)
-
-	ImageCacheBuilder
-}
-
-// ImageBackend are the interface methods required from an image component
-type ImageBackend interface {
-	GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (Image, ROLayer, error)
-}
-
-// ExecBackend contains the interface methods required for executing containers
-type ExecBackend interface {
-	// ContainerAttachRaw attaches to container.
-	ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool, attached chan struct{}) error
-	// ContainerCreate creates a new Docker container and returns potential warnings
-	ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error)
-	// ContainerRm removes a container specified by `id`.
-	ContainerRm(name string, config *types.ContainerRmConfig) error
-	// ContainerKill stops the container execution abruptly.
-	ContainerKill(containerID string, sig uint64) error
-	// ContainerStart starts a new container
-	ContainerStart(containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error
-	// ContainerWait stops processing until the given container is stopped.
-	ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error)
-}
-
-// Result is the output produced by a Builder
-type Result struct {
-	ImageID string
-	FromImage Image
-}
-
-// ImageCacheBuilder represents a generator for stateful image cache.
-type ImageCacheBuilder interface {
-	// MakeImageCache creates a stateful image cache.
-	MakeImageCache(cacheFrom []string) ImageCache
-}
-
-// ImageCache abstracts an image cache.
-// (parent image, child runconfig) -> child image
-type ImageCache interface {
-	// GetCache returns a reference to a cached image whose parent equals `parent`
-	// and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error.
-	GetCache(parentID string, cfg *container.Config) (imageID string, err error)
-}
-
-// Image represents a Docker image used by the builder.
-type Image interface {
-	ImageID() string
-	RunConfig() *container.Config
-	MarshalJSON() ([]byte, error)
-	OperatingSystem() string
-}
-
-// ROLayer is a reference to image rootfs layer
-type ROLayer interface {
-	Release() error
-	NewRWLayer() (RWLayer, error)
-	DiffID() layer.DiffID
-}
-
-// RWLayer is active layer that can be read/modified
-type RWLayer interface {
-	Release() error
-	Root() containerfs.ContainerFS
-	Commit() (ROLayer, error)
-}
diff --git a/vendor/github.com/docker/docker/builder/dockerfile/buildargs.go b/vendor/github.com/docker/docker/builder/dockerfile/buildargs.go
deleted file mode 100644
index f9cceaa05..000000000
--- a/vendor/github.com/docker/docker/builder/dockerfile/buildargs.go
+++ /dev/null
@@ -1,172 +0,0 @@
-package dockerfile // import "github.com/docker/docker/builder/dockerfile"
-
-import (
-	"fmt"
-	"io"
-
-	"github.com/docker/docker/runconfig/opts"
-)
-
-// builtinAllowedBuildArgs is list of built-in allowed build args
-// these args are considered transparent and are excluded from the image history.
-// Filtering from history is implemented in dispatchers.go
-var builtinAllowedBuildArgs = map[string]bool{
-	"HTTP_PROXY": true,
-	"http_proxy": true,
-	"HTTPS_PROXY": true,
-	"https_proxy": true,
-	"FTP_PROXY": true,
-	"ftp_proxy": true,
-	"NO_PROXY": true,
-	"no_proxy": true,
-}
-
-// BuildArgs manages arguments used by the builder
-type BuildArgs struct {
-	// args that are allowed for expansion/substitution and passing to commands in 'run'.
-	allowedBuildArgs map[string]*string
-	// args defined before the first `FROM` in a Dockerfile
-	allowedMetaArgs map[string]*string
-	// args referenced by the Dockerfile
-	referencedArgs map[string]struct{}
-	// args provided by the user on the command line
-	argsFromOptions map[string]*string
-}
-
-// NewBuildArgs creates a new BuildArgs type
-func NewBuildArgs(argsFromOptions map[string]*string) *BuildArgs {
-	return &BuildArgs{
-		allowedBuildArgs: make(map[string]*string),
-		allowedMetaArgs: make(map[string]*string),
-		referencedArgs: make(map[string]struct{}),
-		argsFromOptions: argsFromOptions,
-	}
-}
-
-// Clone returns a copy of the BuildArgs type
-func (b *BuildArgs) Clone() *BuildArgs {
-	result := NewBuildArgs(b.argsFromOptions)
-	for k, v := range b.allowedBuildArgs {
-		result.allowedBuildArgs[k] = v
-	}
-	for k, v := range b.allowedMetaArgs {
-		result.allowedMetaArgs[k] = v
-	}
-	for k := range b.referencedArgs {
-		result.referencedArgs[k] = struct{}{}
-	}
-	return result
-}
-
-// MergeReferencedArgs merges referenced args from another BuildArgs
-// object into the current one
-func (b *BuildArgs) MergeReferencedArgs(other *BuildArgs) {
-	for k := range other.referencedArgs {
-		b.referencedArgs[k] = struct{}{}
-	}
-}
-
-// WarnOnUnusedBuildArgs checks if there are any leftover build-args that were
-// passed but not consumed during build. Print a warning, if there are any.
-func (b *BuildArgs) WarnOnUnusedBuildArgs(out io.Writer) {
-	var leftoverArgs []string
-	for arg := range b.argsFromOptions {
-		_, isReferenced := b.referencedArgs[arg]
-		_, isBuiltin := builtinAllowedBuildArgs[arg]
-		if !isBuiltin && !isReferenced {
-			leftoverArgs = append(leftoverArgs, arg)
-		}
-	}
-	if len(leftoverArgs) > 0 {
-		fmt.Fprintf(out, "[Warning] One or more build-args %v were not consumed\n", leftoverArgs)
-	}
-}
-
-// ResetAllowed clears the list of args that are allowed to be used by a
-// directive
-func (b *BuildArgs) ResetAllowed() {
-	b.allowedBuildArgs = make(map[string]*string)
-}
-
-// AddMetaArg adds a new meta arg that can be used by FROM directives
-func (b *BuildArgs) AddMetaArg(key string, value *string) {
-	b.allowedMetaArgs[key] = value
-}
-
-// AddArg adds a new arg that can be used by directives
-func (b *BuildArgs) AddArg(key string, value *string) {
-	b.allowedBuildArgs[key] = value
-	b.referencedArgs[key] = struct{}{}
-}
-
-// IsReferencedOrNotBuiltin checks if the key is a built-in arg, or if it has been
-// referenced by the Dockerfile. Returns true if the arg is not a builtin or
-// if the builtin has been referenced in the Dockerfile.
-func (b *BuildArgs) IsReferencedOrNotBuiltin(key string) bool {
-	_, isBuiltin := builtinAllowedBuildArgs[key]
-	_, isAllowed := b.allowedBuildArgs[key]
-	return isAllowed || !isBuiltin
-}
-
-// GetAllAllowed returns a mapping with all the allowed args
-func (b *BuildArgs) GetAllAllowed() map[string]string {
-	return b.getAllFromMapping(b.allowedBuildArgs)
-}
-
-// GetAllMeta returns a mapping with all the meta meta args
-func (b *BuildArgs) GetAllMeta() map[string]string {
-	return b.getAllFromMapping(b.allowedMetaArgs)
-}
-
-func (b *BuildArgs) getAllFromMapping(source map[string]*string) map[string]string {
-	m := make(map[string]string)
-
-	keys := keysFromMaps(source, builtinAllowedBuildArgs)
-	for _, key := range keys {
-		v, ok := b.getBuildArg(key, source)
-		if ok {
-			m[key] = v
-		}
-	}
-	return m
-}
-
-// FilterAllowed returns all allowed args without the filtered args
-func (b *BuildArgs) FilterAllowed(filter []string) []string {
-	envs := []string{}
-	configEnv := opts.ConvertKVStringsToMap(filter)
-
-	for key, val := range b.GetAllAllowed() {
-		if _, ok := configEnv[key]; !ok {
-			envs = append(envs, fmt.Sprintf("%s=%s", key, val))
-		}
-	}
-	return envs
-}
-
-func (b *BuildArgs) getBuildArg(key string, mapping map[string]*string) (string, bool) {
-	defaultValue, exists := mapping[key]
-	// Return override from options if one is defined
-	if v, ok := b.argsFromOptions[key]; ok && v != nil {
-		return *v, ok
-	}
-
-	if defaultValue == nil {
-		if v, ok := b.allowedMetaArgs[key]; ok && v != nil {
-			return *v, ok
-		}
-		return "", false
-	}
-	return *defaultValue, exists
-}
-
-func keysFromMaps(source map[string]*string, builtin map[string]bool) []string {
-	keys := []string{}
-	for key := range source {
-		keys = append(keys, key)
-	}
-	for key := range builtin {
-		keys = append(keys, key)
-	}
-	return keys
-}
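getBuildArg above encodes a three-level precedence: a --build-arg value from the command line wins, then an ARG default, then a pre-FROM meta arg. A standalone re-implementation for clarity (illustrative only; the function name resolve and its signature are mine, not the package's):

package main

import "fmt"

// resolve mirrors getBuildArg's precedence over three maps of *string values.
func resolve(key string, fromOptions, defaults, meta map[string]*string) (string, bool) {
	if v, ok := fromOptions[key]; ok && v != nil {
		return *v, true // command-line override always wins
	}
	d, exists := defaults[key]
	if d == nil {
		// no (or valueless) ARG default: fall back to pre-FROM meta args
		if m, ok := meta[key]; ok && m != nil {
			return *m, true
		}
		return "", false
	}
	return *d, exists
}

func main() {
	s := func(v string) *string { return &v }
	opts := map[string]*string{"VERSION": s("2.0")}            // --build-arg VERSION=2.0
	defs := map[string]*string{"VERSION": s("1.0"), "CHANNEL": nil} // ARG VERSION=1.0 / ARG CHANNEL
	meta := map[string]*string{"CHANNEL": s("stable")}         // ARG CHANNEL=stable before FROM
	fmt.Println(resolve("VERSION", opts, defs, meta)) // 2.0 true
	fmt.Println(resolve("CHANNEL", opts, defs, meta)) // stable true
}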
"github.com/docker/docker/api/types/backend" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/builder" - "github.com/docker/docker/builder/fscache" - "github.com/docker/docker/builder/remotecontext" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/system" - "github.com/moby/buildkit/frontend/dockerfile/instructions" - "github.com/moby/buildkit/frontend/dockerfile/parser" - "github.com/moby/buildkit/frontend/dockerfile/shell" - "github.com/moby/buildkit/session" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sync/syncmap" -) - -var validCommitCommands = map[string]bool{ - "cmd": true, - "entrypoint": true, - "healthcheck": true, - "env": true, - "expose": true, - "label": true, - "onbuild": true, - "user": true, - "volume": true, - "workdir": true, -} - -const ( - stepFormat = "Step %d/%d : %v" -) - -// SessionGetter is object used to get access to a session by uuid -type SessionGetter interface { - Get(ctx context.Context, uuid string) (session.Caller, error) -} - -// BuildManager is shared across all Builder objects -type BuildManager struct { - idMappings *idtools.IDMappings - backend builder.Backend - pathCache pathCache // TODO: make this persistent - sg SessionGetter - fsCache *fscache.FSCache -} - -// NewBuildManager creates a BuildManager -func NewBuildManager(b builder.Backend, sg SessionGetter, fsCache *fscache.FSCache, idMappings *idtools.IDMappings) (*BuildManager, error) { - bm := &BuildManager{ - backend: b, - pathCache: &syncmap.Map{}, - sg: sg, - idMappings: idMappings, - fsCache: fsCache, - } - if err := fsCache.RegisterTransport(remotecontext.ClientSessionRemote, NewClientSessionTransport()); err != nil { - return nil, err - } - return bm, nil -} - -// Build starts a new build from a BuildConfig -func (bm *BuildManager) Build(ctx context.Context, config backend.BuildConfig) (*builder.Result, error) { - buildsTriggered.Inc() - if config.Options.Dockerfile == "" { - config.Options.Dockerfile = builder.DefaultDockerfileName - } - - source, dockerfile, err := remotecontext.Detect(config) - if err != nil { - return nil, err - } - defer func() { - if source != nil { - if err := source.Close(); err != nil { - logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err) - } - } - }() - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - if src, err := bm.initializeClientSession(ctx, cancel, config.Options); err != nil { - return nil, err - } else if src != nil { - source = src - } - - os := "" - apiPlatform := system.ParsePlatform(config.Options.Platform) - if apiPlatform.OS != "" { - os = apiPlatform.OS - } - config.Options.Platform = os - - builderOptions := builderOptions{ - Options: config.Options, - ProgressWriter: config.ProgressWriter, - Backend: bm.backend, - PathCache: bm.pathCache, - IDMappings: bm.idMappings, - } - return newBuilder(ctx, builderOptions).build(source, dockerfile) -} - -func (bm *BuildManager) initializeClientSession(ctx context.Context, cancel func(), options *types.ImageBuildOptions) (builder.Source, error) { - if options.SessionID == "" || bm.sg == nil { - return nil, nil - } - logrus.Debug("client is session enabled") - - connectCtx, cancelCtx := context.WithTimeout(ctx, sessionConnectTimeout) - defer cancelCtx() - - c, err := bm.sg.Get(connectCtx, options.SessionID) - if err != nil { - return nil, err - } - go func() { - 
-		<-c.Context().Done()
-		cancel()
-	}()
-	if options.RemoteContext == remotecontext.ClientSessionRemote {
-		st := time.Now()
-		csi, err := NewClientSessionSourceIdentifier(ctx, bm.sg, options.SessionID)
-		if err != nil {
-			return nil, err
-		}
-		src, err := bm.fsCache.SyncFrom(ctx, csi)
-		if err != nil {
-			return nil, err
-		}
-		logrus.Debugf("sync-time: %v", time.Since(st))
-		return src, nil
-	}
-	return nil, nil
-}
-
-// builderOptions are the dependencies required by the builder
-type builderOptions struct {
-	Options *types.ImageBuildOptions
-	Backend builder.Backend
-	ProgressWriter backend.ProgressWriter
-	PathCache pathCache
-	IDMappings *idtools.IDMappings
-}
-
-// Builder is a Dockerfile builder
-// It implements the builder.Backend interface.
-type Builder struct {
-	options *types.ImageBuildOptions
-
-	Stdout io.Writer
-	Stderr io.Writer
-	Aux *streamformatter.AuxFormatter
-	Output io.Writer
-
-	docker builder.Backend
-	clientCtx context.Context
-
-	idMappings *idtools.IDMappings
-	disableCommit bool
-	imageSources *imageSources
-	pathCache pathCache
-	containerManager *containerManager
-	imageProber ImageProber
-}
-
-// newBuilder creates a new Dockerfile builder from an optional dockerfile and a Options.
-func newBuilder(clientCtx context.Context, options builderOptions) *Builder {
-	config := options.Options
-	if config == nil {
-		config = new(types.ImageBuildOptions)
-	}
-
-	b := &Builder{
-		clientCtx: clientCtx,
-		options: config,
-		Stdout: options.ProgressWriter.StdoutFormatter,
-		Stderr: options.ProgressWriter.StderrFormatter,
-		Aux: options.ProgressWriter.AuxFormatter,
-		Output: options.ProgressWriter.Output,
-		docker: options.Backend,
-		idMappings: options.IDMappings,
-		imageSources: newImageSources(clientCtx, options),
-		pathCache: options.PathCache,
-		imageProber: newImageProber(options.Backend, config.CacheFrom, config.NoCache),
-		containerManager: newContainerManager(options.Backend),
-	}
-
-	return b
-}
-
-// Build 'LABEL' command(s) from '--label' options and add to the last stage
-func buildLabelOptions(labels map[string]string, stages []instructions.Stage) {
-	keys := []string{}
-	for key := range labels {
-		keys = append(keys, key)
-	}
-
-	// Sort the label to have a repeatable order
-	sort.Strings(keys)
-	for _, key := range keys {
-		value := labels[key]
-		stages[len(stages)-1].AddCommand(instructions.NewLabelCommand(key, value, true))
-	}
-}
-
-// Build runs the Dockerfile builder by parsing the Dockerfile and executing
-// the instructions from the file.
-func (b *Builder) build(source builder.Source, dockerfile *parser.Result) (*builder.Result, error) {
-	defer b.imageSources.Unmount()
-
-	stages, metaArgs, err := instructions.Parse(dockerfile.AST)
-	if err != nil {
-		if instructions.IsUnknownInstruction(err) {
-			buildsFailed.WithValues(metricsUnknownInstructionError).Inc()
-		}
-		return nil, errdefs.InvalidParameter(err)
-	}
-	if b.options.Target != "" {
-		targetIx, found := instructions.HasStage(stages, b.options.Target)
-		if !found {
-			buildsFailed.WithValues(metricsBuildTargetNotReachableError).Inc()
-			return nil, errdefs.InvalidParameter(errors.Errorf("failed to reach build target %s in Dockerfile", b.options.Target))
-		}
-		stages = stages[:targetIx+1]
-	}
-
-	// Add 'LABEL' command specified by '--label' option to the last stage
-	buildLabelOptions(b.options.Labels, stages)
-
-	dockerfile.PrintWarnings(b.Stderr)
-	dispatchState, err := b.dispatchDockerfileWithCancellation(stages, metaArgs, dockerfile.EscapeToken, source)
-	if err != nil {
-		return nil, err
-	}
-	if dispatchState.imageID == "" {
-		buildsFailed.WithValues(metricsDockerfileEmptyError).Inc()
-		return nil, errors.New("No image was generated. Is your Dockerfile empty?")
-	}
-	return &builder.Result{ImageID: dispatchState.imageID, FromImage: dispatchState.baseImage}, nil
-}
-
-func emitImageID(aux *streamformatter.AuxFormatter, state *dispatchState) error {
-	if aux == nil || state.imageID == "" {
-		return nil
-	}
-	return aux.Emit(types.BuildResult{ID: state.imageID})
-}
-
-func processMetaArg(meta instructions.ArgCommand, shlex *shell.Lex, args *BuildArgs) error {
-	// shell.Lex currently only support the concatenated string format
-	envs := convertMapToEnvList(args.GetAllAllowed())
-	if err := meta.Expand(func(word string) (string, error) {
-		return shlex.ProcessWord(word, envs)
-	}); err != nil {
-		return err
-	}
-	args.AddArg(meta.Key, meta.Value)
-	args.AddMetaArg(meta.Key, meta.Value)
-	return nil
-}
-
-func printCommand(out io.Writer, currentCommandIndex int, totalCommands int, cmd interface{}) int {
-	fmt.Fprintf(out, stepFormat, currentCommandIndex, totalCommands, cmd)
-	fmt.Fprintln(out)
-	return currentCommandIndex + 1
-}
-
-func (b *Builder) dispatchDockerfileWithCancellation(parseResult []instructions.Stage, metaArgs []instructions.ArgCommand, escapeToken rune, source builder.Source) (*dispatchState, error) {
-	dispatchRequest := dispatchRequest{}
-	buildArgs := NewBuildArgs(b.options.BuildArgs)
-	totalCommands := len(metaArgs) + len(parseResult)
-	currentCommandIndex := 1
-	for _, stage := range parseResult {
-		totalCommands += len(stage.Commands)
-	}
-	shlex := shell.NewLex(escapeToken)
-	for _, meta := range metaArgs {
-		currentCommandIndex = printCommand(b.Stdout, currentCommandIndex, totalCommands, &meta)
-
-		err := processMetaArg(meta, shlex, buildArgs)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	stagesResults := newStagesBuildResults()
-
-	for _, stage := range parseResult {
-		if err := stagesResults.checkStageNameAvailable(stage.Name); err != nil {
-			return nil, err
-		}
-		dispatchRequest = newDispatchRequest(b, escapeToken, source, buildArgs, stagesResults)
-
-		currentCommandIndex = printCommand(b.Stdout, currentCommandIndex, totalCommands, stage.SourceCode)
-		if err := initializeStage(dispatchRequest, &stage); err != nil {
-			return nil, err
-		}
-		dispatchRequest.state.updateRunConfig()
-		fmt.Fprintf(b.Stdout, " ---> %s\n", stringid.TruncateID(dispatchRequest.state.imageID))
-		for _, cmd := range stage.Commands {
-			select {
-			case <-b.clientCtx.Done():
-				logrus.Debug("Builder: build cancelled!")
-				fmt.Fprint(b.Stdout, "Build cancelled\n")
-				buildsFailed.WithValues(metricsBuildCanceled).Inc()
-				return nil, errors.New("Build cancelled")
-			default:
-				// Not cancelled yet, keep going...
-			}
-
-			currentCommandIndex = printCommand(b.Stdout, currentCommandIndex, totalCommands, cmd)
-
-			if err := dispatch(dispatchRequest, cmd); err != nil {
-				return nil, err
-			}
-			dispatchRequest.state.updateRunConfig()
-			fmt.Fprintf(b.Stdout, " ---> %s\n", stringid.TruncateID(dispatchRequest.state.imageID))
-
-		}
-		if err := emitImageID(b.Aux, dispatchRequest.state); err != nil {
-			return nil, err
-		}
-		buildArgs.MergeReferencedArgs(dispatchRequest.state.buildArgs)
-		if err := commitStage(dispatchRequest.state, stagesResults); err != nil {
-			return nil, err
-		}
-	}
-	buildArgs.WarnOnUnusedBuildArgs(b.Stdout)
-	return dispatchRequest.state, nil
-}
-
-// BuildFromConfig builds directly from `changes`, treating it as if it were the contents of a Dockerfile
-// It will:
-// - Call parse.Parse() to get an AST root for the concatenated Dockerfile entries.
-// - Do build by calling builder.dispatch() to call all entries' handling routines
-//
-// BuildFromConfig is used by the /commit endpoint, with the changes
-// coming from the query parameter of the same name.
-//
-// TODO: Remove?
-func BuildFromConfig(config *container.Config, changes []string, os string) (*container.Config, error) {
-	if !system.IsOSSupported(os) {
-		return nil, errdefs.InvalidParameter(system.ErrNotSupportedOperatingSystem)
-	}
-	if len(changes) == 0 {
-		return config, nil
-	}
-
-	dockerfile, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n")))
-	if err != nil {
-		return nil, errdefs.InvalidParameter(err)
-	}
-
-	b := newBuilder(context.Background(), builderOptions{
-		Options: &types.ImageBuildOptions{NoCache: true},
-	})
-
-	// ensure that the commands are valid
-	for _, n := range dockerfile.AST.Children {
-		if !validCommitCommands[n.Value] {
-			return nil, errdefs.InvalidParameter(errors.Errorf("%s is not a valid change command", n.Value))
-		}
-	}
-
-	b.Stdout = ioutil.Discard
-	b.Stderr = ioutil.Discard
-	b.disableCommit = true
-
-	var commands []instructions.Command
-	for _, n := range dockerfile.AST.Children {
-		cmd, err := instructions.ParseCommand(n)
-		if err != nil {
-			return nil, errdefs.InvalidParameter(err)
-		}
-		commands = append(commands, cmd)
-	}
-
-	dispatchRequest := newDispatchRequest(b, dockerfile.EscapeToken, nil, NewBuildArgs(b.options.BuildArgs), newStagesBuildResults())
-	// We make mutations to the configuration, ensure we have a copy
-	dispatchRequest.state.runConfig = copyRunConfig(config)
-	dispatchRequest.state.imageID = config.Image
-	dispatchRequest.state.operatingSystem = os
-	for _, cmd := range commands {
-		err := dispatch(dispatchRequest, cmd)
-		if err != nil {
-			return nil, errdefs.InvalidParameter(err)
-		}
-		dispatchRequest.state.updateRunConfig()
-	}
-
-	return dispatchRequest.state.runConfig, nil
-}
-
-func convertMapToEnvList(m map[string]string) []string {
-	result := []string{}
-	for k, v := range m {
-		result = append(result, k+"="+v)
-	}
-	return result
-}
"github.com/docker/docker/builder/dockerfile" - -func defaultShellForOS(os string) []string { - return []string{"/bin/sh", "-c"} -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/builder_windows.go b/vendor/github.com/docker/docker/builder/dockerfile/builder_windows.go deleted file mode 100644 index fbafa52ae..000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/builder_windows.go +++ /dev/null @@ -1,8 +0,0 @@ -package dockerfile // import "github.com/docker/docker/builder/dockerfile" - -func defaultShellForOS(os string) []string { - if os == "linux" { - return []string{"/bin/sh", "-c"} - } - return []string{"cmd", "/S", "/C"} -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/clientsession.go b/vendor/github.com/docker/docker/builder/dockerfile/clientsession.go deleted file mode 100644 index b48090d7b..000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/clientsession.go +++ /dev/null @@ -1,76 +0,0 @@ -package dockerfile // import "github.com/docker/docker/builder/dockerfile" - -import ( - "context" - "time" - - "github.com/docker/docker/builder/fscache" - "github.com/docker/docker/builder/remotecontext" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/session/filesync" - "github.com/pkg/errors" -) - -const sessionConnectTimeout = 5 * time.Second - -// ClientSessionTransport is a transport for copying files from docker client -// to the daemon. -type ClientSessionTransport struct{} - -// NewClientSessionTransport returns new ClientSessionTransport instance -func NewClientSessionTransport() *ClientSessionTransport { - return &ClientSessionTransport{} -} - -// Copy data from a remote to a destination directory. -func (cst *ClientSessionTransport) Copy(ctx context.Context, id fscache.RemoteIdentifier, dest string, cu filesync.CacheUpdater) error { - csi, ok := id.(*ClientSessionSourceIdentifier) - if !ok { - return errors.New("invalid identifier for client session") - } - - return filesync.FSSync(ctx, csi.caller, filesync.FSSendRequestOpt{ - IncludePatterns: csi.includePatterns, - DestDir: dest, - CacheUpdater: cu, - }) -} - -// ClientSessionSourceIdentifier is an identifier that can be used for requesting -// files from remote client -type ClientSessionSourceIdentifier struct { - includePatterns []string - caller session.Caller - uuid string -} - -// NewClientSessionSourceIdentifier returns new ClientSessionSourceIdentifier instance -func NewClientSessionSourceIdentifier(ctx context.Context, sg SessionGetter, uuid string) (*ClientSessionSourceIdentifier, error) { - csi := &ClientSessionSourceIdentifier{ - uuid: uuid, - } - caller, err := sg.Get(ctx, uuid) - if err != nil { - return nil, errors.Wrapf(err, "failed to get session for %s", uuid) - } - - csi.caller = caller - return csi, nil -} - -// Transport returns transport identifier for remote identifier -func (csi *ClientSessionSourceIdentifier) Transport() string { - return remotecontext.ClientSessionRemote -} - -// SharedKey returns shared key for remote identifier. Shared key is used -// for finding the base for a repeated transfer. -func (csi *ClientSessionSourceIdentifier) SharedKey() string { - return csi.caller.SharedKey() -} - -// Key returns unique key for remote identifier. Requests with same key return -// same data. 
-func (csi *ClientSessionSourceIdentifier) Key() string { - return csi.uuid -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/containerbackend.go b/vendor/github.com/docker/docker/builder/dockerfile/containerbackend.go deleted file mode 100644 index 54adfb13f..000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/containerbackend.go +++ /dev/null @@ -1,146 +0,0 @@ -package dockerfile // import "github.com/docker/docker/builder/dockerfile" - -import ( - "context" - "fmt" - "io" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/builder" - containerpkg "github.com/docker/docker/container" - "github.com/docker/docker/pkg/stringid" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -type containerManager struct { - tmpContainers map[string]struct{} - backend builder.ExecBackend -} - -// newContainerManager creates a new container backend -func newContainerManager(docker builder.ExecBackend) *containerManager { - return &containerManager{ - backend: docker, - tmpContainers: make(map[string]struct{}), - } -} - -// Create a container -func (c *containerManager) Create(runConfig *container.Config, hostConfig *container.HostConfig) (container.ContainerCreateCreatedBody, error) { - container, err := c.backend.ContainerCreate(types.ContainerCreateConfig{ - Config: runConfig, - HostConfig: hostConfig, - }) - if err != nil { - return container, err - } - c.tmpContainers[container.ID] = struct{}{} - return container, nil -} - -var errCancelled = errors.New("build cancelled") - -// Run a container by ID -func (c *containerManager) Run(ctx context.Context, cID string, stdout, stderr io.Writer) (err error) { - attached := make(chan struct{}) - errCh := make(chan error) - go func() { - errCh <- c.backend.ContainerAttachRaw(cID, nil, stdout, stderr, true, attached) - }() - select { - case err := <-errCh: - return err - case <-attached: - } - - finished := make(chan struct{}) - cancelErrCh := make(chan error, 1) - go func() { - select { - case <-ctx.Done(): - logrus.Debugln("Build cancelled, killing and removing container:", cID) - c.backend.ContainerKill(cID, 0) - c.removeContainer(cID, stdout) - cancelErrCh <- errCancelled - case <-finished: - cancelErrCh <- nil - } - }() - - if err := c.backend.ContainerStart(cID, nil, "", ""); err != nil { - close(finished) - logCancellationError(cancelErrCh, "error from ContainerStart: "+err.Error()) - return err - } - - // Block on reading output from container, stop on err or chan closed - if err := <-errCh; err != nil { - close(finished) - logCancellationError(cancelErrCh, "error from errCh: "+err.Error()) - return err - } - - waitC, err := c.backend.ContainerWait(ctx, cID, containerpkg.WaitConditionNotRunning) - if err != nil { - close(finished) - logCancellationError(cancelErrCh, fmt.Sprintf("unable to begin ContainerWait: %s", err)) - return err - } - - if status := <-waitC; status.ExitCode() != 0 { - close(finished) - logCancellationError(cancelErrCh, - fmt.Sprintf("a non-zero code from ContainerWait: %d", status.ExitCode())) - return &statusCodeError{code: status.ExitCode(), err: status.Err()} - } - - close(finished) - return <-cancelErrCh -} - -func logCancellationError(cancelErrCh chan error, msg string) { - if cancelErr := <-cancelErrCh; cancelErr != nil { - logrus.Debugf("Build cancelled (%v): %s", cancelErr, msg) - } -} - -type statusCodeError struct { - code int - err error -} - -func (e *statusCodeError) Error() string { - if e.err == nil { - return 
"" - } - return e.err.Error() -} - -func (e *statusCodeError) StatusCode() int { - return e.code -} - -func (c *containerManager) removeContainer(containerID string, stdout io.Writer) error { - rmConfig := &types.ContainerRmConfig{ - ForceRemove: true, - RemoveVolume: true, - } - if err := c.backend.ContainerRm(containerID, rmConfig); err != nil { - fmt.Fprintf(stdout, "Error removing intermediate container %s: %v\n", stringid.TruncateID(containerID), err) - return err - } - return nil -} - -// RemoveAll containers managed by this container manager -func (c *containerManager) RemoveAll(stdout io.Writer) { - for containerID := range c.tmpContainers { - if err := c.removeContainer(containerID, stdout); err != nil { - return - } - delete(c.tmpContainers, containerID) - fmt.Fprintf(stdout, "Removing intermediate container %s\n", stringid.TruncateID(containerID)) - } -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/copy.go b/vendor/github.com/docker/docker/builder/dockerfile/copy.go deleted file mode 100644 index 43f40b62f..000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/copy.go +++ /dev/null @@ -1,560 +0,0 @@ -package dockerfile // import "github.com/docker/docker/builder/dockerfile" - -import ( - "archive/tar" - "fmt" - "io" - "mime" - "net/http" - "net/url" - "os" - "path/filepath" - "runtime" - "sort" - "strings" - "time" - - "github.com/docker/docker/builder" - "github.com/docker/docker/builder/remotecontext" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/containerfs" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/pkg/urlutil" - "github.com/pkg/errors" -) - -const unnamedFilename = "__unnamed__" - -type pathCache interface { - Load(key interface{}) (value interface{}, ok bool) - Store(key, value interface{}) -} - -// copyInfo is a data object which stores the metadata about each source file in -// a copyInstruction -type copyInfo struct { - root containerfs.ContainerFS - path string - hash string - noDecompress bool -} - -func (c copyInfo) fullPath() (string, error) { - return c.root.ResolveScopedPath(c.path, true) -} - -func newCopyInfoFromSource(source builder.Source, path string, hash string) copyInfo { - return copyInfo{root: source.Root(), path: path, hash: hash} -} - -func newCopyInfos(copyInfos ...copyInfo) []copyInfo { - return copyInfos -} - -// copyInstruction is a fully parsed COPY or ADD command that is passed to -// Builder.performCopy to copy files into the image filesystem -type copyInstruction struct { - cmdName string - infos []copyInfo - dest string - chownStr string - allowLocalDecompression bool -} - -// copier reads a raw COPY or ADD command, fetches remote sources using a downloader, -// and creates a copyInstruction -type copier struct { - imageSource *imageMount - source builder.Source - pathCache pathCache - download sourceDownloader - platform string - // for cleanup. TODO: having copier.cleanup() is error prone and hard to - // follow. Code calling performCopy should manage the lifecycle of its params. - // Copier should take override source as input, not imageMount. 
- activeLayer builder.RWLayer - tmpPaths []string -} - -func copierFromDispatchRequest(req dispatchRequest, download sourceDownloader, imageSource *imageMount) copier { - return copier{ - source: req.source, - pathCache: req.builder.pathCache, - download: download, - imageSource: imageSource, - platform: req.builder.options.Platform, - } -} - -func (o *copier) createCopyInstruction(args []string, cmdName string) (copyInstruction, error) { - inst := copyInstruction{cmdName: cmdName} - last := len(args) - 1 - - // Work in platform-specific filepath semantics - inst.dest = fromSlash(args[last], o.platform) - separator := string(separator(o.platform)) - infos, err := o.getCopyInfosForSourcePaths(args[0:last], inst.dest) - if err != nil { - return inst, errors.Wrapf(err, "%s failed", cmdName) - } - if len(infos) > 1 && !strings.HasSuffix(inst.dest, separator) { - return inst, errors.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName) - } - inst.infos = infos - return inst, nil -} - -// getCopyInfosForSourcePaths iterates over the source files and calculate the info -// needed to copy (e.g. hash value if cached) -// The dest is used in case source is URL (and ends with "/") -func (o *copier) getCopyInfosForSourcePaths(sources []string, dest string) ([]copyInfo, error) { - var infos []copyInfo - for _, orig := range sources { - subinfos, err := o.getCopyInfoForSourcePath(orig, dest) - if err != nil { - return nil, err - } - infos = append(infos, subinfos...) - } - - if len(infos) == 0 { - return nil, errors.New("no source files were specified") - } - return infos, nil -} - -func (o *copier) getCopyInfoForSourcePath(orig, dest string) ([]copyInfo, error) { - if !urlutil.IsURL(orig) { - return o.calcCopyInfo(orig, true) - } - - remote, path, err := o.download(orig) - if err != nil { - return nil, err - } - // If path == "" then we are unable to determine filename from src - // We have to make sure dest is available - if path == "" { - if strings.HasSuffix(dest, "/") { - return nil, errors.Errorf("cannot determine filename for source %s", orig) - } - path = unnamedFilename - } - o.tmpPaths = append(o.tmpPaths, remote.Root().Path()) - - hash, err := remote.Hash(path) - ci := newCopyInfoFromSource(remote, path, hash) - ci.noDecompress = true // data from http shouldn't be extracted even on ADD - return newCopyInfos(ci), err -} - -// Cleanup removes any temporary directories created as part of downloading -// remote files. -func (o *copier) Cleanup() { - for _, path := range o.tmpPaths { - os.RemoveAll(path) - } - o.tmpPaths = []string{} - if o.activeLayer != nil { - o.activeLayer.Release() - o.activeLayer = nil - } -} - -// TODO: allowWildcards can probably be removed by refactoring this function further. -func (o *copier) calcCopyInfo(origPath string, allowWildcards bool) ([]copyInfo, error) { - imageSource := o.imageSource - - // TODO: do this when creating copier. Requires validateCopySourcePath - // (and other below) to be aware of the difference sources. Why is it only - // done on image Source? 
- if imageSource != nil && o.activeLayer == nil { - // this needs to be protected against repeated calls as wildcard copy - // will call it multiple times for a single COPY - var err error - rwLayer, err := imageSource.NewRWLayer() - if err != nil { - return nil, err - } - o.activeLayer = rwLayer - - o.source, err = remotecontext.NewLazySource(rwLayer.Root()) - if err != nil { - return nil, errors.Wrapf(err, "failed to create context for copy from %s", rwLayer.Root().Path()) - } - } - - if o.source == nil { - return nil, errors.Errorf("missing build context") - } - - root := o.source.Root() - - if err := validateCopySourcePath(imageSource, origPath, root.OS()); err != nil { - return nil, err - } - - // Work in source OS specific filepath semantics - // For LCOW, this is NOT the daemon OS. - origPath = root.FromSlash(origPath) - origPath = strings.TrimPrefix(origPath, string(root.Separator())) - origPath = strings.TrimPrefix(origPath, "."+string(root.Separator())) - - // Deal with wildcards - if allowWildcards && containsWildcards(origPath, root.OS()) { - return o.copyWithWildcards(origPath) - } - - if imageSource != nil && imageSource.ImageID() != "" { - // return a cached copy if one exists - if h, ok := o.pathCache.Load(imageSource.ImageID() + origPath); ok { - return newCopyInfos(newCopyInfoFromSource(o.source, origPath, h.(string))), nil - } - } - - // Deal with the single file case - copyInfo, err := copyInfoForFile(o.source, origPath) - switch { - case err != nil: - return nil, err - case copyInfo.hash != "": - o.storeInPathCache(imageSource, origPath, copyInfo.hash) - return newCopyInfos(copyInfo), err - } - - // TODO: remove, handle dirs in Hash() - subfiles, err := walkSource(o.source, origPath) - if err != nil { - return nil, err - } - - hash := hashStringSlice("dir", subfiles) - o.storeInPathCache(imageSource, origPath, hash) - return newCopyInfos(newCopyInfoFromSource(o.source, origPath, hash)), nil -} - -func containsWildcards(name, platform string) bool { - isWindows := platform == "windows" - for i := 0; i < len(name); i++ { - ch := name[i] - if ch == '\\' && !isWindows { - i++ - } else if ch == '*' || ch == '?' || ch == '[' { - return true - } - } - return false -} - -func (o *copier) storeInPathCache(im *imageMount, path string, hash string) { - if im != nil { - o.pathCache.Store(im.ImageID()+path, hash) - } -} - -func (o *copier) copyWithWildcards(origPath string) ([]copyInfo, error) { - root := o.source.Root() - var copyInfos []copyInfo - if err := root.Walk(root.Path(), func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - rel, err := remotecontext.Rel(root, path) - if err != nil { - return err - } - - if rel == "." { - return nil - } - if match, _ := root.Match(origPath, rel); !match { - return nil - } - - // Note we set allowWildcards to false in case the name has - // a * in it - subInfos, err := o.calcCopyInfo(rel, false) - if err != nil { - return err - } - copyInfos = append(copyInfos, subInfos...) 
- return nil - }); err != nil { - return nil, err - } - return copyInfos, nil -} - -func copyInfoForFile(source builder.Source, path string) (copyInfo, error) { - fi, err := remotecontext.StatAt(source, path) - if err != nil { - return copyInfo{}, err - } - - if fi.IsDir() { - return copyInfo{}, nil - } - hash, err := source.Hash(path) - if err != nil { - return copyInfo{}, err - } - return newCopyInfoFromSource(source, path, "file:"+hash), nil -} - -// TODO: dedupe with copyWithWildcards() -func walkSource(source builder.Source, origPath string) ([]string, error) { - fp, err := remotecontext.FullPath(source, origPath) - if err != nil { - return nil, err - } - // Must be a dir - var subfiles []string - err = source.Root().Walk(fp, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - rel, err := remotecontext.Rel(source.Root(), path) - if err != nil { - return err - } - if rel == "." { - return nil - } - hash, err := source.Hash(rel) - if err != nil { - return nil - } - // we already checked handleHash above - subfiles = append(subfiles, hash) - return nil - }) - if err != nil { - return nil, err - } - - sort.Strings(subfiles) - return subfiles, nil -} - -type sourceDownloader func(string) (builder.Source, string, error) - -func newRemoteSourceDownloader(output, stdout io.Writer) sourceDownloader { - return func(url string) (builder.Source, string, error) { - return downloadSource(output, stdout, url) - } -} - -func errOnSourceDownload(_ string) (builder.Source, string, error) { - return nil, "", errors.New("source can't be a URL for COPY") -} - -func getFilenameForDownload(path string, resp *http.Response) string { - // Guess filename based on source - if path != "" && !strings.HasSuffix(path, "/") { - if filename := filepath.Base(filepath.FromSlash(path)); filename != "" { - return filename - } - } - - // Guess filename based on Content-Disposition - if contentDisposition := resp.Header.Get("Content-Disposition"); contentDisposition != "" { - if _, params, err := mime.ParseMediaType(contentDisposition); err == nil { - if params["filename"] != "" && !strings.HasSuffix(params["filename"], "/") { - if filename := filepath.Base(filepath.FromSlash(params["filename"])); filename != "" { - return filename - } - } - } - } - return "" -} - -func downloadSource(output io.Writer, stdout io.Writer, srcURL string) (remote builder.Source, p string, err error) { - u, err := url.Parse(srcURL) - if err != nil { - return - } - - resp, err := remotecontext.GetWithStatusError(srcURL) - if err != nil { - return - } - - filename := getFilenameForDownload(u.Path, resp) - - // Prepare file in a tmp dir - tmpDir, err := ioutils.TempDir("", "docker-remote") - if err != nil { - return - } - defer func() { - if err != nil { - os.RemoveAll(tmpDir) - } - }() - // If filename is empty, the returned filename will be "" but - // the tmp filename will be created as "__unnamed__" - tmpFileName := filename - if filename == "" { - tmpFileName = unnamedFilename - } - tmpFileName = filepath.Join(tmpDir, tmpFileName) - tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) - if err != nil { - return - } - - progressOutput := streamformatter.NewJSONProgressOutput(output, true) - progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading") - // Download and dump result to tmp file - // TODO: add filehash directly - if _, err = io.Copy(tmpFile, progressReader); err != nil { - tmpFile.Close() - return - } - // TODO: how 
important is this random blank line to the output? - fmt.Fprintln(stdout) - - // Set the mtime to the Last-Modified header value if present - // Otherwise just remove atime and mtime - mTime := time.Time{} - - lastMod := resp.Header.Get("Last-Modified") - if lastMod != "" { - // If we can't parse it then just let it default to 'zero' - // otherwise use the parsed time value - if parsedMTime, err := http.ParseTime(lastMod); err == nil { - mTime = parsedMTime - } - } - - tmpFile.Close() - - if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil { - return - } - - lc, err := remotecontext.NewLazySource(containerfs.NewLocalContainerFS(tmpDir)) - return lc, filename, err -} - -type copyFileOptions struct { - decompress bool - chownPair idtools.IDPair - archiver Archiver -} - -type copyEndpoint struct { - driver containerfs.Driver - path string -} - -func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions) error { - srcPath, err := source.fullPath() - if err != nil { - return err - } - - destPath, err := dest.fullPath() - if err != nil { - return err - } - - archiver := options.archiver - - srcEndpoint := ©Endpoint{driver: source.root, path: srcPath} - destEndpoint := ©Endpoint{driver: dest.root, path: destPath} - - src, err := source.root.Stat(srcPath) - if err != nil { - return errors.Wrapf(err, "source path not found") - } - if src.IsDir() { - return copyDirectory(archiver, srcEndpoint, destEndpoint, options.chownPair) - } - if options.decompress && isArchivePath(source.root, srcPath) && !source.noDecompress { - return archiver.UntarPath(srcPath, destPath) - } - - destExistsAsDir, err := isExistingDirectory(destEndpoint) - if err != nil { - return err - } - // dest.path must be used because destPath has already been cleaned of any - // trailing slash - if endsInSlash(dest.root, dest.path) || destExistsAsDir { - // source.path must be used to get the correct filename when the source - // is a symlink - destPath = dest.root.Join(destPath, source.root.Base(source.path)) - destEndpoint = ©Endpoint{driver: dest.root, path: destPath} - } - return copyFile(archiver, srcEndpoint, destEndpoint, options.chownPair) -} - -func isArchivePath(driver containerfs.ContainerFS, path string) bool { - file, err := driver.Open(path) - if err != nil { - return false - } - defer file.Close() - rdr, err := archive.DecompressStream(file) - if err != nil { - return false - } - r := tar.NewReader(rdr) - _, err = r.Next() - return err == nil -} - -func copyDirectory(archiver Archiver, source, dest *copyEndpoint, chownPair idtools.IDPair) error { - destExists, err := isExistingDirectory(dest) - if err != nil { - return errors.Wrapf(err, "failed to query destination path") - } - - if err := archiver.CopyWithTar(source.path, dest.path); err != nil { - return errors.Wrapf(err, "failed to copy directory") - } - // TODO: @gupta-ak. Investigate how LCOW permission mappings will work. 
- return fixPermissions(source.path, dest.path, chownPair, !destExists) -} - -func copyFile(archiver Archiver, source, dest *copyEndpoint, chownPair idtools.IDPair) error { - if runtime.GOOS == "windows" && dest.driver.OS() == "linux" { - // LCOW - if err := dest.driver.MkdirAll(dest.driver.Dir(dest.path), 0755); err != nil { - return errors.Wrapf(err, "failed to create new directory") - } - } else { - if err := idtools.MkdirAllAndChownNew(filepath.Dir(dest.path), 0755, chownPair); err != nil { - // Normal containers - return errors.Wrapf(err, "failed to create new directory") - } - } - - if err := archiver.CopyFileWithTar(source.path, dest.path); err != nil { - return errors.Wrapf(err, "failed to copy file") - } - // TODO: @gupta-ak. Investigate how LCOW permission mappings will work. - return fixPermissions(source.path, dest.path, chownPair, false) -} - -func endsInSlash(driver containerfs.Driver, path string) bool { - return strings.HasSuffix(path, string(driver.Separator())) -} - -// isExistingDirectory returns true if the path exists and is a directory -func isExistingDirectory(point *copyEndpoint) (bool, error) { - destStat, err := point.driver.Stat(point.path) - switch { - case os.IsNotExist(err): - return false, nil - case err != nil: - return false, err - } - return destStat.IsDir(), nil -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/copy_unix.go b/vendor/github.com/docker/docker/builder/dockerfile/copy_unix.go deleted file mode 100644 index 15453452e..000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/copy_unix.go +++ /dev/null @@ -1,48 +0,0 @@ -// +build !windows - -package dockerfile // import "github.com/docker/docker/builder/dockerfile" - -import ( - "os" - "path/filepath" - - "github.com/docker/docker/pkg/containerfs" - "github.com/docker/docker/pkg/idtools" -) - -func fixPermissions(source, destination string, rootIDs idtools.IDPair, overrideSkip bool) error { - var ( - skipChownRoot bool - err error - ) - if !overrideSkip { - destEndpoint := ©Endpoint{driver: containerfs.NewLocalDriver(), path: destination} - skipChownRoot, err = isExistingDirectory(destEndpoint) - if err != nil { - return err - } - } - - // We Walk on the source rather than on the destination because we don't - // want to change permissions on things we haven't created or modified. - return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error { - // Do not alter the walk root iff. it existed before, as it doesn't fall under - // the domain of "things we should chown". - if skipChownRoot && source == fullpath { - return nil - } - - // Path is prefixed by source: substitute with destination instead. 
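// Illustration (hypothetical paths, not part of the vendored file): with
// source="/tmp/ctx" and destination="/var/lib/app", a walked
// fullpath="/tmp/ctx/a/b" yields cleaned="a/b", so the Lchown below is
// applied to "/var/lib/app/a/b".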
- cleaned, err := filepath.Rel(source, fullpath) - if err != nil { - return err - } - - fullpath = filepath.Join(destination, cleaned) - return os.Lchown(fullpath, rootIDs.UID, rootIDs.GID) - }) -} - -func validateCopySourcePath(imageSource *imageMount, origPath, platform string) error { - return nil -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/copy_windows.go b/vendor/github.com/docker/docker/builder/dockerfile/copy_windows.go deleted file mode 100644 index 907c34407..000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/copy_windows.go +++ /dev/null @@ -1,43 +0,0 @@ -package dockerfile // import "github.com/docker/docker/builder/dockerfile" - -import ( - "errors" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/idtools" -) - -var pathBlacklist = map[string]bool{ - "c:\\": true, - "c:\\windows": true, -} - -func fixPermissions(source, destination string, rootIDs idtools.IDPair, overrideSkip bool) error { - // chown is not supported on Windows - return nil -} - -func validateCopySourcePath(imageSource *imageMount, origPath, platform string) error { - // validate windows paths from other images + LCOW - if imageSource == nil || platform != "windows" { - return nil - } - - origPath = filepath.FromSlash(origPath) - p := strings.ToLower(filepath.Clean(origPath)) - if !filepath.IsAbs(p) { - if filepath.VolumeName(p) != "" { - if p[len(p)-2:] == ":." { // case where clean returns weird c:. paths - p = p[:len(p)-1] - } - p += "\\" - } else { - p = filepath.Join("c:\\", p) - } - } - if _, blacklisted := pathBlacklist[p]; blacklisted { - return errors.New("copy from c:\\ or c:\\windows is not allowed on windows") - } - return nil -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go deleted file mode 100644 index 4d47c208b..000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go +++ /dev/null @@ -1,571 +0,0 @@ -package dockerfile // import "github.com/docker/docker/builder/dockerfile" - -// This file contains the dispatchers for each command. Note that -// `nullDispatch` is not actually a command, but support for commands we parse -// but do nothing with. -// -// See evaluator.go for a higher level discussion of the whole evaluator -// package. - -import ( - "bytes" - "fmt" - "runtime" - "sort" - "strings" - - "github.com/docker/docker/api" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/strslice" - "github.com/docker/docker/builder" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/image" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/pkg/signal" - "github.com/docker/docker/pkg/system" - "github.com/docker/go-connections/nat" - "github.com/moby/buildkit/frontend/dockerfile/instructions" - "github.com/moby/buildkit/frontend/dockerfile/parser" - "github.com/moby/buildkit/frontend/dockerfile/shell" - "github.com/pkg/errors" -) - -// ENV foo bar -// -// Sets the environment variable foo to bar, also makes interpolation -// in the dockerfile available from the next statement on via ${foo}. 
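dispatchEnv below merges each new variable into runConfig.Env with replace-or-append semantics. A minimal runnable sketch of that merge, assuming plain case-sensitive key comparison (the vendored dispatcher uses shell.EqualEnvKeys instead, which is case-insensitive where the platform requires it):

package main

import (
	"fmt"
	"strings"
)

// mergeEnv applies the replace-or-append rule dispatchEnv uses: if the key
// already exists in env it is rewritten in place, otherwise the new pair is
// appended. Key comparison here is plain ==; the vendored code uses
// shell.EqualEnvKeys.
func mergeEnv(env []string, key, val string) []string {
	newVar := key + "=" + val
	for i, kv := range env {
		if strings.SplitN(kv, "=", 2)[0] == key {
			env[i] = newVar
			return env
		}
	}
	return append(env, newVar)
}

func main() {
	env := []string{"PATH=/usr/bin", "FOO=old"}
	env = mergeEnv(env, "FOO", "bar") // existing key: replaced in place
	env = mergeEnv(env, "BAZ", "qux") // new key: appended
	fmt.Println(env)                  // [PATH=/usr/bin FOO=bar BAZ=qux]
}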
-// -func dispatchEnv(d dispatchRequest, c *instructions.EnvCommand) error { - runConfig := d.state.runConfig - commitMessage := bytes.NewBufferString("ENV") - for _, e := range c.Env { - name := e.Key - newVar := e.String() - - commitMessage.WriteString(" " + newVar) - gotOne := false - for i, envVar := range runConfig.Env { - envParts := strings.SplitN(envVar, "=", 2) - compareFrom := envParts[0] - if shell.EqualEnvKeys(compareFrom, name) { - runConfig.Env[i] = newVar - gotOne = true - break - } - } - if !gotOne { - runConfig.Env = append(runConfig.Env, newVar) - } - } - return d.builder.commit(d.state, commitMessage.String()) -} - -// MAINTAINER some text -// -// Sets the maintainer metadata. -func dispatchMaintainer(d dispatchRequest, c *instructions.MaintainerCommand) error { - - d.state.maintainer = c.Maintainer - return d.builder.commit(d.state, "MAINTAINER "+c.Maintainer) -} - -// LABEL some json data describing the image -// -// Sets the Label variable foo to bar, -// -func dispatchLabel(d dispatchRequest, c *instructions.LabelCommand) error { - if d.state.runConfig.Labels == nil { - d.state.runConfig.Labels = make(map[string]string) - } - commitStr := "LABEL" - for _, v := range c.Labels { - d.state.runConfig.Labels[v.Key] = v.Value - commitStr += " " + v.String() - } - return d.builder.commit(d.state, commitStr) -} - -// ADD foo /path -// -// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling -// exist here. If you do not wish to have this automatic handling, use COPY. -// -func dispatchAdd(d dispatchRequest, c *instructions.AddCommand) error { - downloader := newRemoteSourceDownloader(d.builder.Output, d.builder.Stdout) - copier := copierFromDispatchRequest(d, downloader, nil) - defer copier.Cleanup() - - copyInstruction, err := copier.createCopyInstruction(c.SourcesAndDest, "ADD") - if err != nil { - return err - } - copyInstruction.chownStr = c.Chown - copyInstruction.allowLocalDecompression = true - - return d.builder.performCopy(d.state, copyInstruction) -} - -// COPY foo /path -// -// Same as 'ADD' but without the tar and remote url handling. -// -func dispatchCopy(d dispatchRequest, c *instructions.CopyCommand) error { - var im *imageMount - var err error - if c.From != "" { - im, err = d.getImageMount(c.From) - if err != nil { - return errors.Wrapf(err, "invalid from flag value %s", c.From) - } - } - copier := copierFromDispatchRequest(d, errOnSourceDownload, im) - defer copier.Cleanup() - copyInstruction, err := copier.createCopyInstruction(c.SourcesAndDest, "COPY") - if err != nil { - return err - } - copyInstruction.chownStr = c.Chown - - return d.builder.performCopy(d.state, copyInstruction) -} - -func (d *dispatchRequest) getImageMount(imageRefOrID string) (*imageMount, error) { - if imageRefOrID == "" { - // TODO: this could return the source in the default case as well? 
- return nil, nil - } - - var localOnly bool - stage, err := d.stages.get(imageRefOrID) - if err != nil { - return nil, err - } - if stage != nil { - imageRefOrID = stage.Image - localOnly = true - } - return d.builder.imageSources.Get(imageRefOrID, localOnly, d.state.operatingSystem) -} - -// FROM [--platform=platform] imagename[:tag | @digest] [AS build-stage-name] -// -func initializeStage(d dispatchRequest, cmd *instructions.Stage) error { - d.builder.imageProber.Reset() - if err := system.ValidatePlatform(&cmd.Platform); err != nil { - return err - } - image, err := d.getFromImage(d.shlex, cmd.BaseName, cmd.Platform.OS) - if err != nil { - return err - } - state := d.state - if err := state.beginStage(cmd.Name, image); err != nil { - return err - } - if len(state.runConfig.OnBuild) > 0 { - triggers := state.runConfig.OnBuild - state.runConfig.OnBuild = nil - return dispatchTriggeredOnBuild(d, triggers) - } - return nil -} - -func dispatchTriggeredOnBuild(d dispatchRequest, triggers []string) error { - fmt.Fprintf(d.builder.Stdout, "# Executing %d build trigger", len(triggers)) - if len(triggers) > 1 { - fmt.Fprint(d.builder.Stdout, "s") - } - fmt.Fprintln(d.builder.Stdout) - for _, trigger := range triggers { - d.state.updateRunConfig() - ast, err := parser.Parse(strings.NewReader(trigger)) - if err != nil { - return err - } - if len(ast.AST.Children) != 1 { - return errors.New("onbuild trigger should be a single expression") - } - cmd, err := instructions.ParseCommand(ast.AST.Children[0]) - if err != nil { - if instructions.IsUnknownInstruction(err) { - buildsFailed.WithValues(metricsUnknownInstructionError).Inc() - } - return err - } - err = dispatch(d, cmd) - if err != nil { - return err - } - } - return nil -} - -func (d *dispatchRequest) getExpandedImageName(shlex *shell.Lex, name string) (string, error) { - substitutionArgs := []string{} - for key, value := range d.state.buildArgs.GetAllMeta() { - substitutionArgs = append(substitutionArgs, key+"="+value) - } - - name, err := shlex.ProcessWord(name, substitutionArgs) - if err != nil { - return "", err - } - return name, nil -} - -// getOsFromFlagsAndStage calculates the operating system if we need to pull an image. -// stagePlatform contains the value supplied by optional `--platform=` on -// a current FROM statement. b.builder.options.Platform contains the operating -// system part of the optional flag passed in the API call (or CLI flag -// through `docker build --platform=...`). Precedence is for an explicit -// platform indication in the FROM statement. -func (d *dispatchRequest) getOsFromFlagsAndStage(stageOS string) string { - switch { - case stageOS != "": - return stageOS - case d.builder.options.Platform != "": - // Note this is API "platform", but by this point, as the daemon is not - // multi-arch aware yet, it is guaranteed to only hold the OS part here. - return d.builder.options.Platform - default: - return runtime.GOOS - } -} - -func (d *dispatchRequest) getImageOrStage(name string, stageOS string) (builder.Image, error) { - var localOnly bool - if im, ok := d.stages.getByName(name); ok { - name = im.Image - localOnly = true - } - - os := d.getOsFromFlagsAndStage(stageOS) - - // Windows cannot support a container with no base image unless it is LCOW. 
- if name == api.NoBaseImageSpecifier { - imageImage := &image.Image{} - imageImage.OS = runtime.GOOS - if runtime.GOOS == "windows" { - switch os { - case "windows", "": - return nil, errors.New("Windows does not support FROM scratch") - case "linux": - if !system.LCOWSupported() { - return nil, errors.New("Linux containers are not supported on this system") - } - imageImage.OS = "linux" - default: - return nil, errors.Errorf("operating system %q is not supported", os) - } - } - return builder.Image(imageImage), nil - } - imageMount, err := d.builder.imageSources.Get(name, localOnly, os) - if err != nil { - return nil, err - } - return imageMount.Image(), nil -} -func (d *dispatchRequest) getFromImage(shlex *shell.Lex, name string, stageOS string) (builder.Image, error) { - name, err := d.getExpandedImageName(shlex, name) - if err != nil { - return nil, err - } - return d.getImageOrStage(name, stageOS) -} - -func dispatchOnbuild(d dispatchRequest, c *instructions.OnbuildCommand) error { - - d.state.runConfig.OnBuild = append(d.state.runConfig.OnBuild, c.Expression) - return d.builder.commit(d.state, "ONBUILD "+c.Expression) -} - -// WORKDIR /tmp -// -// Set the working directory for future RUN/CMD/etc statements. -// -func dispatchWorkdir(d dispatchRequest, c *instructions.WorkdirCommand) error { - runConfig := d.state.runConfig - var err error - runConfig.WorkingDir, err = normalizeWorkdir(d.state.operatingSystem, runConfig.WorkingDir, c.Path) - if err != nil { - return err - } - - // For performance reasons, we explicitly do a create/mkdir now - // This avoids having an unnecessary expensive mount/unmount calls - // (on Windows in particular) during each container create. - // Prior to 1.13, the mkdir was deferred and not executed at this step. - if d.builder.disableCommit { - // Don't call back into the daemon if we're going through docker commit --change "WORKDIR /foo". - // We've already updated the runConfig and that's enough. - return nil - } - - comment := "WORKDIR " + runConfig.WorkingDir - runConfigWithCommentCmd := copyRunConfig(runConfig, withCmdCommentString(comment, d.state.operatingSystem)) - - containerID, err := d.builder.probeAndCreate(d.state, runConfigWithCommentCmd) - if err != nil || containerID == "" { - return err - } - - if err := d.builder.docker.ContainerCreateWorkdir(containerID); err != nil { - return err - } - - return d.builder.commitContainer(d.state, containerID, runConfigWithCommentCmd) -} - -func resolveCmdLine(cmd instructions.ShellDependantCmdLine, runConfig *container.Config, os string) []string { - result := cmd.CmdLine - if cmd.PrependShell && result != nil { - result = append(getShell(runConfig, os), result...) - } - return result -} - -// RUN some command yo -// -// run a command and commit the image. 
Args are automatically prepended with -// the current SHELL which defaults to 'sh -c' under linux or 'cmd /S /C' under -// Windows, in the event there is only one argument The difference in processing: -// -// RUN echo hi # sh -c echo hi (Linux and LCOW) -// RUN echo hi # cmd /S /C echo hi (Windows) -// RUN [ "echo", "hi" ] # echo hi -// -func dispatchRun(d dispatchRequest, c *instructions.RunCommand) error { - if !system.IsOSSupported(d.state.operatingSystem) { - return system.ErrNotSupportedOperatingSystem - } - stateRunConfig := d.state.runConfig - cmdFromArgs := resolveCmdLine(c.ShellDependantCmdLine, stateRunConfig, d.state.operatingSystem) - buildArgs := d.state.buildArgs.FilterAllowed(stateRunConfig.Env) - - saveCmd := cmdFromArgs - if len(buildArgs) > 0 { - saveCmd = prependEnvOnCmd(d.state.buildArgs, buildArgs, cmdFromArgs) - } - - runConfigForCacheProbe := copyRunConfig(stateRunConfig, - withCmd(saveCmd), - withEntrypointOverride(saveCmd, nil)) - if hit, err := d.builder.probeCache(d.state, runConfigForCacheProbe); err != nil || hit { - return err - } - - runConfig := copyRunConfig(stateRunConfig, - withCmd(cmdFromArgs), - withEnv(append(stateRunConfig.Env, buildArgs...)), - withEntrypointOverride(saveCmd, strslice.StrSlice{""})) - - // set config as already being escaped, this prevents double escaping on windows - runConfig.ArgsEscaped = true - - cID, err := d.builder.create(runConfig) - if err != nil { - return err - } - - if err := d.builder.containerManager.Run(d.builder.clientCtx, cID, d.builder.Stdout, d.builder.Stderr); err != nil { - if err, ok := err.(*statusCodeError); ok { - // TODO: change error type, because jsonmessage.JSONError assumes HTTP - msg := fmt.Sprintf( - "The command '%s' returned a non-zero code: %d", - strings.Join(runConfig.Cmd, " "), err.StatusCode()) - if err.Error() != "" { - msg = fmt.Sprintf("%s: %s", msg, err.Error()) - } - return &jsonmessage.JSONError{ - Message: msg, - Code: err.StatusCode(), - } - } - return err - } - - return d.builder.commitContainer(d.state, cID, runConfigForCacheProbe) -} - -// Derive the command to use for probeCache() and to commit in this container. -// Note that we only do this if there are any build-time env vars. Also, we -// use the special argument "|#" at the start of the args array. This will -// avoid conflicts with any RUN command since commands can not -// start with | (vertical bar). The "#" (number of build envs) is there to -// help ensure proper cache matches. We don't want a RUN command -// that starts with "foo=abc" to be considered part of a build-time env var. -// -// remove any unreferenced built-in args from the environment variables. -// These args are transparent so resulting image should be the same regardless -// of the value. -func prependEnvOnCmd(buildArgs *BuildArgs, buildArgVars []string, cmd strslice.StrSlice) strslice.StrSlice { - var tmpBuildEnv []string - for _, env := range buildArgVars { - key := strings.SplitN(env, "=", 2)[0] - if buildArgs.IsReferencedOrNotBuiltin(key) { - tmpBuildEnv = append(tmpBuildEnv, env) - } - } - - sort.Strings(tmpBuildEnv) - tmpEnv := append([]string{fmt.Sprintf("|%d", len(tmpBuildEnv))}, tmpBuildEnv...) - return strslice.StrSlice(append(tmpEnv, cmd...)) -} - -// CMD foo -// -// Set the default command to run in the container (which may be empty). -// Argument handling is the same as RUN. 
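The "|N"-prefixed cache key that prependEnvOnCmd builds above can be sketched in isolation. This hedged version keeps only the sort-count-prepend shape and omits the IsReferencedOrNotBuiltin filtering the vendored code performs:

package main

import (
	"fmt"
	"sort"
)

// prependEnv mirrors prependEnvOnCmd's encoding: sort the surviving build
// args, prefix them with "|N" (N = number of args), and splice the real
// command after them. Commands cannot start with '|', so the key cannot
// collide with an ordinary RUN command.
func prependEnv(buildArgs, cmd []string) []string {
	sort.Strings(buildArgs)
	key := append([]string{fmt.Sprintf("|%d", len(buildArgs))}, buildArgs...)
	return append(key, cmd...)
}

func main() {
	cmd := []string{"/bin/sh", "-c", "echo hi"}
	fmt.Println(prependEnv([]string{"FOO=a", "BAR=b"}, cmd))
	// [|2 BAR=b FOO=a /bin/sh -c echo hi]
}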
-// -func dispatchCmd(d dispatchRequest, c *instructions.CmdCommand) error { - runConfig := d.state.runConfig - cmd := resolveCmdLine(c.ShellDependantCmdLine, runConfig, d.state.operatingSystem) - runConfig.Cmd = cmd - // set config as already being escaped, this prevents double escaping on windows - runConfig.ArgsEscaped = true - - if err := d.builder.commit(d.state, fmt.Sprintf("CMD %q", cmd)); err != nil { - return err - } - - if len(c.ShellDependantCmdLine.CmdLine) != 0 { - d.state.cmdSet = true - } - - return nil -} - -// HEALTHCHECK foo -// -// Set the default healthcheck command to run in the container (which may be empty). -// Argument handling is the same as RUN. -// -func dispatchHealthcheck(d dispatchRequest, c *instructions.HealthCheckCommand) error { - runConfig := d.state.runConfig - if runConfig.Healthcheck != nil { - oldCmd := runConfig.Healthcheck.Test - if len(oldCmd) > 0 && oldCmd[0] != "NONE" { - fmt.Fprintf(d.builder.Stdout, "Note: overriding previous HEALTHCHECK: %v\n", oldCmd) - } - } - runConfig.Healthcheck = c.Health - return d.builder.commit(d.state, fmt.Sprintf("HEALTHCHECK %q", runConfig.Healthcheck)) -} - -// ENTRYPOINT /usr/sbin/nginx -// -// Set the entrypoint to /usr/sbin/nginx. Will accept the CMD as the arguments -// to /usr/sbin/nginx. Uses the default shell if not in JSON format. -// -// Handles command processing similar to CMD and RUN, only req.runConfig.Entrypoint -// is initialized at newBuilder time instead of through argument parsing. -// -func dispatchEntrypoint(d dispatchRequest, c *instructions.EntrypointCommand) error { - runConfig := d.state.runConfig - cmd := resolveCmdLine(c.ShellDependantCmdLine, runConfig, d.state.operatingSystem) - runConfig.Entrypoint = cmd - if !d.state.cmdSet { - runConfig.Cmd = nil - } - - return d.builder.commit(d.state, fmt.Sprintf("ENTRYPOINT %q", runConfig.Entrypoint)) -} - -// EXPOSE 6667/tcp 7000/tcp -// -// Expose ports for links and port mappings. This all ends up in -// req.runConfig.ExposedPorts for runconfig. -// -func dispatchExpose(d dispatchRequest, c *instructions.ExposeCommand, envs []string) error { - // custom multi word expansion - // expose $FOO with FOO="80 443" is expanded as EXPOSE [80,443]. This is the only command supporting word to words expansion - // so the word processing has been de-generalized - ports := []string{} - for _, p := range c.Ports { - ps, err := d.shlex.ProcessWords(p, envs) - if err != nil { - return err - } - ports = append(ports, ps...) - } - c.Ports = ports - - ps, _, err := nat.ParsePortSpecs(ports) - if err != nil { - return err - } - - if d.state.runConfig.ExposedPorts == nil { - d.state.runConfig.ExposedPorts = make(nat.PortSet) - } - for p := range ps { - d.state.runConfig.ExposedPorts[p] = struct{}{} - } - - return d.builder.commit(d.state, "EXPOSE "+strings.Join(c.Ports, " ")) -} - -// USER foo -// -// Set the user to 'foo' for future commands and when running the -// ENTRYPOINT/CMD at container run time. -// -func dispatchUser(d dispatchRequest, c *instructions.UserCommand) error { - d.state.runConfig.User = c.User - return d.builder.commit(d.state, fmt.Sprintf("USER %v", c.User)) -} - -// VOLUME /foo -// -// Expose the volume /foo for use. Will also accept the JSON array form. 
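The multi-word expansion that dispatchExpose applies above (EXPOSE $FOO with FOO="80 443" becoming two port specs) can be approximated as follows. strings.Fields stands in for the shell lexer's ProcessWords; the port parsing is the same nat.ParsePortSpecs call the dispatcher uses:

package main

import (
	"fmt"
	"strings"

	"github.com/docker/go-connections/nat"
)

func main() {
	// EXPOSE $PORTS with PORTS="80 443/tcp": split into words first, then
	// parse each word as a port spec.
	words := strings.Fields("80 443/tcp")
	ps, _, err := nat.ParsePortSpecs(words)
	if err != nil {
		panic(err)
	}
	for p := range ps {
		fmt.Println(p) // "80/tcp" and "443/tcp" (tcp is the default proto)
	}
}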
-// -func dispatchVolume(d dispatchRequest, c *instructions.VolumeCommand) error { - if d.state.runConfig.Volumes == nil { - d.state.runConfig.Volumes = map[string]struct{}{} - } - for _, v := range c.Volumes { - if v == "" { - return errors.New("VOLUME specified can not be an empty string") - } - d.state.runConfig.Volumes[v] = struct{}{} - } - return d.builder.commit(d.state, fmt.Sprintf("VOLUME %v", c.Volumes)) -} - -// STOPSIGNAL signal -// -// Set the signal that will be used to kill the container. -func dispatchStopSignal(d dispatchRequest, c *instructions.StopSignalCommand) error { - - _, err := signal.ParseSignal(c.Signal) - if err != nil { - return errdefs.InvalidParameter(err) - } - d.state.runConfig.StopSignal = c.Signal - return d.builder.commit(d.state, fmt.Sprintf("STOPSIGNAL %v", c.Signal)) -} - -// ARG name[=value] -// -// Adds the variable foo to the trusted list of variables that can be passed -// to builder using the --build-arg flag for expansion/substitution or passing to 'run'. -// Dockerfile author may optionally set a default value of this variable. -func dispatchArg(d dispatchRequest, c *instructions.ArgCommand) error { - - commitStr := "ARG " + c.Key - if c.Value != nil { - commitStr += "=" + *c.Value - } - - d.state.buildArgs.AddArg(c.Key, c.Value) - return d.builder.commit(d.state, commitStr) -} - -// SHELL powershell -command -// -// Set the non-default shell to use. -func dispatchShell(d dispatchRequest, c *instructions.ShellCommand) error { - d.state.runConfig.Shell = c.Shell - return d.builder.commit(d.state, fmt.Sprintf("SHELL %v", d.state.runConfig.Shell)) -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix.go deleted file mode 100644 index b3ba38032..000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build !windows - -package dockerfile // import "github.com/docker/docker/builder/dockerfile" - -import ( - "errors" - "os" - "path/filepath" -) - -// normalizeWorkdir normalizes a user requested working directory in a -// platform semantically consistent way. -func normalizeWorkdir(_ string, current string, requested string) (string, error) { - if requested == "" { - return "", errors.New("cannot normalize nothing") - } - current = filepath.FromSlash(current) - requested = filepath.FromSlash(requested) - if !filepath.IsAbs(requested) { - return filepath.Join(string(os.PathSeparator), current, requested), nil - } - return requested, nil -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows.go deleted file mode 100644 index 7824d1169..000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows.go +++ /dev/null @@ -1,95 +0,0 @@ -package dockerfile // import "github.com/docker/docker/builder/dockerfile" - -import ( - "errors" - "fmt" - "os" - "path" - "path/filepath" - "regexp" - "strings" - - "github.com/docker/docker/pkg/system" -) - -var pattern = regexp.MustCompile(`^[a-zA-Z]:\.$`) - -// normalizeWorkdir normalizes a user requested working directory in a -// platform semantically consistent way. 
-func normalizeWorkdir(platform string, current string, requested string) (string, error) { - if platform == "" { - platform = "windows" - } - if platform == "windows" { - return normalizeWorkdirWindows(current, requested) - } - return normalizeWorkdirUnix(current, requested) -} - -// normalizeWorkdirUnix normalizes a user requested working directory in a -// platform semantically consistent way. -func normalizeWorkdirUnix(current string, requested string) (string, error) { - if requested == "" { - return "", errors.New("cannot normalize nothing") - } - current = strings.Replace(current, string(os.PathSeparator), "/", -1) - requested = strings.Replace(requested, string(os.PathSeparator), "/", -1) - if !path.IsAbs(requested) { - return path.Join(`/`, current, requested), nil - } - return requested, nil -} - -// normalizeWorkdirWindows normalizes a user requested working directory in a -// platform semantically consistent way. -func normalizeWorkdirWindows(current string, requested string) (string, error) { - if requested == "" { - return "", errors.New("cannot normalize nothing") - } - - // `filepath.Clean` will replace "" with "." so skip in that case - if current != "" { - current = filepath.Clean(current) - } - if requested != "" { - requested = filepath.Clean(requested) - } - - // If either current or requested in Windows is: - // C: - // C:. - // then an error will be thrown as the definition for the above - // refers to `current directory on drive C:` - // Since filepath.Clean() will automatically normalize the above - // to `C:.`, we only need to check the last format - if pattern.MatchString(current) { - return "", fmt.Errorf("%s is not a directory. If you are specifying a drive letter, please add a trailing '\\'", current) - } - if pattern.MatchString(requested) { - return "", fmt.Errorf("%s is not a directory. If you are specifying a drive letter, please add a trailing '\\'", requested) - } - - // Target semantics is C:\somefolder, specifically in the format: - // UPPERCASEDriveLetter-Colon-Backslash-FolderName. We are already - // guaranteed that `current`, if set, is consistent. This allows us to - // cope correctly with any of the following in a Dockerfile: - // WORKDIR a --> C:\a - // WORKDIR c:\\foo --> C:\foo - // WORKDIR \\foo --> C:\foo - // WORKDIR /foo --> C:\foo - // WORKDIR c:\\foo \ WORKDIR bar --> C:\foo --> C:\foo\bar - // WORKDIR C:/foo \ WORKDIR bar --> C:\foo --> C:\foo\bar - // WORKDIR C:/foo \ WORKDIR \\bar --> C:\foo --> C:\bar - // WORKDIR /foo \ WORKDIR c:/bar --> C:\foo --> C:\bar - if len(current) == 0 || system.IsAbs(requested) { - if (requested[0] == os.PathSeparator) || - (len(requested) > 1 && string(requested[1]) != ":") || - (len(requested) == 1) { - requested = filepath.Join(`C:\`, requested) - } - } else { - requested = filepath.Join(current, requested) - } - // Upper-case drive letter - return (strings.ToUpper(string(requested[0])) + requested[1:]), nil -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go b/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go deleted file mode 100644 index 02e147752..000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go +++ /dev/null @@ -1,250 +0,0 @@ -// Package dockerfile is the evaluation step in the Dockerfile parse/evaluate pipeline. -// -// It incorporates a dispatch table based on the parser.Node values (see the -// parser package for more information) that are yielded from the parser itself. 
-// Calling newBuilder with the BuildOpts struct can be used to customize the -// experience for execution purposes only. Parsing is controlled in the parser -// package, and this division of responsibility should be respected. -// -// Please see the jump table targets for the actual invocations, most of which -// will call out to the functions in internals.go to deal with their tasks. -// -// ONBUILD is a special case, which is covered in the onbuild() func in -// dispatchers.go. -// -// The evaluator uses the concept of "steps", which are usually each processable -// line in the Dockerfile. Each step is numbered and certain actions are taken -// before and after each step, such as creating an image ID and removing temporary -// containers and images. Note that ONBUILD creates a kinda-sorta "sub run" which -// includes its own set of steps (usually only one of them). -package dockerfile // import "github.com/docker/docker/builder/dockerfile" - -import ( - "reflect" - "runtime" - "strconv" - "strings" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/builder" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/runconfig/opts" - "github.com/moby/buildkit/frontend/dockerfile/instructions" - "github.com/moby/buildkit/frontend/dockerfile/shell" - "github.com/pkg/errors" -) - -func dispatch(d dispatchRequest, cmd instructions.Command) (err error) { - if c, ok := cmd.(instructions.PlatformSpecific); ok { - err := c.CheckPlatform(d.state.operatingSystem) - if err != nil { - return errdefs.InvalidParameter(err) - } - } - runConfigEnv := d.state.runConfig.Env - envs := append(runConfigEnv, d.state.buildArgs.FilterAllowed(runConfigEnv)...) - - if ex, ok := cmd.(instructions.SupportsSingleWordExpansion); ok { - err := ex.Expand(func(word string) (string, error) { - return d.shlex.ProcessWord(word, envs) - }) - if err != nil { - return errdefs.InvalidParameter(err) - } - } - - defer func() { - if d.builder.options.ForceRemove { - d.builder.containerManager.RemoveAll(d.builder.Stdout) - return - } - if d.builder.options.Remove && err == nil { - d.builder.containerManager.RemoveAll(d.builder.Stdout) - return - } - }() - switch c := cmd.(type) { - case *instructions.EnvCommand: - return dispatchEnv(d, c) - case *instructions.MaintainerCommand: - return dispatchMaintainer(d, c) - case *instructions.LabelCommand: - return dispatchLabel(d, c) - case *instructions.AddCommand: - return dispatchAdd(d, c) - case *instructions.CopyCommand: - return dispatchCopy(d, c) - case *instructions.OnbuildCommand: - return dispatchOnbuild(d, c) - case *instructions.WorkdirCommand: - return dispatchWorkdir(d, c) - case *instructions.RunCommand: - return dispatchRun(d, c) - case *instructions.CmdCommand: - return dispatchCmd(d, c) - case *instructions.HealthCheckCommand: - return dispatchHealthcheck(d, c) - case *instructions.EntrypointCommand: - return dispatchEntrypoint(d, c) - case *instructions.ExposeCommand: - return dispatchExpose(d, c, envs) - case *instructions.UserCommand: - return dispatchUser(d, c) - case *instructions.VolumeCommand: - return dispatchVolume(d, c) - case *instructions.StopSignalCommand: - return dispatchStopSignal(d, c) - case *instructions.ArgCommand: - return dispatchArg(d, c) - case *instructions.ShellCommand: - return dispatchShell(d, c) - } - return errors.Errorf("unsupported command type: %v", reflect.TypeOf(cmd)) -} - -// dispatchState is a data object which is modified by dispatchers -type dispatchState struct 
{ - runConfig *container.Config - maintainer string - cmdSet bool - imageID string - baseImage builder.Image - stageName string - buildArgs *BuildArgs - operatingSystem string -} - -func newDispatchState(baseArgs *BuildArgs) *dispatchState { - args := baseArgs.Clone() - args.ResetAllowed() - return &dispatchState{runConfig: &container.Config{}, buildArgs: args} -} - -type stagesBuildResults struct { - flat []*container.Config - indexed map[string]*container.Config -} - -func newStagesBuildResults() *stagesBuildResults { - return &stagesBuildResults{ - indexed: make(map[string]*container.Config), - } -} - -func (r *stagesBuildResults) getByName(name string) (*container.Config, bool) { - c, ok := r.indexed[strings.ToLower(name)] - return c, ok -} - -func (r *stagesBuildResults) validateIndex(i int) error { - if i == len(r.flat) { - return errors.New("refers to current build stage") - } - if i < 0 || i > len(r.flat) { - return errors.New("index out of bounds") - } - return nil -} - -func (r *stagesBuildResults) get(nameOrIndex string) (*container.Config, error) { - if c, ok := r.getByName(nameOrIndex); ok { - return c, nil - } - ix, err := strconv.ParseInt(nameOrIndex, 10, 0) - if err != nil { - return nil, nil - } - if err := r.validateIndex(int(ix)); err != nil { - return nil, err - } - return r.flat[ix], nil -} - -func (r *stagesBuildResults) checkStageNameAvailable(name string) error { - if name != "" { - if _, ok := r.getByName(name); ok { - return errors.Errorf("%s stage name already used", name) - } - } - return nil -} - -func (r *stagesBuildResults) commitStage(name string, config *container.Config) error { - if name != "" { - if _, ok := r.getByName(name); ok { - return errors.Errorf("%s stage name already used", name) - } - r.indexed[strings.ToLower(name)] = config - } - r.flat = append(r.flat, config) - return nil -} - -func commitStage(state *dispatchState, stages *stagesBuildResults) error { - return stages.commitStage(state.stageName, state.runConfig) -} - -type dispatchRequest struct { - state *dispatchState - shlex *shell.Lex - builder *Builder - source builder.Source - stages *stagesBuildResults -} - -func newDispatchRequest(builder *Builder, escapeToken rune, source builder.Source, buildArgs *BuildArgs, stages *stagesBuildResults) dispatchRequest { - return dispatchRequest{ - state: newDispatchState(buildArgs), - shlex: shell.NewLex(escapeToken), - builder: builder, - source: source, - stages: stages, - } -} - -func (s *dispatchState) updateRunConfig() { - s.runConfig.Image = s.imageID -} - -// hasFromImage returns true if the builder has processed a `FROM ` line -func (s *dispatchState) hasFromImage() bool { - return s.imageID != "" || (s.baseImage != nil && s.baseImage.ImageID() == "") -} - -func (s *dispatchState) beginStage(stageName string, image builder.Image) error { - s.stageName = stageName - s.imageID = image.ImageID() - s.operatingSystem = image.OperatingSystem() - if s.operatingSystem == "" { // In case it isn't set - s.operatingSystem = runtime.GOOS - } - if !system.IsOSSupported(s.operatingSystem) { - return system.ErrNotSupportedOperatingSystem - } - - if image.RunConfig() != nil { - // copy avoids referencing the same instance when 2 stages have the same base - s.runConfig = copyRunConfig(image.RunConfig()) - } else { - s.runConfig = &container.Config{} - } - s.baseImage = image - s.setDefaultPath() - s.runConfig.OpenStdin = false - s.runConfig.StdinOnce = false - return nil -} - -// Add the default PATH to runConfig.ENV if one exists for the operating system 
and there -// is no PATH set. Note that Windows containers on Windows won't have one as it's set by HCS -func (s *dispatchState) setDefaultPath() { - defaultPath := system.DefaultPathEnv(s.operatingSystem) - if defaultPath == "" { - return - } - envMap := opts.ConvertKVStringsToMap(s.runConfig.Env) - if _, ok := envMap["PATH"]; !ok { - s.runConfig.Env = append(s.runConfig.Env, "PATH="+defaultPath) - } -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/imagecontext.go b/vendor/github.com/docker/docker/builder/dockerfile/imagecontext.go deleted file mode 100644 index 53a4b9774..000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/imagecontext.go +++ /dev/null @@ -1,121 +0,0 @@ -package dockerfile // import "github.com/docker/docker/builder/dockerfile" - -import ( - "context" - "runtime" - - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/builder" - dockerimage "github.com/docker/docker/image" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -type getAndMountFunc func(string, bool, string) (builder.Image, builder.ROLayer, error) - -// imageSources mounts images and provides a cache for mounted images. It tracks -// all images so they can be unmounted at the end of the build. -type imageSources struct { - byImageID map[string]*imageMount - mounts []*imageMount - getImage getAndMountFunc -} - -func newImageSources(ctx context.Context, options builderOptions) *imageSources { - getAndMount := func(idOrRef string, localOnly bool, osForPull string) (builder.Image, builder.ROLayer, error) { - pullOption := backend.PullOptionNoPull - if !localOnly { - if options.Options.PullParent { - pullOption = backend.PullOptionForcePull - } else { - pullOption = backend.PullOptionPreferLocal - } - } - return options.Backend.GetImageAndReleasableLayer(ctx, idOrRef, backend.GetImageAndLayerOptions{ - PullOption: pullOption, - AuthConfig: options.Options.AuthConfigs, - Output: options.ProgressWriter.Output, - OS: osForPull, - }) - } - - return &imageSources{ - byImageID: make(map[string]*imageMount), - getImage: getAndMount, - } -} - -func (m *imageSources) Get(idOrRef string, localOnly bool, osForPull string) (*imageMount, error) { - if im, ok := m.byImageID[idOrRef]; ok { - return im, nil - } - - image, layer, err := m.getImage(idOrRef, localOnly, osForPull) - if err != nil { - return nil, err - } - im := newImageMount(image, layer) - m.Add(im) - return im, nil -} - -func (m *imageSources) Unmount() (retErr error) { - for _, im := range m.mounts { - if err := im.unmount(); err != nil { - logrus.Error(err) - retErr = err - } - } - return -} - -func (m *imageSources) Add(im *imageMount) { - switch im.image { - case nil: - // set the OS for scratch images - os := runtime.GOOS - // Windows does not support scratch except for LCOW - if runtime.GOOS == "windows" { - os = "linux" - } - im.image = &dockerimage.Image{V1Image: dockerimage.V1Image{OS: os}} - default: - m.byImageID[im.image.ImageID()] = im - } - m.mounts = append(m.mounts, im) -} - -// imageMount is a reference to an image that can be used as a builder.Source -type imageMount struct { - image builder.Image - source builder.Source - layer builder.ROLayer -} - -func newImageMount(image builder.Image, layer builder.ROLayer) *imageMount { - im := &imageMount{image: image, layer: layer} - return im -} - -func (im *imageMount) unmount() error { - if im.layer == nil { - return nil - } - if err := im.layer.Release(); err != nil { - return errors.Wrapf(err, "failed to unmount previous build image 
%s", im.image.ImageID()) - } - im.layer = nil - return nil -} - -func (im *imageMount) Image() builder.Image { - return im.image -} - -func (im *imageMount) NewRWLayer() (builder.RWLayer, error) { - return im.layer.NewRWLayer() -} - -func (im *imageMount) ImageID() string { - return im.image.ImageID() -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/imageprobe.go b/vendor/github.com/docker/docker/builder/dockerfile/imageprobe.go deleted file mode 100644 index 6960bf889..000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/imageprobe.go +++ /dev/null @@ -1,63 +0,0 @@ -package dockerfile // import "github.com/docker/docker/builder/dockerfile" - -import ( - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/builder" - "github.com/sirupsen/logrus" -) - -// ImageProber exposes an Image cache to the Builder. It supports resetting a -// cache. -type ImageProber interface { - Reset() - Probe(parentID string, runConfig *container.Config) (string, error) -} - -type imageProber struct { - cache builder.ImageCache - reset func() builder.ImageCache - cacheBusted bool -} - -func newImageProber(cacheBuilder builder.ImageCacheBuilder, cacheFrom []string, noCache bool) ImageProber { - if noCache { - return &nopProber{} - } - - reset := func() builder.ImageCache { - return cacheBuilder.MakeImageCache(cacheFrom) - } - return &imageProber{cache: reset(), reset: reset} -} - -func (c *imageProber) Reset() { - c.cache = c.reset() - c.cacheBusted = false -} - -// Probe checks if cache match can be found for current build instruction. -// It returns the cachedID if there is a hit, and the empty string on miss -func (c *imageProber) Probe(parentID string, runConfig *container.Config) (string, error) { - if c.cacheBusted { - return "", nil - } - cacheID, err := c.cache.GetCache(parentID, runConfig) - if err != nil { - return "", err - } - if len(cacheID) == 0 { - logrus.Debugf("[BUILDER] Cache miss: %s", runConfig.Cmd) - c.cacheBusted = true - return "", nil - } - logrus.Debugf("[BUILDER] Use cached version: %s", runConfig.Cmd) - return cacheID, nil -} - -type nopProber struct{} - -func (c *nopProber) Reset() {} - -func (c *nopProber) Probe(_ string, _ *container.Config) (string, error) { - return "", nil -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals.go b/vendor/github.com/docker/docker/builder/dockerfile/internals.go deleted file mode 100644 index 88e75a217..000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/internals.go +++ /dev/null @@ -1,481 +0,0 @@ -package dockerfile // import "github.com/docker/docker/builder/dockerfile" - -// internals for handling commands. Covers many areas and a lot of -// non-contiguous functionality. Please read the comments. 
- -import ( - "crypto/sha256" - "encoding/hex" - "fmt" - "io" - "os" - "path" - "path/filepath" - "runtime" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/builder" - "github.com/docker/docker/image" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/containerfs" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/system" - "github.com/docker/go-connections/nat" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// Archiver defines an interface for copying files from one destination to -// another using Tar/Untar. -type Archiver interface { - TarUntar(src, dst string) error - UntarPath(src, dst string) error - CopyWithTar(src, dst string) error - CopyFileWithTar(src, dst string) error - IDMappings() *idtools.IDMappings -} - -// The builder will use the following interfaces if the container fs implements -// these for optimized copies to and from the container. -type extractor interface { - ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error -} - -type archiver interface { - ArchivePath(src string, opts *archive.TarOptions) (io.ReadCloser, error) -} - -// helper functions to get tar/untar func -func untarFunc(i interface{}) containerfs.UntarFunc { - if ea, ok := i.(extractor); ok { - return ea.ExtractArchive - } - return chrootarchive.Untar -} - -func tarFunc(i interface{}) containerfs.TarFunc { - if ap, ok := i.(archiver); ok { - return ap.ArchivePath - } - return archive.TarWithOptions -} - -func (b *Builder) getArchiver(src, dst containerfs.Driver) Archiver { - t, u := tarFunc(src), untarFunc(dst) - return &containerfs.Archiver{ - SrcDriver: src, - DstDriver: dst, - Tar: t, - Untar: u, - IDMappingsVar: b.idMappings, - } -} - -func (b *Builder) commit(dispatchState *dispatchState, comment string) error { - if b.disableCommit { - return nil - } - if !dispatchState.hasFromImage() { - return errors.New("Please provide a source image with `from` prior to commit") - } - - runConfigWithCommentCmd := copyRunConfig(dispatchState.runConfig, withCmdComment(comment, dispatchState.operatingSystem)) - id, err := b.probeAndCreate(dispatchState, runConfigWithCommentCmd) - if err != nil || id == "" { - return err - } - - return b.commitContainer(dispatchState, id, runConfigWithCommentCmd) -} - -func (b *Builder) commitContainer(dispatchState *dispatchState, id string, containerConfig *container.Config) error { - if b.disableCommit { - return nil - } - - commitCfg := backend.CommitConfig{ - Author: dispatchState.maintainer, - // TODO: this copy should be done by Commit() - Config: copyRunConfig(dispatchState.runConfig), - ContainerConfig: containerConfig, - ContainerID: id, - } - - imageID, err := b.docker.CommitBuildStep(commitCfg) - dispatchState.imageID = string(imageID) - return err -} - -func (b *Builder) exportImage(state *dispatchState, layer builder.RWLayer, parent builder.Image, runConfig *container.Config) error { - newLayer, err := layer.Commit() - if err != nil { - return err - } - - // add an image mount without an image so the layer is properly unmounted - // if there is an error before we can add the full mount with image - b.imageSources.Add(newImageMount(nil, newLayer)) - - parentImage, ok := parent.(*image.Image) - if !ok { - return errors.Errorf("unexpected image type") - } - - newImage := 
image.NewChildImage(parentImage, image.ChildConfig{ - Author: state.maintainer, - ContainerConfig: runConfig, - DiffID: newLayer.DiffID(), - Config: copyRunConfig(state.runConfig), - }, parentImage.OS) - - // TODO: it seems strange to marshal this here instead of just passing in the - // image struct - config, err := newImage.MarshalJSON() - if err != nil { - return errors.Wrap(err, "failed to encode image config") - } - - exportedImage, err := b.docker.CreateImage(config, state.imageID) - if err != nil { - return errors.Wrapf(err, "failed to export image") - } - - state.imageID = exportedImage.ImageID() - b.imageSources.Add(newImageMount(exportedImage, newLayer)) - return nil -} - -func (b *Builder) performCopy(state *dispatchState, inst copyInstruction) error { - srcHash := getSourceHashFromInfos(inst.infos) - - var chownComment string - if inst.chownStr != "" { - chownComment = fmt.Sprintf("--chown=%s", inst.chownStr) - } - commentStr := fmt.Sprintf("%s %s%s in %s ", inst.cmdName, chownComment, srcHash, inst.dest) - - // TODO: should this have been using origPaths instead of srcHash in the comment? - runConfigWithCommentCmd := copyRunConfig( - state.runConfig, - withCmdCommentString(commentStr, state.operatingSystem)) - hit, err := b.probeCache(state, runConfigWithCommentCmd) - if err != nil || hit { - return err - } - - imageMount, err := b.imageSources.Get(state.imageID, true, state.operatingSystem) - if err != nil { - return errors.Wrapf(err, "failed to get destination image %q", state.imageID) - } - - rwLayer, err := imageMount.NewRWLayer() - if err != nil { - return err - } - defer rwLayer.Release() - - destInfo, err := createDestInfo(state.runConfig.WorkingDir, inst, rwLayer, state.operatingSystem) - if err != nil { - return err - } - - chownPair := b.idMappings.RootPair() - // if a chown was requested, perform the steps to get the uid, gid - // translated (if necessary because of user namespaces), and replace - // the root pair with the chown pair for copy operations - if inst.chownStr != "" { - chownPair, err = parseChownFlag(inst.chownStr, destInfo.root.Path(), b.idMappings) - if err != nil { - return errors.Wrapf(err, "unable to convert uid/gid chown string to host mapping") - } - } - - for _, info := range inst.infos { - opts := copyFileOptions{ - decompress: inst.allowLocalDecompression, - archiver: b.getArchiver(info.root, destInfo.root), - chownPair: chownPair, - } - if err := performCopyForInfo(destInfo, info, opts); err != nil { - return errors.Wrapf(err, "failed to copy files") - } - } - return b.exportImage(state, rwLayer, imageMount.Image(), runConfigWithCommentCmd) -} - -func createDestInfo(workingDir string, inst copyInstruction, rwLayer builder.RWLayer, platform string) (copyInfo, error) { - // Twiddle the destination when it's a relative path - meaning, make it - // relative to the WORKINGDIR - dest, err := normalizeDest(workingDir, inst.dest, platform) - if err != nil { - return copyInfo{}, errors.Wrapf(err, "invalid %s", inst.cmdName) - } - - return copyInfo{root: rwLayer.Root(), path: dest}, nil -} - -// normalizeDest normalises the destination of a COPY/ADD command in a -// platform semantically consistent way. 
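The non-Windows branch of normalizeDest (next) is simple enough to sketch standalone: a relative destination is joined onto the working directory, and a trailing separator is preserved because it marks the destination as a directory rather than a file. joinDest below is an illustrative simplification, not the vendored function.

package main

import (
    "fmt"
    "path"
    "strings"
)

// joinDest mirrors only the Linux path handling of normalizeDest:
// absolute destinations pass through, relative ones are anchored at
// the working directory, and a trailing "/" is kept.
func joinDest(workingDir, requested string) string {
    if path.IsAbs(requested) {
        return requested
    }
    endsInSlash := strings.HasSuffix(requested, "/")
    dest := path.Join("/", workingDir, requested)
    if endsInSlash {
        dest += "/"
    }
    return dest
}

func main() {
    fmt.Println(joinDest("/app", "bin/")) // /app/bin/
    fmt.Println(joinDest("/app", "/etc")) // /etc
}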
-func normalizeDest(workingDir, requested string, platform string) (string, error) { - dest := fromSlash(requested, platform) - endsInSlash := strings.HasSuffix(dest, string(separator(platform))) - - if platform != "windows" { - if !path.IsAbs(requested) { - dest = path.Join("/", filepath.ToSlash(workingDir), dest) - // Make sure we preserve any trailing slash - if endsInSlash { - dest += "/" - } - } - return dest, nil - } - - // We are guaranteed that the working directory is already consistent, - // However, Windows also has, for now, the limitation that ADD/COPY can - // only be done to the system drive, not any drives that might be present - // as a result of a bind mount. - // - // So... if the path requested is Linux-style absolute (/foo or \\foo), - // we assume it is the system drive. If it is a Windows-style absolute - // (DRIVE:\\foo), error if DRIVE is not C. And finally, ensure we - // strip any configured working directory's drive letter so that it - // can be subsequently legitimately converted to a Windows volume-style - // pathname. - - // Not a typo - filepath.IsAbs, not system.IsAbs on this next check as - // we only want to validate where the DriveColon part has been supplied. - if filepath.IsAbs(dest) { - if strings.ToUpper(string(dest[0])) != "C" { - return "", fmt.Errorf("Windows does not support destinations not on the system drive (C:)") - } - dest = dest[2:] // Strip the drive letter - } - - // Cannot handle relative where WorkingDir is not the system drive. - if len(workingDir) > 0 { - if ((len(workingDir) > 1) && !system.IsAbs(workingDir[2:])) || (len(workingDir) == 1) { - return "", fmt.Errorf("Current WorkingDir %s is not platform consistent", workingDir) - } - if !system.IsAbs(dest) { - if string(workingDir[0]) != "C" { - return "", fmt.Errorf("Windows does not support relative paths when WORKDIR is not the system drive") - } - dest = filepath.Join(string(os.PathSeparator), workingDir[2:], dest) - // Make sure we preserve any trailing slash - if endsInSlash { - dest += string(os.PathSeparator) - } - } - } - return dest, nil -} - -// For backwards compat, if there's just one info then use it as the -// cache look-up string, otherwise hash 'em all into one -func getSourceHashFromInfos(infos []copyInfo) string { - if len(infos) == 1 { - return infos[0].hash - } - var hashes []string - for _, info := range infos { - hashes = append(hashes, info.hash) - } - return hashStringSlice("multi", hashes) -} - -func hashStringSlice(prefix string, slice []string) string { - hasher := sha256.New() - hasher.Write([]byte(strings.Join(slice, ","))) - return prefix + ":" + hex.EncodeToString(hasher.Sum(nil)) -} - -type runConfigModifier func(*container.Config) - -func withCmd(cmd []string) runConfigModifier { - return func(runConfig *container.Config) { - runConfig.Cmd = cmd - } -} - -// withCmdComment sets Cmd to a nop comment string. See withCmdCommentString for -// why there are two almost identical versions of this. -func withCmdComment(comment string, platform string) runConfigModifier { - return func(runConfig *container.Config) { - runConfig.Cmd = append(getShell(runConfig, platform), "#(nop) ", comment) - } -} - -// withCmdCommentString exists to maintain compatibility with older versions. -// A few instructions (workdir, copy, add) used a nop comment that is a single arg -// whereas all the other instructions used a two arg comment string. This -// function implements the single arg version.
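The cache-key folding above (getSourceHashFromInfos feeding hashStringSlice) can also be tried on its own. The sketch below reuses the same scheme — one source keeps its raw hash, several sources are joined with commas, hashed with SHA-256, and prefixed — but the helper name cacheKey and the sample inputs are invented for illustration.

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "strings"
)

// cacheKey folds one or more per-source hashes into a single flat
// cache-lookup string, matching the "multi:" convention above.
func cacheKey(hashes []string) string {
    if len(hashes) == 1 {
        return hashes[0]
    }
    sum := sha256.Sum256([]byte(strings.Join(hashes, ",")))
    return "multi:" + hex.EncodeToString(sum[:])
}

func main() {
    fmt.Println(cacheKey([]string{"abc"}))        // abc
    fmt.Println(cacheKey([]string{"abc", "def"})) // multi:<sha256 digest>
}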
-func withCmdCommentString(comment string, platform string) runConfigModifier { - return func(runConfig *container.Config) { - runConfig.Cmd = append(getShell(runConfig, platform), "#(nop) "+comment) - } -} - -func withEnv(env []string) runConfigModifier { - return func(runConfig *container.Config) { - runConfig.Env = env - } -} - -// withEntrypointOverride sets an entrypoint on runConfig if the command is -// not empty. The entrypoint is left unmodified if command is empty. -// -// The dockerfile RUN instruction expects to run without an entrypoint -// so the runConfig entrypoint needs to be modified accordingly. ContainerCreate -// will change a []string{""} entrypoint to nil, so we probe the cache with the -// nil entrypoint. -func withEntrypointOverride(cmd []string, entrypoint []string) runConfigModifier { - return func(runConfig *container.Config) { - if len(cmd) > 0 { - runConfig.Entrypoint = entrypoint - } - } -} - -func copyRunConfig(runConfig *container.Config, modifiers ...runConfigModifier) *container.Config { - copy := *runConfig - copy.Cmd = copyStringSlice(runConfig.Cmd) - copy.Env = copyStringSlice(runConfig.Env) - copy.Entrypoint = copyStringSlice(runConfig.Entrypoint) - copy.OnBuild = copyStringSlice(runConfig.OnBuild) - copy.Shell = copyStringSlice(runConfig.Shell) - - if copy.Volumes != nil { - copy.Volumes = make(map[string]struct{}, len(runConfig.Volumes)) - for k, v := range runConfig.Volumes { - copy.Volumes[k] = v - } - } - - if copy.ExposedPorts != nil { - copy.ExposedPorts = make(nat.PortSet, len(runConfig.ExposedPorts)) - for k, v := range runConfig.ExposedPorts { - copy.ExposedPorts[k] = v - } - } - - if copy.Labels != nil { - copy.Labels = make(map[string]string, len(runConfig.Labels)) - for k, v := range runConfig.Labels { - copy.Labels[k] = v - } - } - - for _, modifier := range modifiers { - modifier(&copy) - } - return &copy -} - -func copyStringSlice(orig []string) []string { - if orig == nil { - return nil - } - return append([]string{}, orig...) -} - -// getShell is a helper function which gets the right shell for prefixing the -// shell-form of RUN, ENTRYPOINT and CMD instructions -func getShell(c *container.Config, os string) []string { - if len(c.Shell) == 0 { - return append([]string{}, defaultShellForOS(os)[:]...) - } - return append([]string{}, c.Shell[:]...) -} - -func (b *Builder) probeCache(dispatchState *dispatchState, runConfig *container.Config) (bool, error) { - cachedID, err := b.imageProber.Probe(dispatchState.imageID, runConfig) - if cachedID == "" || err != nil { - return false, err - } - fmt.Fprint(b.Stdout, " ---> Using cache\n") - - dispatchState.imageID = cachedID - return true, nil -} - -var defaultLogConfig = container.LogConfig{Type: "none"} - -func (b *Builder) probeAndCreate(dispatchState *dispatchState, runConfig *container.Config) (string, error) { - if hit, err := b.probeCache(dispatchState, runConfig); err != nil || hit { - return "", err - } - return b.create(runConfig) -} - -func (b *Builder) create(runConfig *container.Config) (string, error) { - logrus.Debugf("[BUILDER] Command to be executed: %v", runConfig.Cmd) - hostConfig := hostConfigFromOptions(b.options) - container, err := b.containerManager.Create(runConfig, hostConfig) - if err != nil { - return "", err - } - // TODO: could this be moved into containerManager.Create() ?
- for _, warning := range container.Warnings { - fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning) - } - fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(container.ID)) - return container.ID, nil -} - -func hostConfigFromOptions(options *types.ImageBuildOptions) *container.HostConfig { - resources := container.Resources{ - CgroupParent: options.CgroupParent, - CPUShares: options.CPUShares, - CPUPeriod: options.CPUPeriod, - CPUQuota: options.CPUQuota, - CpusetCpus: options.CPUSetCPUs, - CpusetMems: options.CPUSetMems, - Memory: options.Memory, - MemorySwap: options.MemorySwap, - Ulimits: options.Ulimits, - } - - hc := &container.HostConfig{ - SecurityOpt: options.SecurityOpt, - Isolation: options.Isolation, - ShmSize: options.ShmSize, - Resources: resources, - NetworkMode: container.NetworkMode(options.NetworkMode), - // Set a log config to override any default value set on the daemon - LogConfig: defaultLogConfig, - ExtraHosts: options.ExtraHosts, - } - - // For WCOW, the default of 20GB hard-coded in the platform - // is too small for builder scenarios where many users are - // using RUN statements to install large amounts of data. - // Use 127GB as that's the default size of a VHD in Hyper-V. - if runtime.GOOS == "windows" && options.Platform == "windows" { - hc.StorageOpt = make(map[string]string) - hc.StorageOpt["size"] = "127GB" - } - - return hc -} - -// fromSlash works like filepath.FromSlash but with a given OS platform field -func fromSlash(path, platform string) string { - if platform == "windows" { - return strings.Replace(path, "/", "\\", -1) - } - return path -} - -// separator returns a OS path separator for the given OS platform -func separator(platform string) byte { - if platform == "windows" { - return '\\' - } - return '/' -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals_linux.go b/vendor/github.com/docker/docker/builder/dockerfile/internals_linux.go deleted file mode 100644 index 1014b16a2..000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/internals_linux.go +++ /dev/null @@ -1,88 +0,0 @@ -package dockerfile // import "github.com/docker/docker/builder/dockerfile" - -import ( - "path/filepath" - "strconv" - "strings" - - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/symlink" - lcUser "github.com/opencontainers/runc/libcontainer/user" - "github.com/pkg/errors" -) - -func parseChownFlag(chown, ctrRootPath string, idMappings *idtools.IDMappings) (idtools.IDPair, error) { - var userStr, grpStr string - parts := strings.Split(chown, ":") - if len(parts) > 2 { - return idtools.IDPair{}, errors.New("invalid chown string format: " + chown) - } - if len(parts) == 1 { - // if no group specified, use the user spec as group as well - userStr, grpStr = parts[0], parts[0] - } else { - userStr, grpStr = parts[0], parts[1] - } - - passwdPath, err := symlink.FollowSymlinkInScope(filepath.Join(ctrRootPath, "etc", "passwd"), ctrRootPath) - if err != nil { - return idtools.IDPair{}, errors.Wrapf(err, "can't resolve /etc/passwd path in container rootfs") - } - groupPath, err := symlink.FollowSymlinkInScope(filepath.Join(ctrRootPath, "etc", "group"), ctrRootPath) - if err != nil { - return idtools.IDPair{}, errors.Wrapf(err, "can't resolve /etc/group path in container rootfs") - } - uid, err := lookupUser(userStr, passwdPath) - if err != nil { - return idtools.IDPair{}, errors.Wrapf(err, "can't find uid for user "+userStr) - } - gid, err := lookupGroup(grpStr, groupPath) - if err != nil { - return 
idtools.IDPair{}, errors.Wrapf(err, "can't find gid for group "+grpStr) - } - - // convert as necessary because of user namespaces - chownPair, err := idMappings.ToHost(idtools.IDPair{UID: uid, GID: gid}) - if err != nil { - return idtools.IDPair{}, errors.Wrapf(err, "unable to convert uid/gid to host mapping") - } - return chownPair, nil -} - -func lookupUser(userStr, filepath string) (int, error) { - // if the string is actually a uid integer, parse to int and return - // as we don't need to translate with the help of files - uid, err := strconv.Atoi(userStr) - if err == nil { - return uid, nil - } - users, err := lcUser.ParsePasswdFileFilter(filepath, func(u lcUser.User) bool { - return u.Name == userStr - }) - if err != nil { - return 0, err - } - if len(users) == 0 { - return 0, errors.New("no such user: " + userStr) - } - return users[0].Uid, nil -} - -func lookupGroup(groupStr, filepath string) (int, error) { - // if the string is actually a gid integer, parse to int and return - // as we don't need to translate with the help of files - gid, err := strconv.Atoi(groupStr) - if err == nil { - return gid, nil - } - groups, err := lcUser.ParseGroupFileFilter(filepath, func(g lcUser.Group) bool { - return g.Name == groupStr - }) - if err != nil { - return 0, err - } - if len(groups) == 0 { - return 0, errors.New("no such group: " + groupStr) - } - return groups[0].Gid, nil -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go b/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go deleted file mode 100644 index 26978b48c..000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package dockerfile // import "github.com/docker/docker/builder/dockerfile" - -import "github.com/docker/docker/pkg/idtools" - -func parseChownFlag(chown, ctrRootPath string, idMappings *idtools.IDMappings) (idtools.IDPair, error) { - return idMappings.RootPair(), nil -} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/metrics.go b/vendor/github.com/docker/docker/builder/dockerfile/metrics.go deleted file mode 100644 index ceafa7ad6..000000000 --- a/vendor/github.com/docker/docker/builder/dockerfile/metrics.go +++ /dev/null @@ -1,44 +0,0 @@ -package dockerfile // import "github.com/docker/docker/builder/dockerfile" - -import ( - "github.com/docker/go-metrics" -) - -var ( - buildsTriggered metrics.Counter - buildsFailed metrics.LabeledCounter -) - -// Build metrics prometheus messages, these values must be initialized before -// using them. See the example below in the "builds_failed" metric definition. 
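The pre-initialization idea referenced in that comment — touching every label value once so each failure reason exports a zero-valued series before any build has failed — can be sketched with the upstream Prometheus client, which docker/go-metrics wraps. This is an illustrative equivalent, not the vendored wrapper API:

package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    buildsFailed := prometheus.NewCounterVec(prometheus.CounterOpts{
        Namespace: "builder",
        Name:      "builds_failed",
        Help:      "Number of failed image builds",
    }, []string{"reason"})
    prometheus.MustRegister(buildsFailed)

    // Create each labelled series up front so it is exported at 0.
    for _, r := range []string{"dockerfile_syntax_error", "build_canceled"} {
        buildsFailed.WithLabelValues(r)
    }
    buildsFailed.WithLabelValues("build_canceled").Inc()
    fmt.Println("builds_failed series registered")
}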
-const ( - metricsDockerfileSyntaxError = "dockerfile_syntax_error" - metricsDockerfileEmptyError = "dockerfile_empty_error" - metricsCommandNotSupportedError = "command_not_supported_error" - metricsErrorProcessingCommandsError = "error_processing_commands_error" - metricsBuildTargetNotReachableError = "build_target_not_reachable_error" - metricsMissingOnbuildArgumentsError = "missing_onbuild_arguments_error" - metricsUnknownInstructionError = "unknown_instruction_error" - metricsBuildCanceled = "build_canceled" -) - -func init() { - buildMetrics := metrics.NewNamespace("builder", "", nil) - - buildsTriggered = buildMetrics.NewCounter("builds_triggered", "Number of triggered image builds") - buildsFailed = buildMetrics.NewLabeledCounter("builds_failed", "Number of failed image builds", "reason") - for _, r := range []string{ - metricsDockerfileSyntaxError, - metricsDockerfileEmptyError, - metricsCommandNotSupportedError, - metricsErrorProcessingCommandsError, - metricsBuildTargetNotReachableError, - metricsMissingOnbuildArgumentsError, - metricsUnknownInstructionError, - metricsBuildCanceled, - } { - buildsFailed.WithValues(r) - } - - metrics.Register(buildMetrics) -} diff --git a/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go b/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go deleted file mode 100644 index 57f224afc..000000000 --- a/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go +++ /dev/null @@ -1,64 +0,0 @@ -package dockerignore // import "github.com/docker/docker/builder/dockerignore" - -import ( - "bufio" - "bytes" - "fmt" - "io" - "path/filepath" - "strings" -) - -// ReadAll reads a .dockerignore file and returns the list of file patterns -// to ignore. Note this will trim whitespace from each line as well -// as use GO's "clean" func to get the shortest/cleanest path for each. -func ReadAll(reader io.Reader) ([]string, error) { - if reader == nil { - return nil, nil - } - - scanner := bufio.NewScanner(reader) - var excludes []string - currentLine := 0 - - utf8bom := []byte{0xEF, 0xBB, 0xBF} - for scanner.Scan() { - scannedBytes := scanner.Bytes() - // We trim UTF8 BOM - if currentLine == 0 { - scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) - } - pattern := string(scannedBytes) - currentLine++ - // Lines starting with # (comments) are ignored before processing - if strings.HasPrefix(pattern, "#") { - continue - } - pattern = strings.TrimSpace(pattern) - if pattern == "" { - continue - } - // normalize absolute paths to paths relative to the context - // (taking care of '!' prefix) - invert := pattern[0] == '!' - if invert { - pattern = strings.TrimSpace(pattern[1:]) - } - if len(pattern) > 0 { - pattern = filepath.Clean(pattern) - pattern = filepath.ToSlash(pattern) - if len(pattern) > 1 && pattern[0] == '/' { - pattern = pattern[1:] - } - } - if invert { - pattern = "!" 
+ pattern - - excludes = append(excludes, pattern) - } - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("Error reading .dockerignore: %v", err) - } - return excludes, nil -} diff --git a/vendor/github.com/docker/docker/builder/fscache/fscache.go b/vendor/github.com/docker/docker/builder/fscache/fscache.go deleted file mode 100644 index 92c3ea4ad..000000000 --- a/vendor/github.com/docker/docker/builder/fscache/fscache.go +++ /dev/null @@ -1,652 +0,0 @@ -package fscache // import "github.com/docker/docker/builder/fscache" - -import ( - "archive/tar" - "context" - "crypto/sha256" - "encoding/json" - "hash" - "os" - "path/filepath" - "sort" - "sync" - "time" - - "github.com/boltdb/bolt" - "github.com/docker/docker/builder" - "github.com/docker/docker/builder/remotecontext" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/directory" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/tarsum" - "github.com/moby/buildkit/session/filesync" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/tonistiigi/fsutil" - "golang.org/x/sync/singleflight" -) - -const dbFile = "fscache.db" -const cacheKey = "cache" -const metaKey = "meta" - -// Backend is a backing implementation for FSCache -type Backend interface { - Get(id string) (string, error) - Remove(id string) error -} - -// FSCache allows syncing remote resources to cached snapshots -type FSCache struct { - opt Opt - transports map[string]Transport - mu sync.Mutex - g singleflight.Group - store *fsCacheStore -} - -// Opt defines options for initializing FSCache -type Opt struct { - Backend Backend - Root string // for storing local metadata - GCPolicy GCPolicy -} - -// GCPolicy defines policy for garbage collection -type GCPolicy struct { - MaxSize uint64 - MaxKeepDuration time.Duration -} - -// NewFSCache returns a new FSCache object -func NewFSCache(opt Opt) (*FSCache, error) { - store, err := newFSCacheStore(opt) - if err != nil { - return nil, err - } - return &FSCache{ - store: store, - opt: opt, - transports: make(map[string]Transport), - }, nil -} - -// Transport defines a method for syncing remote data to FSCache -type Transport interface { - Copy(ctx context.Context, id RemoteIdentifier, dest string, cs filesync.CacheUpdater) error -} - -// RemoteIdentifier identifies a transfer request -type RemoteIdentifier interface { - Key() string - SharedKey() string - Transport() string -} - -// RegisterTransport registers a new transport method -func (fsc *FSCache) RegisterTransport(id string, transport Transport) error { - fsc.mu.Lock() - defer fsc.mu.Unlock() - if _, ok := fsc.transports[id]; ok { - return errors.Errorf("transport %v already exists", id) - } - fsc.transports[id] = transport - return nil -} - -// SyncFrom returns a source based on a remote identifier -func (fsc *FSCache) SyncFrom(ctx context.Context, id RemoteIdentifier) (builder.Source, error) { // cacheOpt - transportID := id.Transport() - fsc.mu.Lock() - transport, ok := fsc.transports[id.Transport()] - if !ok { - fsc.mu.Unlock() - return nil, errors.Errorf("invalid transport %s", transportID) - } - - logrus.Debugf("SyncFrom %s %s", id.Key(), id.SharedKey()) - fsc.mu.Unlock() - sourceRef, err, _ := fsc.g.Do(id.Key(), func() (interface{}, error) { - var sourceRef *cachedSourceRef - sourceRef, err := fsc.store.Get(id.Key()) - if err == nil { - return sourceRef, nil - } - - // check for unused shared cache - sharedKey := id.SharedKey() - if sharedKey != "" { - r, err := fsc.store.Rebase(sharedKey, 
id.Key()) - if err == nil { - sourceRef = r - } - } - - if sourceRef == nil { - var err error - sourceRef, err = fsc.store.New(id.Key(), sharedKey) - if err != nil { - return nil, errors.Wrap(err, "failed to create remote context") - } - } - - if err := syncFrom(ctx, sourceRef, transport, id); err != nil { - sourceRef.Release() - return nil, err - } - if err := sourceRef.resetSize(-1); err != nil { - return nil, err - } - return sourceRef, nil - }) - if err != nil { - return nil, err - } - ref := sourceRef.(*cachedSourceRef) - if ref.src == nil { // failsafe - return nil, errors.Errorf("invalid empty pull") - } - wc := &wrappedContext{Source: ref.src, closer: func() error { - ref.Release() - return nil - }} - return wc, nil -} - -// DiskUsage reports how much data is allocated by the cache -func (fsc *FSCache) DiskUsage(ctx context.Context) (int64, error) { - return fsc.store.DiskUsage(ctx) -} - -// Prune allows manually cleaning up the cache -func (fsc *FSCache) Prune(ctx context.Context) (uint64, error) { - return fsc.store.Prune(ctx) -} - -// Close stops the gc and closes the persistent db -func (fsc *FSCache) Close() error { - return fsc.store.Close() -} - -func syncFrom(ctx context.Context, cs *cachedSourceRef, transport Transport, id RemoteIdentifier) (retErr error) { - src := cs.src - if src == nil { - src = remotecontext.NewCachableSource(cs.Dir()) - } - - if !cs.cached { - if err := cs.storage.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte(id.Key())) - dt := b.Get([]byte(cacheKey)) - if dt != nil { - if err := src.UnmarshalBinary(dt); err != nil { - return err - } - } else { - return errors.Wrap(src.Scan(), "failed to scan cache records") - } - return nil - }); err != nil { - return err - } - } - - dc := &detectChanges{f: src.HandleChange} - - // todo: probably send a bucket to `Copy` and let it return source - // but need to make sure that tx is safe - if err := transport.Copy(ctx, id, cs.Dir(), dc); err != nil { - return errors.Wrapf(err, "failed to copy to %s", cs.Dir()) - } - - if !dc.supported { - if err := src.Scan(); err != nil { - return errors.Wrap(err, "failed to scan cache records after transfer") - } - } - cs.cached = true - cs.src = src - return cs.storage.db.Update(func(tx *bolt.Tx) error { - dt, err := src.MarshalBinary() - if err != nil { - return err - } - b := tx.Bucket([]byte(id.Key())) - return b.Put([]byte(cacheKey), dt) - }) -} - -type fsCacheStore struct { - mu sync.Mutex - sources map[string]*cachedSource - db *bolt.DB - fs Backend - gcTimer *time.Timer - gcPolicy GCPolicy -} - -// CachePolicy defines policy for keeping a resource in cache -type CachePolicy struct { - Priority int - LastUsed time.Time -} - -func defaultCachePolicy() CachePolicy { - return CachePolicy{Priority: 10, LastUsed: time.Now()} -} - -func newFSCacheStore(opt Opt) (*fsCacheStore, error) { - if err := os.MkdirAll(opt.Root, 0700); err != nil { - return nil, err - } - p := filepath.Join(opt.Root, dbFile) - db, err := bolt.Open(p, 0600, nil) - if err != nil { - return nil, errors.Wrapf(err, "failed to open database file %s", p) - } - s := &fsCacheStore{db: db, sources: make(map[string]*cachedSource), fs: opt.Backend, gcPolicy: opt.GCPolicy} - db.View(func(tx *bolt.Tx) error { - return tx.ForEach(func(name []byte, b *bolt.Bucket) error { - dt := b.Get([]byte(metaKey)) - if dt == nil { - return nil - } - var sm sourceMeta - if err := json.Unmarshal(dt, &sm); err != nil { - return err - } - dir, err := s.fs.Get(sm.BackendID) - if err != nil { - return err // TODO: handle gracefully - 
} - source := &cachedSource{ - refs: make(map[*cachedSourceRef]struct{}), - id: string(name), - dir: dir, - sourceMeta: sm, - storage: s, - } - s.sources[string(name)] = source - return nil - }) - }) - - s.gcTimer = s.startPeriodicGC(5 * time.Minute) - return s, nil -} - -func (s *fsCacheStore) startPeriodicGC(interval time.Duration) *time.Timer { - var t *time.Timer - t = time.AfterFunc(interval, func() { - if err := s.GC(); err != nil { - logrus.Errorf("build gc error: %v", err) - } - t.Reset(interval) - }) - return t -} - -func (s *fsCacheStore) Close() error { - s.gcTimer.Stop() - return s.db.Close() -} - -func (s *fsCacheStore) New(id, sharedKey string) (*cachedSourceRef, error) { - s.mu.Lock() - defer s.mu.Unlock() - var ret *cachedSource - if err := s.db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte(id)) - if err != nil { - return err - } - backendID := stringid.GenerateRandomID() - dir, err := s.fs.Get(backendID) - if err != nil { - return err - } - source := &cachedSource{ - refs: make(map[*cachedSourceRef]struct{}), - id: id, - dir: dir, - sourceMeta: sourceMeta{ - BackendID: backendID, - SharedKey: sharedKey, - CachePolicy: defaultCachePolicy(), - }, - storage: s, - } - dt, err := json.Marshal(source.sourceMeta) - if err != nil { - return err - } - if err := b.Put([]byte(metaKey), dt); err != nil { - return err - } - s.sources[id] = source - ret = source - return nil - }); err != nil { - return nil, err - } - return ret.getRef(), nil -} - -func (s *fsCacheStore) Rebase(sharedKey, newid string) (*cachedSourceRef, error) { - s.mu.Lock() - defer s.mu.Unlock() - var ret *cachedSource - for id, snap := range s.sources { - if snap.SharedKey == sharedKey && len(snap.refs) == 0 { - if err := s.db.Update(func(tx *bolt.Tx) error { - if err := tx.DeleteBucket([]byte(id)); err != nil { - return err - } - b, err := tx.CreateBucket([]byte(newid)) - if err != nil { - return err - } - snap.id = newid - snap.CachePolicy = defaultCachePolicy() - dt, err := json.Marshal(snap.sourceMeta) - if err != nil { - return err - } - if err := b.Put([]byte(metaKey), dt); err != nil { - return err - } - delete(s.sources, id) - s.sources[newid] = snap - return nil - }); err != nil { - return nil, err - } - ret = snap - break - } - } - if ret == nil { - return nil, errors.Errorf("no candidate for rebase") - } - return ret.getRef(), nil -} - -func (s *fsCacheStore) Get(id string) (*cachedSourceRef, error) { - s.mu.Lock() - defer s.mu.Unlock() - src, ok := s.sources[id] - if !ok { - return nil, errors.Errorf("not found") - } - return src.getRef(), nil -} - -// DiskUsage reports how much data is allocated by the cache -func (s *fsCacheStore) DiskUsage(ctx context.Context) (int64, error) { - s.mu.Lock() - defer s.mu.Unlock() - var size int64 - - for _, snap := range s.sources { - if len(snap.refs) == 0 { - ss, err := snap.getSize(ctx) - if err != nil { - return 0, err - } - size += ss - } - } - return size, nil -} - -// Prune allows manually cleaning up the cache -func (s *fsCacheStore) Prune(ctx context.Context) (uint64, error) { - s.mu.Lock() - defer s.mu.Unlock() - var size uint64 - - for id, snap := range s.sources { - select { - case <-ctx.Done(): - logrus.Debugf("Cache prune operation cancelled, pruned size: %d", size) - // when the context is cancelled, only return current size and nil - return size, nil - default: - } - if len(snap.refs) == 0 { - ss, err := snap.getSize(ctx) - if err != nil { - return size, err - } - if err := s.delete(id); err != nil { - return size, errors.Wrapf(err, 
"failed to delete %s", id) - } - size += uint64(ss) - } - } - return size, nil -} - -// GC runs a garbage collector on FSCache -func (s *fsCacheStore) GC() error { - s.mu.Lock() - defer s.mu.Unlock() - var size uint64 - - ctx := context.Background() - cutoff := time.Now().Add(-s.gcPolicy.MaxKeepDuration) - var blacklist []*cachedSource - - for id, snap := range s.sources { - if len(snap.refs) == 0 { - if cutoff.After(snap.CachePolicy.LastUsed) { - if err := s.delete(id); err != nil { - return errors.Wrapf(err, "failed to delete %s", id) - } - } else { - ss, err := snap.getSize(ctx) - if err != nil { - return err - } - size += uint64(ss) - blacklist = append(blacklist, snap) - } - } - } - - sort.Sort(sortableCacheSources(blacklist)) - for _, snap := range blacklist { - if size <= s.gcPolicy.MaxSize { - break - } - ss, err := snap.getSize(ctx) - if err != nil { - return err - } - if err := s.delete(snap.id); err != nil { - return errors.Wrapf(err, "failed to delete %s", snap.id) - } - size -= uint64(ss) - } - return nil -} - -// keep mu while calling this -func (s *fsCacheStore) delete(id string) error { - src, ok := s.sources[id] - if !ok { - return nil - } - if len(src.refs) > 0 { - return errors.Errorf("can't delete %s because it has active references", id) - } - delete(s.sources, id) - if err := s.db.Update(func(tx *bolt.Tx) error { - return tx.DeleteBucket([]byte(id)) - }); err != nil { - return err - } - return s.fs.Remove(src.BackendID) -} - -type sourceMeta struct { - SharedKey string - BackendID string - CachePolicy CachePolicy - Size int64 -} - -type cachedSource struct { - sourceMeta - refs map[*cachedSourceRef]struct{} - id string - dir string - src *remotecontext.CachableSource - storage *fsCacheStore - cached bool // keep track if cache is up to date -} - -type cachedSourceRef struct { - *cachedSource -} - -func (cs *cachedSource) Dir() string { - return cs.dir -} - -// hold storage lock before calling -func (cs *cachedSource) getRef() *cachedSourceRef { - ref := &cachedSourceRef{cachedSource: cs} - cs.refs[ref] = struct{}{} - return ref -} - -// hold storage lock before calling -func (cs *cachedSource) getSize(ctx context.Context) (int64, error) { - if cs.sourceMeta.Size < 0 { - ss, err := directory.Size(ctx, cs.dir) - if err != nil { - return 0, err - } - if err := cs.resetSize(ss); err != nil { - return 0, err - } - return ss, nil - } - return cs.sourceMeta.Size, nil -} - -func (cs *cachedSource) resetSize(val int64) error { - cs.sourceMeta.Size = val - return cs.saveMeta() -} -func (cs *cachedSource) saveMeta() error { - return cs.storage.db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte(cs.id)) - dt, err := json.Marshal(cs.sourceMeta) - if err != nil { - return err - } - return b.Put([]byte(metaKey), dt) - }) -} - -func (csr *cachedSourceRef) Release() error { - csr.cachedSource.storage.mu.Lock() - defer csr.cachedSource.storage.mu.Unlock() - delete(csr.cachedSource.refs, csr) - if len(csr.cachedSource.refs) == 0 { - go csr.cachedSource.storage.GC() - } - return nil -} - -type detectChanges struct { - f fsutil.ChangeFunc - supported bool -} - -func (dc *detectChanges) HandleChange(kind fsutil.ChangeKind, path string, fi os.FileInfo, err error) error { - if dc == nil { - return nil - } - return dc.f(kind, path, fi, err) -} - -func (dc *detectChanges) MarkSupported(v bool) { - if dc == nil { - return - } - dc.supported = v -} - -func (dc *detectChanges) ContentHasher() fsutil.ContentHasher { - return newTarsumHash -} - -type wrappedContext struct { - builder.Source 
- closer func() error -} - -func (wc *wrappedContext) Close() error { - if err := wc.Source.Close(); err != nil { - return err - } - return wc.closer() -} - -type sortableCacheSources []*cachedSource - -// Len is the number of elements in the collection. -func (s sortableCacheSources) Len() int { - return len(s) -} - -// Less reports whether the element with -// index i should sort before the element with index j. -func (s sortableCacheSources) Less(i, j int) bool { - return s[i].CachePolicy.LastUsed.Before(s[j].CachePolicy.LastUsed) -} - -// Swap swaps the elements with indexes i and j. -func (s sortableCacheSources) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func newTarsumHash(stat *fsutil.Stat) (hash.Hash, error) { - fi := &fsutil.StatInfo{Stat: stat} - p := stat.Path - if fi.IsDir() { - p += string(os.PathSeparator) - } - h, err := archive.FileInfoHeader(p, fi, stat.Linkname) - if err != nil { - return nil, err - } - h.Name = p - h.Uid = int(stat.Uid) - h.Gid = int(stat.Gid) - h.Linkname = stat.Linkname - if stat.Xattrs != nil { - h.Xattrs = make(map[string]string) - for k, v := range stat.Xattrs { - h.Xattrs[k] = string(v) - } - } - - tsh := &tarsumHash{h: h, Hash: sha256.New()} - tsh.Reset() - return tsh, nil -} - -// Reset resets the Hash to its initial state. -func (tsh *tarsumHash) Reset() { - tsh.Hash.Reset() - tarsum.WriteV1Header(tsh.h, tsh.Hash) -} - -type tarsumHash struct { - hash.Hash - h *tar.Header -} diff --git a/vendor/github.com/docker/docker/builder/fscache/naivedriver.go b/vendor/github.com/docker/docker/builder/fscache/naivedriver.go deleted file mode 100644 index 053509aec..000000000 --- a/vendor/github.com/docker/docker/builder/fscache/naivedriver.go +++ /dev/null @@ -1,28 +0,0 @@ -package fscache // import "github.com/docker/docker/builder/fscache" - -import ( - "os" - "path/filepath" - - "github.com/pkg/errors" -) - -// NewNaiveCacheBackend is a basic backend implementation for fscache -func NewNaiveCacheBackend(root string) Backend { - return &naiveCacheBackend{root: root} -} - -type naiveCacheBackend struct { - root string -} - -func (tcb *naiveCacheBackend) Get(id string) (string, error) { - d := filepath.Join(tcb.root, id) - if err := os.MkdirAll(d, 0700); err != nil { - return "", errors.Wrapf(err, "failed to create tmp dir for %s", d) - } - return d, nil -} -func (tcb *naiveCacheBackend) Remove(id string) error { - return errors.WithStack(os.RemoveAll(filepath.Join(tcb.root, id))) -} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/archive.go b/vendor/github.com/docker/docker/builder/remotecontext/archive.go deleted file mode 100644 index 6d247f945..000000000 --- a/vendor/github.com/docker/docker/builder/remotecontext/archive.go +++ /dev/null @@ -1,125 +0,0 @@ -package remotecontext // import "github.com/docker/docker/builder/remotecontext" - -import ( - "io" - "os" - "path/filepath" - - "github.com/docker/docker/builder" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/containerfs" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/tarsum" - "github.com/pkg/errors" -) - -type archiveContext struct { - root containerfs.ContainerFS - sums tarsum.FileInfoSums -} - -func (c *archiveContext) Close() error { - return c.root.RemoveAll(c.root.Path()) -} - -func convertPathError(err error, cleanpath string) error { - if err, ok := err.(*os.PathError); ok { - err.Path = cleanpath - return err - } - return err -} - -type modifiableContext interface { 
- builder.Source - // Remove deletes the entry specified by `path`. - // Removing a directory entry also removes all of its subentries. - Remove(path string) error -} - -// FromArchive returns a build source from a tar stream. -// -// It extracts the tar stream to a temporary folder that is deleted as soon as -// the Context is closed. -// As the extraction happens, a tarsum is calculated for every file, and the set of -// all those sums then becomes the source of truth for all operations on this Context. -// -// Closing tarStream has to be done by the caller. -func FromArchive(tarStream io.Reader) (builder.Source, error) { - root, err := ioutils.TempDir("", "docker-builder") - if err != nil { - return nil, err - } - - // Assume the local file system, since the content comes from a tar file. - tsc := &archiveContext{root: containerfs.NewLocalContainerFS(root)} - - // Make sure we clean up upon error. In the happy case the caller - // is expected to manage the clean-up. - defer func() { - if err != nil { - tsc.Close() - } - }() - - decompressedStream, err := archive.DecompressStream(tarStream) - if err != nil { - return nil, err - } - - sum, err := tarsum.NewTarSum(decompressedStream, true, tarsum.Version1) - if err != nil { - return nil, err - } - - err = chrootarchive.Untar(sum, root, nil) - if err != nil { - return nil, err - } - - tsc.sums = sum.GetSums() - return tsc, nil -} - -func (c *archiveContext) Root() containerfs.ContainerFS { - return c.root -} - -func (c *archiveContext) Remove(path string) error { - _, fullpath, err := normalize(path, c.root) - if err != nil { - return err - } - return c.root.RemoveAll(fullpath) -} - -func (c *archiveContext) Hash(path string) (string, error) { - cleanpath, fullpath, err := normalize(path, c.root) - if err != nil { - return "", err - } - - rel, err := c.root.Rel(c.root.Path(), fullpath) - if err != nil { - return "", convertPathError(err, cleanpath) - } - - // Use the checksum of the followed path (not the possible symlink) because - // this is the file that is actually copied. - if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil { - return tsInfo.Sum(), nil - } - // We set sum to path by default for the case where GetFile returns nil. - // The usual case is when the relative path is empty. 
- return path, nil // backwards compat TODO: see if really needed -} - -func normalize(path string, root containerfs.ContainerFS) (cleanPath, fullPath string, err error) { - cleanPath = root.Clean(string(root.Separator()) + path)[1:] - fullPath, err = root.ResolveScopedPath(path, true) - if err != nil { - return "", "", errors.Wrapf(err, "forbidden path outside the build context: %s (%s)", path, cleanPath) - } - return -} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/detect.go b/vendor/github.com/docker/docker/builder/remotecontext/detect.go deleted file mode 100644 index aaace269e..000000000 --- a/vendor/github.com/docker/docker/builder/remotecontext/detect.go +++ /dev/null @@ -1,180 +0,0 @@ -package remotecontext // import "github.com/docker/docker/builder/remotecontext" - -import ( - "bufio" - "fmt" - "io" - "os" - "strings" - - "github.com/containerd/continuity/driver" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/builder" - "github.com/docker/docker/builder/dockerignore" - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/urlutil" - "github.com/moby/buildkit/frontend/dockerfile/parser" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// ClientSessionRemote is identifier for client-session context transport -const ClientSessionRemote = "client-session" - -// Detect returns a context and dockerfile from remote location or local -// archive. progressReader is only used if remoteURL is actually a URL -// (not empty, and not a Git endpoint). -func Detect(config backend.BuildConfig) (remote builder.Source, dockerfile *parser.Result, err error) { - remoteURL := config.Options.RemoteContext - dockerfilePath := config.Options.Dockerfile - - switch { - case remoteURL == "": - remote, dockerfile, err = newArchiveRemote(config.Source, dockerfilePath) - case remoteURL == ClientSessionRemote: - res, err := parser.Parse(config.Source) - if err != nil { - return nil, nil, err - } - return nil, res, nil - case urlutil.IsGitURL(remoteURL): - remote, dockerfile, err = newGitRemote(remoteURL, dockerfilePath) - case urlutil.IsURL(remoteURL): - remote, dockerfile, err = newURLRemote(remoteURL, dockerfilePath, config.ProgressWriter.ProgressReaderFunc) - default: - err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL) - } - return -} - -func newArchiveRemote(rc io.ReadCloser, dockerfilePath string) (builder.Source, *parser.Result, error) { - defer rc.Close() - c, err := FromArchive(rc) - if err != nil { - return nil, nil, err - } - - return withDockerfileFromContext(c.(modifiableContext), dockerfilePath) -} - -func withDockerfileFromContext(c modifiableContext, dockerfilePath string) (builder.Source, *parser.Result, error) { - df, err := openAt(c, dockerfilePath) - if err != nil { - if os.IsNotExist(err) { - if dockerfilePath == builder.DefaultDockerfileName { - lowercase := strings.ToLower(dockerfilePath) - if _, err := StatAt(c, lowercase); err == nil { - return withDockerfileFromContext(c, lowercase) - } - } - return nil, nil, errors.Errorf("Cannot locate specified Dockerfile: %s", dockerfilePath) // backwards compatible error - } - c.Close() - return nil, nil, err - } - - res, err := readAndParseDockerfile(dockerfilePath, df) - if err != nil { - return nil, nil, err - } - - df.Close() - - if err := removeDockerfile(c, dockerfilePath); err != nil { - c.Close() - return nil, nil, err - } - - return c, res, nil -} - -func newGitRemote(gitURL string, dockerfilePath string) (builder.Source, 
*parser.Result, error) { - c, err := MakeGitContext(gitURL) // TODO: change this to NewLazySource - if err != nil { - return nil, nil, err - } - return withDockerfileFromContext(c.(modifiableContext), dockerfilePath) -} - -func newURLRemote(url string, dockerfilePath string, progressReader func(in io.ReadCloser) io.ReadCloser) (builder.Source, *parser.Result, error) { - contentType, content, err := downloadRemote(url) - if err != nil { - return nil, nil, err - } - defer content.Close() - - switch contentType { - case mimeTypes.TextPlain: - res, err := parser.Parse(progressReader(content)) - return nil, res, err - default: - source, err := FromArchive(progressReader(content)) - if err != nil { - return nil, nil, err - } - return withDockerfileFromContext(source.(modifiableContext), dockerfilePath) - } -} - -func removeDockerfile(c modifiableContext, filesToRemove ...string) error { - f, err := openAt(c, ".dockerignore") - // Note that a missing .dockerignore file isn't treated as an error - switch { - case os.IsNotExist(err): - return nil - case err != nil: - return err - } - excludes, err := dockerignore.ReadAll(f) - if err != nil { - f.Close() - return err - } - f.Close() - filesToRemove = append([]string{".dockerignore"}, filesToRemove...) - for _, fileToRemove := range filesToRemove { - if rm, _ := fileutils.Matches(fileToRemove, excludes); rm { - if err := c.Remove(fileToRemove); err != nil { - logrus.Errorf("failed to remove %s: %v", fileToRemove, err) - } - } - } - return nil -} - -func readAndParseDockerfile(name string, rc io.Reader) (*parser.Result, error) { - br := bufio.NewReader(rc) - if _, err := br.Peek(1); err != nil { - if err == io.EOF { - return nil, errors.Errorf("the Dockerfile (%s) cannot be empty", name) - } - return nil, errors.Wrap(err, "unexpected error reading Dockerfile") - } - return parser.Parse(br) -} - -func openAt(remote builder.Source, path string) (driver.File, error) { - fullPath, err := FullPath(remote, path) - if err != nil { - return nil, err - } - return remote.Root().Open(fullPath) -} - -// StatAt is a helper for calling Stat on a path from a source -func StatAt(remote builder.Source, path string) (os.FileInfo, error) { - fullPath, err := FullPath(remote, path) - if err != nil { - return nil, err - } - return remote.Root().Stat(fullPath) -} - -// FullPath is a helper for getting a full path for a path from a source -func FullPath(remote builder.Source, path string) (string, error) { - fullPath, err := remote.Root().ResolveScopedPath(path, true) - if err != nil { - return "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullPath) // backwards compat with old error - } - return fullPath, nil -} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/filehash.go b/vendor/github.com/docker/docker/builder/remotecontext/filehash.go deleted file mode 100644 index 3565dd827..000000000 --- a/vendor/github.com/docker/docker/builder/remotecontext/filehash.go +++ /dev/null @@ -1,45 +0,0 @@ -package remotecontext // import "github.com/docker/docker/builder/remotecontext" - -import ( - "archive/tar" - "crypto/sha256" - "hash" - "os" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/tarsum" -) - -// NewFileHash returns new hash that is used for the builder cache keys -func NewFileHash(path, name string, fi os.FileInfo) (hash.Hash, error) { - var link string - if fi.Mode()&os.ModeSymlink != 0 { - var err error - link, err = os.Readlink(path) - if err != nil { - return nil, err - } - } - hdr, err := 
archive.FileInfoHeader(name, fi, link) - if err != nil { - return nil, err - } - if err := archive.ReadSecurityXattrToTarHeader(path, hdr); err != nil { - return nil, err - } - tsh := &tarsumHash{hdr: hdr, Hash: sha256.New()} - tsh.Reset() // initialize header - return tsh, nil -} - -type tarsumHash struct { - hash.Hash - hdr *tar.Header -} - -// Reset resets the Hash to its initial state. -func (tsh *tarsumHash) Reset() { - // comply with hash.Hash and reset to the state hash had before any writes - tsh.Hash.Reset() - tarsum.WriteV1Header(tsh.hdr, tsh.Hash) -} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/generate.go b/vendor/github.com/docker/docker/builder/remotecontext/generate.go deleted file mode 100644 index 84c1b3b5e..000000000 --- a/vendor/github.com/docker/docker/builder/remotecontext/generate.go +++ /dev/null @@ -1,3 +0,0 @@ -package remotecontext // import "github.com/docker/docker/builder/remotecontext" - -//go:generate protoc --gogoslick_out=. tarsum.proto diff --git a/vendor/github.com/docker/docker/builder/remotecontext/git.go b/vendor/github.com/docker/docker/builder/remotecontext/git.go deleted file mode 100644 index 1583ca28d..000000000 --- a/vendor/github.com/docker/docker/builder/remotecontext/git.go +++ /dev/null @@ -1,35 +0,0 @@ -package remotecontext // import "github.com/docker/docker/builder/remotecontext" - -import ( - "os" - - "github.com/docker/docker/builder" - "github.com/docker/docker/builder/remotecontext/git" - "github.com/docker/docker/pkg/archive" - "github.com/sirupsen/logrus" -) - -// MakeGitContext returns a Context from gitURL that is cloned in a temporary directory. -func MakeGitContext(gitURL string) (builder.Source, error) { - root, err := git.Clone(gitURL) - if err != nil { - return nil, err - } - - c, err := archive.Tar(root, archive.Uncompressed) - if err != nil { - return nil, err - } - - defer func() { - err := c.Close() - if err != nil { - logrus.WithField("action", "MakeGitContext").WithField("module", "builder").WithField("url", gitURL).WithError(err).Error("error while closing git context") - } - err = os.RemoveAll(root) - if err != nil { - logrus.WithField("action", "MakeGitContext").WithField("module", "builder").WithField("url", gitURL).WithError(err).Error("error while removing path and children of root") - } - }() - return FromArchive(c) -} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils.go b/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils.go deleted file mode 100644 index 77a45beff..000000000 --- a/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils.go +++ /dev/null @@ -1,204 +0,0 @@ -package git // import "github.com/docker/docker/builder/remotecontext/git" - -import ( - "io/ioutil" - "net/http" - "net/url" - "os" - "os/exec" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/symlink" - "github.com/docker/docker/pkg/urlutil" - "github.com/pkg/errors" -) - -type gitRepo struct { - remote string - ref string - subdir string -} - -// Clone clones a repository into a newly created directory which -// will be under "docker-build-git" -func Clone(remoteURL string) (string, error) { - repo, err := parseRemoteURL(remoteURL) - - if err != nil { - return "", err - } - - return cloneGitRepo(repo) -} - -func cloneGitRepo(repo gitRepo) (checkoutDir string, err error) { - fetch := fetchArgs(repo.remote, repo.ref) - - root, err := ioutil.TempDir("", "docker-build-git") - if err != nil { - return "", err - } - - defer func() { - if err != nil { - 
os.RemoveAll(root) - } - }() - - if out, err := gitWithinDir(root, "init"); err != nil { - return "", errors.Wrapf(err, "failed to init repo at %s: %s", root, out) - } - - // Add origin remote for compatibility with previous implementation that - // used "git clone" and also to make sure local refs are created for branches - if out, err := gitWithinDir(root, "remote", "add", "origin", repo.remote); err != nil { - return "", errors.Wrapf(err, "failed to add origin remote at %s: %s", repo.remote, out) - } - - if output, err := gitWithinDir(root, fetch...); err != nil { - return "", errors.Wrapf(err, "error fetching: %s", output) - } - - checkoutDir, err = checkoutGit(root, repo.ref, repo.subdir) - if err != nil { - return "", err - } - - cmd := exec.Command("git", "submodule", "update", "--init", "--recursive", "--depth=1") - cmd.Dir = root - output, err := cmd.CombinedOutput() - if err != nil { - return "", errors.Wrapf(err, "error initializing submodules: %s", output) - } - - return checkoutDir, nil -} - -func parseRemoteURL(remoteURL string) (gitRepo, error) { - repo := gitRepo{} - - if !isGitTransport(remoteURL) { - remoteURL = "https://" + remoteURL - } - - var fragment string - if strings.HasPrefix(remoteURL, "git@") { - // git@.. is not a URL, so it cannot be parsed as a URL - parts := strings.SplitN(remoteURL, "#", 2) - - repo.remote = parts[0] - if len(parts) == 2 { - fragment = parts[1] - } - repo.ref, repo.subdir = getRefAndSubdir(fragment) - } else { - u, err := url.Parse(remoteURL) - if err != nil { - return repo, err - } - - repo.ref, repo.subdir = getRefAndSubdir(u.Fragment) - u.Fragment = "" - repo.remote = u.String() - } - return repo, nil -} - -func getRefAndSubdir(fragment string) (ref string, subdir string) { - refAndDir := strings.SplitN(fragment, ":", 2) - ref = "master" - if len(refAndDir[0]) != 0 { - ref = refAndDir[0] - } - if len(refAndDir) > 1 && len(refAndDir[1]) != 0 { - subdir = refAndDir[1] - } - return -} - -func fetchArgs(remoteURL string, ref string) []string { - args := []string{"fetch"} - - if supportsShallowClone(remoteURL) { - args = append(args, "--depth", "1") - } - - return append(args, "origin", ref) -} - -// Check if a given git URL supports a shallow git clone, -// i.e. it is a non-HTTP server or a smart HTTP server. -func supportsShallowClone(remoteURL string) bool { - if urlutil.IsURL(remoteURL) { - // Check if the HTTP server is smart - - // Smart servers must correctly respond to a query for the git-upload-pack service - serviceURL := remoteURL + "/info/refs?service=git-upload-pack" - - // Try a HEAD request and fall back to a GET request on error - res, err := http.Head(serviceURL) - if err != nil || res.StatusCode != http.StatusOK { - res, err = http.Get(serviceURL) - if err == nil { - res.Body.Close() - } - if err != nil || res.StatusCode != http.StatusOK { - // request failed - return false - } - } - - if res.Header.Get("Content-Type") != "application/x-git-upload-pack-advertisement" { - // Fallback, not a smart server - return false - } - return true - } - // Non-HTTP protocols always support shallow clones - return true -} - -func checkoutGit(root, ref, subdir string) (string, error) { - // Try checking out by ref name first. 
This will work on branches and sets - // .git/HEAD to the current branch name - if output, err := gitWithinDir(root, "checkout", ref); err != nil { - // If checking out by branch name fails check out the last fetched ref - if _, err2 := gitWithinDir(root, "checkout", "FETCH_HEAD"); err2 != nil { - return "", errors.Wrapf(err, "error checking out %s: %s", ref, output) - } - } - - if subdir != "" { - newCtx, err := symlink.FollowSymlinkInScope(filepath.Join(root, subdir), root) - if err != nil { - return "", errors.Wrapf(err, "error setting git context, %q not within git root", subdir) - } - - fi, err := os.Stat(newCtx) - if err != nil { - return "", err - } - if !fi.IsDir() { - return "", errors.Errorf("error setting git context, not a directory: %s", newCtx) - } - root = newCtx - } - - return root, nil -} - -func gitWithinDir(dir string, args ...string) ([]byte, error) { - a := []string{"--work-tree", dir, "--git-dir", filepath.Join(dir, ".git")} - return git(append(a, args...)...) -} - -func git(args ...string) ([]byte, error) { - return exec.Command("git", args...).CombinedOutput() -} - -// isGitTransport returns true if the provided str is a git transport by inspecting -// the prefix of the string for known protocols used in git. -func isGitTransport(str string) bool { - return urlutil.IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@") -} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/lazycontext.go b/vendor/github.com/docker/docker/builder/remotecontext/lazycontext.go deleted file mode 100644 index 442cecad8..000000000 --- a/vendor/github.com/docker/docker/builder/remotecontext/lazycontext.go +++ /dev/null @@ -1,102 +0,0 @@ -package remotecontext // import "github.com/docker/docker/builder/remotecontext" - -import ( - "encoding/hex" - "os" - "strings" - - "github.com/docker/docker/builder" - "github.com/docker/docker/pkg/containerfs" - "github.com/docker/docker/pkg/pools" - "github.com/pkg/errors" -) - -// NewLazySource creates a new LazyContext. LazyContext defines a hashed build -// context based on a root directory. Individual files are hashed first time -// they are asked. It is not safe to call methods of LazyContext concurrently. -func NewLazySource(root containerfs.ContainerFS) (builder.Source, error) { - return &lazySource{ - root: root, - sums: make(map[string]string), - }, nil -} - -type lazySource struct { - root containerfs.ContainerFS - sums map[string]string -} - -func (c *lazySource) Root() containerfs.ContainerFS { - return c.root -} - -func (c *lazySource) Close() error { - return nil -} - -func (c *lazySource) Hash(path string) (string, error) { - cleanPath, fullPath, err := normalize(path, c.root) - if err != nil { - return "", err - } - - relPath, err := Rel(c.root, fullPath) - if err != nil { - return "", errors.WithStack(convertPathError(err, cleanPath)) - } - - fi, err := os.Lstat(fullPath) - if err != nil { - // Backwards compatibility: a missing file returns a path as hash. - // This is reached in the case of a broken symlink. 
- return relPath, nil - } - - sum, ok := c.sums[relPath] - if !ok { - sum, err = c.prepareHash(relPath, fi) - if err != nil { - return "", err - } - } - - return sum, nil -} - -func (c *lazySource) prepareHash(relPath string, fi os.FileInfo) (string, error) { - p := c.root.Join(c.root.Path(), relPath) - h, err := NewFileHash(p, relPath, fi) - if err != nil { - return "", errors.Wrapf(err, "failed to create hash for %s", relPath) - } - if fi.Mode().IsRegular() && fi.Size() > 0 { - f, err := c.root.Open(p) - if err != nil { - return "", errors.Wrapf(err, "failed to open %s", relPath) - } - defer f.Close() - if _, err := pools.Copy(h, f); err != nil { - return "", errors.Wrapf(err, "failed to copy file data for %s", relPath) - } - } - sum := hex.EncodeToString(h.Sum(nil)) - c.sums[relPath] = sum - return sum, nil -} - -// Rel makes a path relative to base path. Same as `filepath.Rel` but can also -// handle UUID paths in windows. -func Rel(basepath containerfs.ContainerFS, targpath string) (string, error) { - // filepath.Rel can't handle UUID paths in windows - if basepath.OS() == "windows" { - pfx := basepath.Path() + `\` - if strings.HasPrefix(targpath, pfx) { - p := strings.TrimPrefix(targpath, pfx) - if p == "" { - p = "." - } - return p, nil - } - } - return basepath.Rel(basepath.Path(), targpath) -} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/mimetype.go b/vendor/github.com/docker/docker/builder/remotecontext/mimetype.go deleted file mode 100644 index e8a6210e9..000000000 --- a/vendor/github.com/docker/docker/builder/remotecontext/mimetype.go +++ /dev/null @@ -1,27 +0,0 @@ -package remotecontext // import "github.com/docker/docker/builder/remotecontext" - -import ( - "mime" - "net/http" -) - -// mimeTypes stores the MIME content type. -var mimeTypes = struct { - TextPlain string - OctetStream string -}{"text/plain", "application/octet-stream"} - -// detectContentType returns a best guess representation of the MIME -// content type for the bytes at c. The value detected by -// http.DetectContentType is guaranteed not be nil, defaulting to -// application/octet-stream when a better guess cannot be made. The -// result of this detection is then run through mime.ParseMediaType() -// which separates the actual MIME string from any parameters. 
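// A minimal, runnable sketch of the two-step detection described above,
// using only the standard library (the sample bytes are arbitrary):
//
//	package main
//
//	import (
//		"fmt"
//		"mime"
//		"net/http"
//	)
//
//	func main() {
//		raw := http.DetectContentType([]byte("FROM alpine\nRUN echo hi\n"))
//		// raw is e.g. "text/plain; charset=utf-8" — never empty
//		ct, params, err := mime.ParseMediaType(raw)
//		if err != nil {
//			panic(err)
//		}
//		fmt.Println(ct, params["charset"]) // text/plain utf-8
//	}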
-func detectContentType(c []byte) (string, map[string]string, error) { - ct := http.DetectContentType(c) - contentType, args, err := mime.ParseMediaType(ct) - if err != nil { - return "", nil, err - } - return contentType, args, nil -} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/remote.go b/vendor/github.com/docker/docker/builder/remotecontext/remote.go deleted file mode 100644 index 1fb80549b..000000000 --- a/vendor/github.com/docker/docker/builder/remotecontext/remote.go +++ /dev/null @@ -1,127 +0,0 @@ -package remotecontext // import "github.com/docker/docker/builder/remotecontext" - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/url" - "regexp" - - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/ioutils" - "github.com/pkg/errors" -) - -// When downloading remote contexts, limit the amount (in bytes) -// to be read from the response body in order to detect its Content-Type -const maxPreambleLength = 100 - -const acceptableRemoteMIME = `(?:application/(?:(?:x\-)?tar|octet\-stream|((?:x\-)?(?:gzip|bzip2?|xz)))|(?:text/plain))` - -var mimeRe = regexp.MustCompile(acceptableRemoteMIME) - -// downloadRemote context from a url and returns it, along with the parsed content type -func downloadRemote(remoteURL string) (string, io.ReadCloser, error) { - response, err := GetWithStatusError(remoteURL) - if err != nil { - return "", nil, errors.Wrapf(err, "error downloading remote context %s", remoteURL) - } - - contentType, contextReader, err := inspectResponse( - response.Header.Get("Content-Type"), - response.Body, - response.ContentLength) - if err != nil { - response.Body.Close() - return "", nil, errors.Wrapf(err, "error detecting content type for remote %s", remoteURL) - } - - return contentType, ioutils.NewReadCloserWrapper(contextReader, response.Body.Close), nil -} - -// GetWithStatusError does an http.Get() and returns an error if the -// status code is 4xx or 5xx. -func GetWithStatusError(address string) (resp *http.Response, err error) { - if resp, err = http.Get(address); err != nil { - if uerr, ok := err.(*url.Error); ok { - if derr, ok := uerr.Err.(*net.DNSError); ok && !derr.IsTimeout { - return nil, errdefs.NotFound(err) - } - } - return nil, errdefs.System(err) - } - if resp.StatusCode < 400 { - return resp, nil - } - msg := fmt.Sprintf("failed to GET %s with status %s", address, resp.Status) - body, err := ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - return nil, errdefs.System(errors.New(msg + ": error reading body")) - } - - msg += ": " + string(bytes.TrimSpace(body)) - switch resp.StatusCode { - case http.StatusNotFound: - return nil, errdefs.NotFound(errors.New(msg)) - case http.StatusBadRequest: - return nil, errdefs.InvalidParameter(errors.New(msg)) - case http.StatusUnauthorized: - return nil, errdefs.Unauthorized(errors.New(msg)) - case http.StatusForbidden: - return nil, errdefs.Forbidden(errors.New(msg)) - } - return nil, errdefs.Unknown(errors.New(msg)) -} - -// inspectResponse looks into the http response data at r to determine whether its -// content-type is on the list of acceptable content types for remote build contexts. -// This function returns: -// - a string representation of the detected content-type -// - an io.Reader for the response body -// - an error value which will be non-nil either when something goes wrong while -// reading bytes from r or when the detected content-type is not acceptable. 
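// The implementation below reads a short preamble for sniffing and then
// reassembles the stream so callers still see the full body. A standalone
// sketch of that peek-and-restore pattern (reader contents are arbitrary):
//
//	package main
//
//	import (
//		"bytes"
//		"fmt"
//		"io"
//		"strings"
//	)
//
//	func main() {
//		var r io.Reader = strings.NewReader("#!/bin/sh\necho hi\n")
//		peek := make([]byte, 8)
//		n, err := r.Read(peek) // consume up to 8 bytes for sniffing
//		if err != nil && err != io.EOF {
//			panic(err)
//		}
//		// Stitch the consumed prefix back in front of the rest.
//		body := io.MultiReader(bytes.NewReader(peek[:n]), r)
//		all, _ := io.ReadAll(body)
//		fmt.Printf("peeked %q, body %q\n", peek[:n], all)
//	}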
-func inspectResponse(ct string, r io.Reader, clen int64) (string, io.Reader, error) { - plen := clen - if plen <= 0 || plen > maxPreambleLength { - plen = maxPreambleLength - } - - preamble := make([]byte, plen) - rlen, err := r.Read(preamble) - if rlen == 0 { - return ct, r, errors.New("empty response") - } - if err != nil && err != io.EOF { - return ct, r, err - } - - preambleR := bytes.NewReader(preamble[:rlen]) - bodyReader := io.MultiReader(preambleR, r) - // Some web servers will use application/octet-stream as the default - // content type for files without an extension (e.g. 'Dockerfile') - // so if we receive this value we better check for text content - contentType := ct - if len(ct) == 0 || ct == mimeTypes.OctetStream { - contentType, _, err = detectContentType(preamble) - if err != nil { - return contentType, bodyReader, err - } - } - - contentType = selectAcceptableMIME(contentType) - var cterr error - if len(contentType) == 0 { - cterr = fmt.Errorf("unsupported Content-Type %q", ct) - contentType = ct - } - - return contentType, bodyReader, cterr -} - -func selectAcceptableMIME(ct string) string { - return mimeRe.FindString(ct) -} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/tarsum.go b/vendor/github.com/docker/docker/builder/remotecontext/tarsum.go deleted file mode 100644 index b809cfb78..000000000 --- a/vendor/github.com/docker/docker/builder/remotecontext/tarsum.go +++ /dev/null @@ -1,157 +0,0 @@ -package remotecontext // import "github.com/docker/docker/builder/remotecontext" - -import ( - "os" - "sync" - - "github.com/docker/docker/pkg/containerfs" - iradix "github.com/hashicorp/go-immutable-radix" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil" -) - -type hashed interface { - Digest() digest.Digest -} - -// CachableSource is a source that contains cache records for its contents -type CachableSource struct { - mu sync.Mutex - root containerfs.ContainerFS - tree *iradix.Tree - txn *iradix.Txn -} - -// NewCachableSource creates new CachableSource -func NewCachableSource(root string) *CachableSource { - ts := &CachableSource{ - tree: iradix.New(), - root: containerfs.NewLocalContainerFS(root), - } - return ts -} - -// MarshalBinary marshals current cache information to a byte array -func (cs *CachableSource) MarshalBinary() ([]byte, error) { - b := TarsumBackup{Hashes: make(map[string]string)} - root := cs.getRoot() - root.Walk(func(k []byte, v interface{}) bool { - b.Hashes[string(k)] = v.(*fileInfo).sum - return false - }) - return b.Marshal() -} - -// UnmarshalBinary decodes cache information for presented byte array -func (cs *CachableSource) UnmarshalBinary(data []byte) error { - var b TarsumBackup - if err := b.Unmarshal(data); err != nil { - return err - } - txn := iradix.New().Txn() - for p, v := range b.Hashes { - txn.Insert([]byte(p), &fileInfo{sum: v}) - } - cs.mu.Lock() - defer cs.mu.Unlock() - cs.tree = txn.Commit() - return nil -} - -// Scan rescans the cache information from the file system -func (cs *CachableSource) Scan() error { - lc, err := NewLazySource(cs.root) - if err != nil { - return err - } - txn := iradix.New().Txn() - err = cs.root.Walk(cs.root.Path(), func(path string, info os.FileInfo, err error) error { - if err != nil { - return errors.Wrapf(err, "failed to walk %s", path) - } - rel, err := Rel(cs.root, path) - if err != nil { - return err - } - h, err := lc.Hash(rel) - if err != nil { - return err - } - txn.Insert([]byte(rel), &fileInfo{sum: h}) - return nil - }) - if 
err != nil { - return err - } - cs.mu.Lock() - defer cs.mu.Unlock() - cs.tree = txn.Commit() - return nil -} - -// HandleChange notifies the source about a modification operation -func (cs *CachableSource) HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) (retErr error) { - cs.mu.Lock() - if cs.txn == nil { - cs.txn = cs.tree.Txn() - } - if kind == fsutil.ChangeKindDelete { - cs.txn.Delete([]byte(p)) - cs.mu.Unlock() - return - } - - h, ok := fi.(hashed) - if !ok { - cs.mu.Unlock() - return errors.Errorf("invalid fileinfo: %s", p) - } - - hfi := &fileInfo{ - sum: h.Digest().Hex(), - } - cs.txn.Insert([]byte(p), hfi) - cs.mu.Unlock() - return nil -} - -func (cs *CachableSource) getRoot() *iradix.Node { - cs.mu.Lock() - if cs.txn != nil { - cs.tree = cs.txn.Commit() - cs.txn = nil - } - t := cs.tree - cs.mu.Unlock() - return t.Root() -} - -// Close closes the source -func (cs *CachableSource) Close() error { - return nil -} - -// Hash returns a hash for a single file in the source -func (cs *CachableSource) Hash(path string) (string, error) { - n := cs.getRoot() - // TODO: check this for symlinks - v, ok := n.Get([]byte(path)) - if !ok { - return path, nil - } - return v.(*fileInfo).sum, nil -} - -// Root returns a root directory for the source -func (cs *CachableSource) Root() containerfs.ContainerFS { - return cs.root -} - -type fileInfo struct { - sum string -} - -func (fi *fileInfo) Hash() string { - return fi.sum -} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/tarsum.pb.go b/vendor/github.com/docker/docker/builder/remotecontext/tarsum.pb.go deleted file mode 100644 index 1d23bbe65..000000000 --- a/vendor/github.com/docker/docker/builder/remotecontext/tarsum.pb.go +++ /dev/null @@ -1,525 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: tarsum.proto -// DO NOT EDIT! - -/* -Package remotecontext is a generated protocol buffer package. - -It is generated from these files: - tarsum.proto - -It has these top-level messages: - TarsumBackup -*/ -package remotecontext - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import strings "strings" -import reflect "reflect" -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
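// These generated accessors give CachableSource (above) its persistence
// format. A minimal round-trip sketch, assuming the generated Marshal and
// Unmarshal methods defined below in this file (path and digest are made up):
//
//	backup := TarsumBackup{Hashes: map[string]string{"Dockerfile": "deadbeef"}}
//	data, err := backup.Marshal()
//	if err != nil {
//		panic(err)
//	}
//	var restored TarsumBackup
//	if err := restored.Unmarshal(data); err != nil {
//		panic(err)
//	}
//	// restored.Hashes["Dockerfile"] == "deadbeef"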
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -type TarsumBackup struct { - Hashes map[string]string `protobuf:"bytes,1,rep,name=Hashes" json:"Hashes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (m *TarsumBackup) Reset() { *m = TarsumBackup{} } -func (*TarsumBackup) ProtoMessage() {} -func (*TarsumBackup) Descriptor() ([]byte, []int) { return fileDescriptorTarsum, []int{0} } - -func (m *TarsumBackup) GetHashes() map[string]string { - if m != nil { - return m.Hashes - } - return nil -} - -func init() { - proto.RegisterType((*TarsumBackup)(nil), "remotecontext.TarsumBackup") -} -func (this *TarsumBackup) Equal(that interface{}) bool { - if that == nil { - if this == nil { - return true - } - return false - } - - that1, ok := that.(*TarsumBackup) - if !ok { - that2, ok := that.(TarsumBackup) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - if this == nil { - return true - } - return false - } else if this == nil { - return false - } - if len(this.Hashes) != len(that1.Hashes) { - return false - } - for i := range this.Hashes { - if this.Hashes[i] != that1.Hashes[i] { - return false - } - } - return true -} -func (this *TarsumBackup) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&remotecontext.TarsumBackup{") - keysForHashes := make([]string, 0, len(this.Hashes)) - for k := range this.Hashes { - keysForHashes = append(keysForHashes, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForHashes) - mapStringForHashes := "map[string]string{" - for _, k := range keysForHashes { - mapStringForHashes += fmt.Sprintf("%#v: %#v,", k, this.Hashes[k]) - } - mapStringForHashes += "}" - if this.Hashes != nil { - s = append(s, "Hashes: "+mapStringForHashes+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringTarsum(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} -func (m *TarsumBackup) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TarsumBackup) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Hashes) > 0 { - for k := range m.Hashes { - dAtA[i] = 0xa - i++ - v := m.Hashes[k] - mapSize := 1 + len(k) + sovTarsum(uint64(len(k))) + 1 + len(v) + sovTarsum(uint64(len(v))) - i = encodeVarintTarsum(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintTarsum(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintTarsum(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - return i, nil -} - -func encodeFixed64Tarsum(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Tarsum(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintTarsum(dAtA []byte, 
offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *TarsumBackup) Size() (n int) { - var l int - _ = l - if len(m.Hashes) > 0 { - for k, v := range m.Hashes { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovTarsum(uint64(len(k))) + 1 + len(v) + sovTarsum(uint64(len(v))) - n += mapEntrySize + 1 + sovTarsum(uint64(mapEntrySize)) - } - } - return n -} - -func sovTarsum(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozTarsum(x uint64) (n int) { - return sovTarsum(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *TarsumBackup) String() string { - if this == nil { - return "nil" - } - keysForHashes := make([]string, 0, len(this.Hashes)) - for k := range this.Hashes { - keysForHashes = append(keysForHashes, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForHashes) - mapStringForHashes := "map[string]string{" - for _, k := range keysForHashes { - mapStringForHashes += fmt.Sprintf("%v: %v,", k, this.Hashes[k]) - } - mapStringForHashes += "}" - s := strings.Join([]string{`&TarsumBackup{`, - `Hashes:` + mapStringForHashes + `,`, - `}`, - }, "") - return s -} -func valueToStringTarsum(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *TarsumBackup) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTarsum - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TarsumBackup: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TarsumBackup: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hashes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTarsum - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTarsum - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTarsum - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTarsum - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthTarsum - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Hashes == nil { - m.Hashes = make(map[string]string) - } 
- if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTarsum - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTarsum - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthTarsum - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Hashes[mapkey] = mapvalue - } else { - var mapvalue string - m.Hashes[mapkey] = mapvalue - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTarsum(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthTarsum - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipTarsum(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTarsum - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTarsum - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTarsum - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthTarsum - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTarsum - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipTarsum(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthTarsum = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowTarsum = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("tarsum.proto", fileDescriptorTarsum) } - -var fileDescriptorTarsum = []byte{ - // 196 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x49, 0x2c, 0x2a, - 0x2e, 0xcd, 0xd5, 
0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x2d, 0x4a, 0xcd, 0xcd, 0x2f, 0x49, - 0x4d, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0x51, 0xea, 0x62, 0xe4, 0xe2, 0x09, 0x01, 0xcb, 0x3b, - 0x25, 0x26, 0x67, 0x97, 0x16, 0x08, 0xd9, 0x73, 0xb1, 0x79, 0x24, 0x16, 0x67, 0xa4, 0x16, 0x4b, - 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x1b, 0xa9, 0xeb, 0xa1, 0x68, 0xd0, 0x43, 0x56, 0xac, 0x07, 0x51, - 0xe9, 0x9a, 0x57, 0x52, 0x54, 0x19, 0x04, 0xd5, 0x26, 0x65, 0xc9, 0xc5, 0x8d, 0x24, 0x2c, 0x24, - 0xc0, 0xc5, 0x9c, 0x9d, 0x5a, 0x29, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0x62, 0x0a, 0x89, - 0x70, 0xb1, 0x96, 0x25, 0xe6, 0x94, 0xa6, 0x4a, 0x30, 0x81, 0xc5, 0x20, 0x1c, 0x2b, 0x26, 0x0b, - 0x46, 0x27, 0x9d, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0xf8, 0xf0, 0x50, 0x8e, 0xb1, - 0xe1, 0x91, 0x1c, 0xe3, 0x8a, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, - 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8b, 0x47, 0x72, 0x0c, 0x1f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, - 0xc7, 0x90, 0xc4, 0x06, 0xf6, 0x90, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x89, 0x57, 0x7d, 0x3f, - 0xe0, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/docker/docker/cli/cobra.go b/vendor/github.com/docker/docker/cli/cobra.go deleted file mode 100644 index 8ed1fddc0..000000000 --- a/vendor/github.com/docker/docker/cli/cobra.go +++ /dev/null @@ -1,131 +0,0 @@ -package cli // import "github.com/docker/docker/cli" - -import ( - "fmt" - - "github.com/docker/docker/pkg/term" - "github.com/spf13/cobra" -) - -// SetupRootCommand sets default usage, help, and error handling for the -// root command. -func SetupRootCommand(rootCmd *cobra.Command) { - cobra.AddTemplateFunc("hasSubCommands", hasSubCommands) - cobra.AddTemplateFunc("hasManagementSubCommands", hasManagementSubCommands) - cobra.AddTemplateFunc("operationSubCommands", operationSubCommands) - cobra.AddTemplateFunc("managementSubCommands", managementSubCommands) - cobra.AddTemplateFunc("wrappedFlagUsages", wrappedFlagUsages) - - rootCmd.SetUsageTemplate(usageTemplate) - rootCmd.SetHelpTemplate(helpTemplate) - rootCmd.SetFlagErrorFunc(FlagErrorFunc) - rootCmd.SetVersionTemplate("Docker version {{.Version}}\n") - - rootCmd.PersistentFlags().BoolP("help", "h", false, "Print usage") - rootCmd.PersistentFlags().MarkShorthandDeprecated("help", "please use --help") -} - -// FlagErrorFunc prints an error message which matches the format of the -// docker/docker/cli error messages -func FlagErrorFunc(cmd *cobra.Command, err error) error { - if err == nil { - return nil - } - - usage := "" - if cmd.HasSubCommands() { - usage = "\n\n" + cmd.UsageString() - } - return StatusError{ - Status: fmt.Sprintf("%s\nSee '%s --help'.%s", err, cmd.CommandPath(), usage), - StatusCode: 125, - } -} - -func hasSubCommands(cmd *cobra.Command) bool { - return len(operationSubCommands(cmd)) > 0 -} - -func hasManagementSubCommands(cmd *cobra.Command) bool { - return len(managementSubCommands(cmd)) > 0 -} - -func operationSubCommands(cmd *cobra.Command) []*cobra.Command { - var cmds []*cobra.Command - for _, sub := range cmd.Commands() { - if sub.IsAvailableCommand() && !sub.HasSubCommands() { - cmds = append(cmds, sub) - } - } - return cmds -} - -func wrappedFlagUsages(cmd *cobra.Command) string { - width := 80 - if ws, err := term.GetWinsize(0); err == nil { - width = int(ws.Width) - } - return cmd.Flags().FlagUsagesWrapped(width - 1) -} - -func managementSubCommands(cmd *cobra.Command) []*cobra.Command { - var cmds []*cobra.Command - for _, sub := range cmd.Commands() { - if sub.IsAvailableCommand() && 
sub.HasSubCommands() { - cmds = append(cmds, sub) - } - } - return cmds -} - -var usageTemplate = `Usage: - -{{- if not .HasSubCommands}} {{.UseLine}}{{end}} -{{- if .HasSubCommands}} {{ .CommandPath}} COMMAND{{end}} - -{{ .Short | trim }} - -{{- if gt .Aliases 0}} - -Aliases: - {{.NameAndAliases}} - -{{- end}} -{{- if .HasExample}} - -Examples: -{{ .Example }} - -{{- end}} -{{- if .HasAvailableFlags}} - -Options: -{{ wrappedFlagUsages . | trimRightSpace}} - -{{- end}} -{{- if hasManagementSubCommands . }} - -Management Commands: - -{{- range managementSubCommands . }} - {{rpad .Name .NamePadding }} {{.Short}} -{{- end}} - -{{- end}} -{{- if hasSubCommands .}} - -Commands: - -{{- range operationSubCommands . }} - {{rpad .Name .NamePadding }} {{.Short}} -{{- end}} -{{- end}} - -{{- if .HasSubCommands }} - -Run '{{.CommandPath}} COMMAND --help' for more information on a command. -{{- end}} -` - -var helpTemplate = ` -{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` diff --git a/vendor/github.com/docker/docker/cli/config/configdir.go b/vendor/github.com/docker/docker/cli/config/configdir.go deleted file mode 100644 index 4bef4e104..000000000 --- a/vendor/github.com/docker/docker/cli/config/configdir.go +++ /dev/null @@ -1,25 +0,0 @@ -package config // import "github.com/docker/docker/cli/config" - -import ( - "os" - "path/filepath" - - "github.com/docker/docker/pkg/homedir" -) - -var ( - configDir = os.Getenv("DOCKER_CONFIG") - configFileDir = ".docker" -) - -// Dir returns the path to the configuration directory as specified by the DOCKER_CONFIG environment variable. -// TODO: this was copied from cli/config/configfile and should be removed once cmd/dockerd moves -func Dir() string { - return configDir -} - -func init() { - if configDir == "" { - configDir = filepath.Join(homedir.Get(), configFileDir) - } -} diff --git a/vendor/github.com/docker/docker/cli/debug/debug.go b/vendor/github.com/docker/docker/cli/debug/debug.go deleted file mode 100644 index 2303e15c9..000000000 --- a/vendor/github.com/docker/docker/cli/debug/debug.go +++ /dev/null @@ -1,26 +0,0 @@ -package debug // import "github.com/docker/docker/cli/debug" - -import ( - "os" - - "github.com/sirupsen/logrus" -) - -// Enable sets the DEBUG env var to true -// and makes the logger to log at debug level. -func Enable() { - os.Setenv("DEBUG", "1") - logrus.SetLevel(logrus.DebugLevel) -} - -// Disable sets the DEBUG env var to false -// and makes the logger to log at info level. -func Disable() { - os.Setenv("DEBUG", "") - logrus.SetLevel(logrus.InfoLevel) -} - -// IsEnabled checks whether the debug flag is set or not. -func IsEnabled() bool { - return os.Getenv("DEBUG") != "" -} diff --git a/vendor/github.com/docker/docker/cli/error.go b/vendor/github.com/docker/docker/cli/error.go deleted file mode 100644 index ea7c0eb50..000000000 --- a/vendor/github.com/docker/docker/cli/error.go +++ /dev/null @@ -1,33 +0,0 @@ -package cli // import "github.com/docker/docker/cli" - -import ( - "fmt" - "strings" -) - -// Errors is a list of errors. -// Useful in a loop if you don't want to return the error right away and you want to display after the loop, -// all the errors that happened during the loop. -type Errors []error - -func (errList Errors) Error() string { - if len(errList) < 1 { - return "" - } - - out := make([]string, len(errList)) - for i := range errList { - out[i] = errList[i].Error() - } - return strings.Join(out, ", ") -} - -// StatusError reports an unsuccessful exit by a command. 
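// A short sketch of how the Errors list above is meant to be used — keep
// looping, collect failures, and report them together afterwards (the
// removeOne helper is hypothetical):
//
//	var errs cli.Errors
//	for _, name := range []string{"one", "two"} {
//		if err := removeOne(name); err != nil {
//			errs = append(errs, err)
//		}
//	}
//	if len(errs) > 0 {
//		return errs // Error() joins the messages with ", "
//	}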
-type StatusError struct { - Status string - StatusCode int -} - -func (e StatusError) Error() string { - return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode) -} diff --git a/vendor/github.com/docker/docker/cli/required.go b/vendor/github.com/docker/docker/cli/required.go deleted file mode 100644 index e1ff02d2e..000000000 --- a/vendor/github.com/docker/docker/cli/required.go +++ /dev/null @@ -1,27 +0,0 @@ -package cli // import "github.com/docker/docker/cli" - -import ( - "strings" - - "github.com/pkg/errors" - "github.com/spf13/cobra" -) - -// NoArgs validates args and returns an error if there are any args -func NoArgs(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - return nil - } - - if cmd.HasSubCommands() { - return errors.Errorf("\n" + strings.TrimRight(cmd.UsageString(), "\n")) - } - - return errors.Errorf( - "\"%s\" accepts no argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", - cmd.CommandPath(), - cmd.CommandPath(), - cmd.UseLine(), - cmd.Short, - ) -} diff --git a/vendor/github.com/docker/docker/client/build_prune.go b/vendor/github.com/docker/docker/client/build_prune.go deleted file mode 100644 index c4772a04e..000000000 --- a/vendor/github.com/docker/docker/client/build_prune.go +++ /dev/null @@ -1,30 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types" -) - -// BuildCachePrune requests the daemon to delete unused cache data -func (cli *Client) BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error) { - if err := cli.NewVersionError("1.31", "build prune"); err != nil { - return nil, err - } - - report := types.BuildCachePruneReport{} - - serverResp, err := cli.post(ctx, "/build/prune", nil, nil, nil) - if err != nil { - return nil, err - } - defer ensureReaderClosed(serverResp) - - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return nil, fmt.Errorf("Error retrieving disk usage: %v", err) - } - - return &report, nil -} diff --git a/vendor/github.com/docker/docker/client/checkpoint_create.go b/vendor/github.com/docker/docker/client/checkpoint_create.go deleted file mode 100644 index 921024fe4..000000000 --- a/vendor/github.com/docker/docker/client/checkpoint_create.go +++ /dev/null @@ -1,14 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - "github.com/docker/docker/api/types" -) - -// CheckpointCreate creates a checkpoint from the given container with the given name -func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error { - resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete.go b/vendor/github.com/docker/docker/client/checkpoint_delete.go deleted file mode 100644 index 54f55fa76..000000000 --- a/vendor/github.com/docker/docker/client/checkpoint_delete.go +++ /dev/null @@ -1,20 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// CheckpointDelete deletes the checkpoint with the given name from the given container -func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options types.CheckpointDeleteOptions) error { - query := url.Values{} - if options.CheckpointDir != "" { - query.Set("dir", options.CheckpointDir) - } 
- - resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go deleted file mode 100644 index 2b73fb553..000000000 --- a/vendor/github.com/docker/docker/client/checkpoint_list.go +++ /dev/null @@ -1,28 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" -) - -// CheckpointList returns the checkpoints of the given container in the docker host -func (cli *Client) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) { - var checkpoints []types.Checkpoint - - query := url.Values{} - if options.CheckpointDir != "" { - query.Set("dir", options.CheckpointDir) - } - - resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) - if err != nil { - return checkpoints, wrapResponseError(err, resp, "container", container) - } - - err = json.NewDecoder(resp.body).Decode(&checkpoints) - ensureReaderClosed(resp) - return checkpoints, err -} diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go deleted file mode 100644 index b874b3b52..000000000 --- a/vendor/github.com/docker/docker/client/client.go +++ /dev/null @@ -1,402 +0,0 @@ -/* -Package client is a Go client for the Docker Engine API. - -For more information about the Engine API, see the documentation: -https://docs.docker.com/engine/reference/api/ - -Usage - -You use the library by creating a client object and calling methods on it. The -client can be created either from environment variables with NewEnvClient, or -configured manually with NewClient. - -For example, to list running containers (the equivalent of "docker ps"): - - package main - - import ( - "context" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/client" - ) - - func main() { - cli, err := client.NewEnvClient() - if err != nil { - panic(err) - } - - containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) - if err != nil { - panic(err) - } - - for _, container := range containers { - fmt.Printf("%s %s\n", container.ID[:10], container.Image) - } - } - -*/ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "fmt" - "net" - "net/http" - "net/url" - "os" - "path" - "path/filepath" - "strings" - - "github.com/docker/docker/api" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/versions" - "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" - "github.com/pkg/errors" -) - -// ErrRedirect is the error returned by checkRedirect when the request is non-GET. -var ErrRedirect = errors.New("unexpected redirect in response") - -// Client is the API client that performs all operations -// against a docker server. -type Client struct { - // scheme sets the scheme for the client - scheme string - // host holds the server address to connect to - host string - // proto holds the client protocol i.e. unix. - proto string - // addr holds the client address. - addr string - // basePath holds the path to prepend to the requests. - basePath string - // client used to send and receive http requests. - client *http.Client - // version of the server to talk to. 
- version string - // custom http headers configured by users. - customHTTPHeaders map[string]string - // manualOverride is set to true when the version was set by users. - manualOverride bool -} - -// CheckRedirect specifies the policy for dealing with redirect responses: -// If the request is non-GET return `ErrRedirect`. Otherwise use the last response. -// -// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) in the client . -// The Docker client (and by extension docker API client) can be made to to send a request -// like POST /containers//start where what would normally be in the name section of the URL is empty. -// This triggers an HTTP 301 from the daemon. -// In go 1.8 this 301 will be converted to a GET request, and ends up getting a 404 from the daemon. -// This behavior change manifests in the client in that before the 301 was not followed and -// the client did not generate an error, but now results in a message like Error response from daemon: page not found. -func CheckRedirect(req *http.Request, via []*http.Request) error { - if via[0].Method == http.MethodGet { - return http.ErrUseLastResponse - } - return ErrRedirect -} - -// NewEnvClient initializes a new API client based on environment variables. -// See FromEnv for a list of support environment variables. -// -// Deprecated: use NewClientWithOpts(FromEnv) -func NewEnvClient() (*Client, error) { - return NewClientWithOpts(FromEnv) -} - -// FromEnv configures the client with values from environment variables. -// -// Supported environment variables: -// DOCKER_HOST to set the url to the docker server. -// DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest. -// DOCKER_CERT_PATH to load the TLS certificates from. -// DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default. -func FromEnv(c *Client) error { - if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" { - options := tlsconfig.Options{ - CAFile: filepath.Join(dockerCertPath, "ca.pem"), - CertFile: filepath.Join(dockerCertPath, "cert.pem"), - KeyFile: filepath.Join(dockerCertPath, "key.pem"), - InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "", - } - tlsc, err := tlsconfig.Client(options) - if err != nil { - return err - } - - c.client = &http.Client{ - Transport: &http.Transport{TLSClientConfig: tlsc}, - CheckRedirect: CheckRedirect, - } - } - - if host := os.Getenv("DOCKER_HOST"); host != "" { - if err := WithHost(host)(c); err != nil { - return err - } - } - - if version := os.Getenv("DOCKER_API_VERSION"); version != "" { - c.version = version - c.manualOverride = true - } - return nil -} - -// WithTLSClientConfig applies a tls config to the client transport. -func WithTLSClientConfig(cacertPath, certPath, keyPath string) func(*Client) error { - return func(c *Client) error { - opts := tlsconfig.Options{ - CAFile: cacertPath, - CertFile: certPath, - KeyFile: keyPath, - ExclusiveRootPools: true, - } - config, err := tlsconfig.Client(opts) - if err != nil { - return errors.Wrap(err, "failed to create tls config") - } - if transport, ok := c.client.Transport.(*http.Transport); ok { - transport.TLSClientConfig = config - return nil - } - return errors.Errorf("cannot apply tls config to transport: %T", c.client.Transport) - } -} - -// WithDialer applies the dialer.DialContext to the client transport. This can be -// used to set the Timeout and KeepAlive settings of the client. 
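// These option funcs are meant to be composed at construction time. A hedged
// usage sketch, assuming the constructors in this file (the version string
// and dialer values are arbitrary):
//
//	dialer := &net.Dialer{Timeout: 30 * time.Second, KeepAlive: 30 * time.Second}
//	cli, err := client.NewClientWithOpts(
//		client.FromEnv,
//		client.WithVersion("1.38"),
//		client.WithDialer(dialer),
//	)
//	if err != nil {
//		panic(err)
//	}
//	defer cli.Close()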
-func WithDialer(dialer *net.Dialer) func(*Client) error { - return func(c *Client) error { - if transport, ok := c.client.Transport.(*http.Transport); ok { - transport.DialContext = dialer.DialContext - return nil - } - return errors.Errorf("cannot apply dialer to transport: %T", c.client.Transport) - } -} - -// WithVersion overrides the client version with the specified one -func WithVersion(version string) func(*Client) error { - return func(c *Client) error { - c.version = version - return nil - } -} - -// WithHost overrides the client host with the specified one. -func WithHost(host string) func(*Client) error { - return func(c *Client) error { - hostURL, err := ParseHostURL(host) - if err != nil { - return err - } - c.host = host - c.proto = hostURL.Scheme - c.addr = hostURL.Host - c.basePath = hostURL.Path - if transport, ok := c.client.Transport.(*http.Transport); ok { - return sockets.ConfigureTransport(transport, c.proto, c.addr) - } - return errors.Errorf("cannot apply host to transport: %T", c.client.Transport) - } -} - -// WithHTTPClient overrides the client http client with the specified one -func WithHTTPClient(client *http.Client) func(*Client) error { - return func(c *Client) error { - if client != nil { - c.client = client - } - return nil - } -} - -// WithHTTPHeaders overrides the client default http headers -func WithHTTPHeaders(headers map[string]string) func(*Client) error { - return func(c *Client) error { - c.customHTTPHeaders = headers - return nil - } -} - -// NewClientWithOpts initializes a new API client with default values. It takes functors -// to modify values when creating it, like `NewClientWithOpts(WithVersion(…))` -// It also initializes the custom http headers to add to each request. -// -// It won't send any version information if the version number is empty. It is -// highly recommended that you set a version or your client may break if the -// server is upgraded. -func NewClientWithOpts(ops ...func(*Client) error) (*Client, error) { - client, err := defaultHTTPClient(DefaultDockerHost) - if err != nil { - return nil, err - } - c := &Client{ - host: DefaultDockerHost, - version: api.DefaultVersion, - scheme: "http", - client: client, - proto: defaultProto, - addr: defaultAddr, - } - - for _, op := range ops { - if err := op(c); err != nil { - return nil, err - } - } - - if _, ok := c.client.Transport.(http.RoundTripper); !ok { - return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", c.client.Transport) - } - tlsConfig := resolveTLSConfig(c.client.Transport) - if tlsConfig != nil { - // TODO(stevvooe): This isn't really the right way to write clients in Go. - // `NewClient` should probably only take an `*http.Client` and work from there. - // Unfortunately, the model of having a host-ish/url-thingy as the connection - // string has us confusing protocol and transport layers. We continue doing - // this to avoid breaking existing clients but this should be addressed. - c.scheme = "https" - } - - return c, nil -} - -func defaultHTTPClient(host string) (*http.Client, error) { - url, err := ParseHostURL(host) - if err != nil { - return nil, err - } - transport := new(http.Transport) - sockets.ConfigureTransport(transport, url.Scheme, url.Host) - return &http.Client{ - Transport: transport, - CheckRedirect: CheckRedirect, - }, nil -} - -// NewClient initializes a new API client for the given host and API version. -// It uses the given http client as transport. -// It also initializes the custom http headers to add to each request. 
-// -// It won't send any version information if the version number is empty. It is -// highly recommended that you set a version or your client may break if the -// server is upgraded. -// Deprecated: use NewClientWithOpts -func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) { - return NewClientWithOpts(WithHost(host), WithVersion(version), WithHTTPClient(client), WithHTTPHeaders(httpHeaders)) -} - -// Close the transport used by the client -func (cli *Client) Close() error { - if t, ok := cli.client.Transport.(*http.Transport); ok { - t.CloseIdleConnections() - } - return nil -} - -// getAPIPath returns the versioned request path to call the api. -// It appends the query parameters to the path if they are not empty. -func (cli *Client) getAPIPath(p string, query url.Values) string { - var apiPath string - if cli.version != "" { - v := strings.TrimPrefix(cli.version, "v") - apiPath = path.Join(cli.basePath, "/v"+v, p) - } else { - apiPath = path.Join(cli.basePath, p) - } - return (&url.URL{Path: apiPath, RawQuery: query.Encode()}).String() -} - -// ClientVersion returns the API version used by this client. -func (cli *Client) ClientVersion() string { - return cli.version -} - -// NegotiateAPIVersion queries the API and updates the version to match the -// API version. Any errors are silently ignored. -func (cli *Client) NegotiateAPIVersion(ctx context.Context) { - ping, _ := cli.Ping(ctx) - cli.NegotiateAPIVersionPing(ping) -} - -// NegotiateAPIVersionPing updates the client version to match the Ping.APIVersion -// if the ping version is less than the default version. -func (cli *Client) NegotiateAPIVersionPing(p types.Ping) { - if cli.manualOverride { - return - } - - // try the latest version before versioning headers existed - if p.APIVersion == "" { - p.APIVersion = "1.24" - } - - // if the client is not initialized with a version, start with the latest supported version - if cli.version == "" { - cli.version = api.DefaultVersion - } - - // if server version is lower than the client version, downgrade - if versions.LessThan(p.APIVersion, cli.version) { - cli.version = p.APIVersion - } -} - -// DaemonHost returns the host address used by the client -func (cli *Client) DaemonHost() string { - return cli.host -} - -// HTTPClient returns a copy of the HTTP client bound to the server -func (cli *Client) HTTPClient() *http.Client { - return &*cli.client -} - -// ParseHostURL parses a url string, validates the string is a host url, and -// returns the parsed URL -func ParseHostURL(host string) (*url.URL, error) { - protoAddrParts := strings.SplitN(host, "://", 2) - if len(protoAddrParts) == 1 { - return nil, fmt.Errorf("unable to parse docker host `%s`", host) - } - - var basePath string - proto, addr := protoAddrParts[0], protoAddrParts[1] - if proto == "tcp" { - parsed, err := url.Parse("tcp://" + addr) - if err != nil { - return nil, err - } - addr = parsed.Host - basePath = parsed.Path - } - return &url.URL{ - Scheme: proto, - Host: addr, - Path: basePath, - }, nil -} - -// CustomHTTPHeaders returns the custom http headers stored by the client. -func (cli *Client) CustomHTTPHeaders() map[string]string { - m := make(map[string]string) - for k, v := range cli.customHTTPHeaders { - m[k] = v - } - return m -} - -// SetCustomHTTPHeaders that will be set on every HTTP request made by the client. -// Deprecated: use WithHTTPHeaders when creating the client. 
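// For reference, what ParseHostURL (above) yields for a typical TCP host
// string (the address and base path here are arbitrary):
//
//	u, err := client.ParseHostURL("tcp://127.0.0.1:2376/some/base")
//	if err != nil {
//		panic(err)
//	}
//	fmt.Println(u.Scheme) // tcp
//	fmt.Println(u.Host)   // 127.0.0.1:2376
//	fmt.Println(u.Path)   // /some/base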
-func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) { - cli.customHTTPHeaders = headers -} diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go deleted file mode 100644 index 3d24470ba..000000000 --- a/vendor/github.com/docker/docker/client/client_unix.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build linux freebsd openbsd darwin - -package client // import "github.com/docker/docker/client" - -// DefaultDockerHost defines os specific default if DOCKER_HOST is unset -const DefaultDockerHost = "unix:///var/run/docker.sock" - -const defaultProto = "unix" -const defaultAddr = "/var/run/docker.sock" diff --git a/vendor/github.com/docker/docker/client/client_windows.go b/vendor/github.com/docker/docker/client/client_windows.go deleted file mode 100644 index c649e5441..000000000 --- a/vendor/github.com/docker/docker/client/client_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package client // import "github.com/docker/docker/client" - -// DefaultDockerHost defines os specific default if DOCKER_HOST is unset -const DefaultDockerHost = "npipe:////./pipe/docker_engine" - -const defaultProto = "npipe" -const defaultAddr = "//./pipe/docker_engine" diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go deleted file mode 100644 index c8b802ad3..000000000 --- a/vendor/github.com/docker/docker/client/config_create.go +++ /dev/null @@ -1,25 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" -) - -// ConfigCreate creates a new Config. -func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) { - var response types.ConfigCreateResponse - if err := cli.NewVersionError("1.30", "config create"); err != nil { - return response, err - } - resp, err := cli.post(ctx, "/configs/create", nil, config, nil) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go deleted file mode 100644 index 4ac566ad8..000000000 --- a/vendor/github.com/docker/docker/client/config_inspect.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io/ioutil" - - "github.com/docker/docker/api/types/swarm" -) - -// ConfigInspectWithRaw returns the config information with raw data -func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) { - if id == "" { - return swarm.Config{}, nil, objectNotFoundError{object: "config", id: id} - } - if err := cli.NewVersionError("1.30", "config inspect"); err != nil { - return swarm.Config{}, nil, err - } - resp, err := cli.get(ctx, "/configs/"+id, nil, nil) - if err != nil { - return swarm.Config{}, nil, wrapResponseError(err, resp, "config", id) - } - defer ensureReaderClosed(resp) - - body, err := ioutil.ReadAll(resp.body) - if err != nil { - return swarm.Config{}, nil, err - } - - var config swarm.Config - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&config) - - return config, body, err -} diff --git a/vendor/github.com/docker/docker/client/config_list.go 
b/vendor/github.com/docker/docker/client/config_list.go deleted file mode 100644 index 2b9d54606..000000000 --- a/vendor/github.com/docker/docker/client/config_list.go +++ /dev/null @@ -1,38 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" -) - -// ConfigList returns the list of configs. -func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { - if err := cli.NewVersionError("1.30", "config list"); err != nil { - return nil, err - } - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/configs", query, nil) - if err != nil { - return nil, err - } - - var configs []swarm.Config - err = json.NewDecoder(resp.body).Decode(&configs) - ensureReaderClosed(resp) - return configs, err -} diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go deleted file mode 100644 index a96871e98..000000000 --- a/vendor/github.com/docker/docker/client/config_remove.go +++ /dev/null @@ -1,13 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// ConfigRemove removes a Config. -func (cli *Client) ConfigRemove(ctx context.Context, id string) error { - if err := cli.NewVersionError("1.30", "config remove"); err != nil { - return err - } - resp, err := cli.delete(ctx, "/configs/"+id, nil, nil) - ensureReaderClosed(resp) - return wrapResponseError(err, resp, "config", id) -} diff --git a/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/docker/docker/client/config_update.go deleted file mode 100644 index 39e59cf85..000000000 --- a/vendor/github.com/docker/docker/client/config_update.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "strconv" - - "github.com/docker/docker/api/types/swarm" -) - -// ConfigUpdate attempts to update a Config -func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error { - if err := cli.NewVersionError("1.30", "config update"); err != nil { - return err - } - query := url.Values{} - query.Set("version", strconv.FormatUint(version.Index, 10)) - resp, err := cli.post(ctx, "/configs/"+id+"/update", query, config, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go deleted file mode 100644 index 88ba1ef63..000000000 --- a/vendor/github.com/docker/docker/client/container_attach.go +++ /dev/null @@ -1,57 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// ContainerAttach attaches a connection to a container in the server. -// It returns a types.HijackedConnection with the hijacked connection -// and the a reader to get output. It's up to the called to close -// the hijacked connection by calling types.HijackedResponse.Close. 
-// -// The stream format on the response will be in one of two formats: -// -// If the container is using a TTY, there is only a single stream (stdout), and -// data is copied directly from the container output stream, no extra -// multiplexing or headers. -// -// If the container is *not* using a TTY, streams for stdout and stderr are -// multiplexed. -// The format of the multiplexed stream is as follows: -// -// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} -// -// STREAM_TYPE can be 1 for stdout and 2 for stderr -// -// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. -// This is the size of OUTPUT. -// -// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this -// stream. -func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) { - query := url.Values{} - if options.Stream { - query.Set("stream", "1") - } - if options.Stdin { - query.Set("stdin", "1") - } - if options.Stdout { - query.Set("stdout", "1") - } - if options.Stderr { - query.Set("stderr", "1") - } - if options.DetachKeys != "" { - query.Set("detachKeys", options.DetachKeys) - } - if options.Logs { - query.Set("logs", "1") - } - - headers := map[string][]string{"Content-Type": {"text/plain"}} - return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers) -} diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go deleted file mode 100644 index 377a2ea68..000000000 --- a/vendor/github.com/docker/docker/client/container_commit.go +++ /dev/null @@ -1,55 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "errors" - "net/url" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" -) - -// ContainerCommit applies changes into a container and creates a new tagged image. 
-func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) { - var repository, tag string - if options.Reference != "" { - ref, err := reference.ParseNormalizedNamed(options.Reference) - if err != nil { - return types.IDResponse{}, err - } - - if _, isCanonical := ref.(reference.Canonical); isCanonical { - return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference") - } - ref = reference.TagNameOnly(ref) - - if tagged, ok := ref.(reference.Tagged); ok { - tag = tagged.Tag() - } - repository = reference.FamiliarName(ref) - } - - query := url.Values{} - query.Set("container", container) - query.Set("repo", repository) - query.Set("tag", tag) - query.Set("comment", options.Comment) - query.Set("author", options.Author) - for _, change := range options.Changes { - query.Add("changes", change) - } - if !options.Pause { - query.Set("pause", "0") - } - - var response types.IDResponse - resp, err := cli.post(ctx, "/commit", query, options.Config, nil) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go deleted file mode 100644 index d706260ce..000000000 --- a/vendor/github.com/docker/docker/client/container_copy.go +++ /dev/null @@ -1,101 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "path/filepath" - "strings" - - "github.com/docker/docker/api/types" -) - -// ContainerStatPath returns Stat information about a path inside the container filesystem. -func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) { - query := url.Values{} - query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. - - urlStr := "/containers/" + containerID + "/archive" - response, err := cli.head(ctx, urlStr, query, nil) - if err != nil { - return types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+path) - } - defer ensureReaderClosed(response) - return getContainerPathStatFromHeader(response.header) -} - -// CopyToContainer copies content into the container filesystem. -// Note that `content` must be a Reader for a TAR archive -func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options types.CopyToContainerOptions) error { - query := url.Values{} - query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API. - // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. 
- if !options.AllowOverwriteDirWithFile { - query.Set("noOverwriteDirNonDir", "true") - } - - if options.CopyUIDGID { - query.Set("copyUIDGID", "true") - } - - apiPath := "/containers/" + containerID + "/archive" - - response, err := cli.putRaw(ctx, apiPath, query, content, nil) - if err != nil { - return wrapResponseError(err, response, "container:path", containerID+":"+dstPath) - } - defer ensureReaderClosed(response) - - if response.statusCode != http.StatusOK { - return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) - } - - return nil -} - -// CopyFromContainer gets the content from the container and returns it as a Reader -// for a TAR archive to manipulate it in the host. It's up to the caller to close the reader. -func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { - query := make(url.Values, 1) - query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. - - apiPath := "/containers/" + containerID + "/archive" - response, err := cli.get(ctx, apiPath, query, nil) - if err != nil { - return nil, types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+srcPath) - } - - if response.statusCode != http.StatusOK { - return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) - } - - // In order to get the copy behavior right, we need to know information - // about both the source and the destination. The response headers include - // stat info about the source that we can use in deciding exactly how to - // copy it locally. Along with the stat info about the local destination, - // we have everything we need to handle the multiple possibilities there - // can be when copying a file/dir from one location to another file/dir. - stat, err := getContainerPathStatFromHeader(response.header) - if err != nil { - return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err) - } - return response.body, stat, err -} - -func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) { - var stat types.ContainerPathStat - - encodedStat := header.Get("X-Docker-Container-Path-Stat") - statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat)) - - err := json.NewDecoder(statDecoder).Decode(&stat) - if err != nil { - err = fmt.Errorf("unable to decode container path stat header: %s", err) - } - - return stat, err -} diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go deleted file mode 100644 index d269a6189..000000000 --- a/vendor/github.com/docker/docker/client/container_create.go +++ /dev/null @@ -1,56 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - "strings" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/versions" -) - -type configWrapper struct { - *container.Config - HostConfig *container.HostConfig - NetworkingConfig *network.NetworkingConfig -} - -// ContainerCreate creates a new container based on the given configuration. -// It can be associated with a name, but it's not mandatory.
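(Illustrative note, not part of the patch: the create-then-start flow this comment describes would look roughly like the Go sketch below. The image name, the command, and the env-based constructor client.NewEnvClient are assumptions, and error handling is collapsed to panics.)

package main

import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewEnvClient() // assumed env-based constructor for this vintage of the client
	if err != nil {
		panic(err)
	}
	// The name argument is optional, as the comment above notes; pass "" to let the daemon pick one.
	created, err := cli.ContainerCreate(ctx, &container.Config{
		Image: "busybox",                 // placeholder image
		Cmd:   []string{"echo", "hello"}, // placeholder command
	}, nil, nil, "")
	if err != nil {
		panic(err)
	}
	if err := cli.ContainerStart(ctx, created.ID, types.ContainerStartOptions{}); err != nil {
		panic(err)
	}
}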
-func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) { - var response container.ContainerCreateCreatedBody - - if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { - return response, err - } - - // When using API 1.24 and under, the client is responsible for removing the container - if hostConfig != nil && versions.LessThan(cli.ClientVersion(), "1.25") { - hostConfig.AutoRemove = false - } - - query := url.Values{} - if containerName != "" { - query.Set("name", containerName) - } - - body := configWrapper{ - Config: config, - HostConfig: hostConfig, - NetworkingConfig: networkingConfig, - } - - serverResp, err := cli.post(ctx, "/containers/create", query, body, nil) - if err != nil { - if serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") { - return response, objectNotFoundError{object: "image", id: config.Image} - } - return response, err - } - - err = json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/container_diff.go b/vendor/github.com/docker/docker/client/container_diff.go deleted file mode 100644 index 3b7c90c96..000000000 --- a/vendor/github.com/docker/docker/client/container_diff.go +++ /dev/null @@ -1,23 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/container" -) - -// ContainerDiff shows differences in a container filesystem since it was started. -func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]container.ContainerChangeResponseItem, error) { - var changes []container.ContainerChangeResponseItem - - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) - if err != nil { - return changes, err - } - - err = json.NewDecoder(serverResp.body).Decode(&changes) - ensureReaderClosed(serverResp) - return changes, err -} diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go deleted file mode 100644 index 535536b1e..000000000 --- a/vendor/github.com/docker/docker/client/container_exec.go +++ /dev/null @@ -1,54 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types" -) - -// ContainerExecCreate creates a new exec configuration to run an exec process. -func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) { - var response types.IDResponse - - if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil { - return response, err - } - - resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil) - if err != nil { - return response, err - } - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} - -// ContainerExecStart starts an exec process already created in the docker host. 
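(Illustrative note, not part of the patch: the exec calls in this file compose into a create/attach flow; a rough sketch follows. "my-container", the TTY choice, and client.NewEnvClient are assumptions. Attaching to the /exec/{id}/start endpoint also starts the process, so no separate ContainerExecStart call is needed here.)

package main

import (
	"context"
	"io"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewEnvClient() // assumed env-based constructor
	if err != nil {
		panic(err)
	}
	created, err := cli.ContainerExecCreate(ctx, "my-container", types.ExecConfig{
		Cmd:          []string{"ls", "/"},
		Tty:          true, // with a TTY there is a single raw stream, no multiplexing headers
		AttachStdout: true,
	})
	if err != nil {
		panic(err)
	}
	hijacked, err := cli.ContainerExecAttach(ctx, created.ID, types.ExecStartCheck{Tty: true})
	if err != nil {
		panic(err)
	}
	defer hijacked.Close()
	// Single-stream TTY output can be copied directly.
	if _, err := io.Copy(os.Stdout, hijacked.Reader); err != nil {
		panic(err)
	}
}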
-func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error { - resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil) - ensureReaderClosed(resp) - return err -} - -// ContainerExecAttach attaches a connection to an exec process in the server. -// It returns a types.HijackedResponse with the hijacked connection -// and a reader to get output. It's up to the caller to close -// the hijacked connection by calling types.HijackedResponse.Close. -func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) { - headers := map[string][]string{"Content-Type": {"application/json"}} - return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers) -} - -// ContainerExecInspect returns information about a specific exec process on the docker host. -func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) { - var response types.ContainerExecInspect - resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/container_export.go b/vendor/github.com/docker/docker/client/container_export.go deleted file mode 100644 index d0c0a5cba..000000000 --- a/vendor/github.com/docker/docker/client/container_export.go +++ /dev/null @@ -1,19 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" -) - -// ContainerExport retrieves the raw contents of a container -// and returns them as an io.ReadCloser. It's up to the caller -// to close the stream. -func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) { - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) - if err != nil { - return nil, err - } - - return serverResp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go deleted file mode 100644 index f453064cf..000000000 --- a/vendor/github.com/docker/docker/client/container_inspect.go +++ /dev/null @@ -1,53 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io/ioutil" - "net/url" - - "github.com/docker/docker/api/types" -) - -// ContainerInspect returns the container information. -func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { - if containerID == "" { - return types.ContainerJSON{}, objectNotFoundError{object: "container", id: containerID} - } - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) - if err != nil { - return types.ContainerJSON{}, wrapResponseError(err, serverResp, "container", containerID) - } - - var response types.ContainerJSON - err = json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) - return response, err -} - -// ContainerInspectWithRaw returns the container information and its raw representation.
-func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) { - if containerID == "" { - return types.ContainerJSON{}, nil, objectNotFoundError{object: "container", id: containerID} - } - query := url.Values{} - if getSize { - query.Set("size", "1") - } - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) - if err != nil { - return types.ContainerJSON{}, nil, wrapResponseError(err, serverResp, "container", containerID) - } - defer ensureReaderClosed(serverResp) - - body, err := ioutil.ReadAll(serverResp.body) - if err != nil { - return types.ContainerJSON{}, nil, err - } - - var response types.ContainerJSON - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/container_kill.go b/vendor/github.com/docker/docker/client/container_kill.go deleted file mode 100644 index 4d6f1d23d..000000000 --- a/vendor/github.com/docker/docker/client/container_kill.go +++ /dev/null @@ -1,16 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" -) - -// ContainerKill terminates the container process but does not remove the container from the docker host. -func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { - query := url.Values{} - query.Set("signal", signal) - - resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go deleted file mode 100644 index 9c218e221..000000000 --- a/vendor/github.com/docker/docker/client/container_list.go +++ /dev/null @@ -1,56 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - "strconv" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" -) - -// ContainerList returns the list of containers in the docker host. 
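(Illustrative note, not part of the patch: the function below serializes a filters.Args into the "filters" query parameter; a minimal usage sketch, where the "status" filter value and client.NewEnvClient are assumptions.)

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewEnvClient() // assumed env-based constructor
	if err != nil {
		panic(err)
	}
	// Filter the listing to running containers only.
	args := filters.NewArgs()
	args.Add("status", "running")
	containers, err := cli.ContainerList(ctx, types.ContainerListOptions{Filters: args})
	if err != nil {
		panic(err)
	}
	for _, c := range containers {
		fmt.Println(c.ID[:12], c.Image)
	}
}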
-func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { - query := url.Values{} - - if options.All { - query.Set("all", "1") - } - - if options.Limit != -1 { - query.Set("limit", strconv.Itoa(options.Limit)) - } - - if options.Since != "" { - query.Set("since", options.Since) - } - - if options.Before != "" { - query.Set("before", options.Before) - } - - if options.Size { - query.Set("size", "1") - } - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) - - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/containers/json", query, nil) - if err != nil { - return nil, err - } - - var containers []types.Container - err = json.NewDecoder(resp.body).Decode(&containers) - ensureReaderClosed(resp) - return containers, err -} diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go deleted file mode 100644 index 5b6541f03..000000000 --- a/vendor/github.com/docker/docker/client/container_logs.go +++ /dev/null @@ -1,80 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - "time" - - "github.com/docker/docker/api/types" - timetypes "github.com/docker/docker/api/types/time" - "github.com/pkg/errors" -) - -// ContainerLogs returns the logs generated by a container in an io.ReadCloser. -// It's up to the caller to close the stream. -// -// The stream format on the response will be in one of two formats: -// -// If the container is using a TTY, there is only a single stream (stdout), and -// data is copied directly from the container output stream, no extra -// multiplexing or headers. -// -// If the container is *not* using a TTY, streams for stdout and stderr are -// multiplexed. -// The format of the multiplexed stream is as follows: -// -// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} -// -// STREAM_TYPE can be 1 for stdout and 2 for stderr -// -// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. -// This is the size of OUTPUT. -// -// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this -// stream. 
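(Illustrative note, not part of the patch: since the comment above points at pkg/stdcopy, a rough demultiplexing sketch may help. The container name and client.NewEnvClient are assumptions.)

package main

import (
	"context"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewEnvClient() // assumed env-based constructor
	if err != nil {
		panic(err)
	}
	rc, err := cli.ContainerLogs(ctx, "my-container", types.ContainerLogsOptions{
		ShowStdout: true,
		ShowStderr: true,
	})
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	// For a non-TTY container, StdCopy strips the 8-byte frame headers and
	// routes each frame to the matching writer.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, rc); err != nil {
		panic(err)
	}
}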
-func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { - query := url.Values{} - if options.ShowStdout { - query.Set("stdout", "1") - } - - if options.ShowStderr { - query.Set("stderr", "1") - } - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, time.Now()) - if err != nil { - return nil, errors.Wrap(err, `invalid value for "since"`) - } - query.Set("since", ts) - } - - if options.Until != "" { - ts, err := timetypes.GetTimestamp(options.Until, time.Now()) - if err != nil { - return nil, errors.Wrap(err, `invalid value for "until"`) - } - query.Set("until", ts) - } - - if options.Timestamps { - query.Set("timestamps", "1") - } - - if options.Details { - query.Set("details", "1") - } - - if options.Follow { - query.Set("follow", "1") - } - query.Set("tail", options.Tail) - - resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) - if err != nil { - return nil, wrapResponseError(err, resp, "container", container) - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/container_pause.go b/vendor/github.com/docker/docker/client/container_pause.go deleted file mode 100644 index 5e7271a37..000000000 --- a/vendor/github.com/docker/docker/client/container_pause.go +++ /dev/null @@ -1,10 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// ContainerPause pauses the main process of a given container without terminating it. -func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { - resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go deleted file mode 100644 index 14f88d93b..000000000 --- a/vendor/github.com/docker/docker/client/container_prune.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" -) - -// ContainersPrune requests the daemon to delete unused data -func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) { - var report types.ContainersPruneReport - - if err := cli.NewVersionError("1.25", "container prune"); err != nil { - return report, err - } - - query, err := getFiltersQuery(pruneFilters) - if err != nil { - return report, err - } - - serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil) - if err != nil { - return report, err - } - defer ensureReaderClosed(serverResp) - - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving disk usage: %v", err) - } - - return report, nil -} diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go deleted file mode 100644 index ab4cfc16f..000000000 --- a/vendor/github.com/docker/docker/client/container_remove.go +++ /dev/null @@ -1,27 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// ContainerRemove kills and removes a container from the docker host. 
-func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error { - query := url.Values{} - if options.RemoveVolumes { - query.Set("v", "1") - } - if options.RemoveLinks { - query.Set("link", "1") - } - - if options.Force { - query.Set("force", "1") - } - - resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) - ensureReaderClosed(resp) - return wrapResponseError(err, resp, "container", containerID) -} diff --git a/vendor/github.com/docker/docker/client/container_rename.go b/vendor/github.com/docker/docker/client/container_rename.go deleted file mode 100644 index 240fdf552..000000000 --- a/vendor/github.com/docker/docker/client/container_rename.go +++ /dev/null @@ -1,15 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" -) - -// ContainerRename changes the name of a given container. -func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error { - query := url.Values{} - query.Set("name", newContainerName) - resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_resize.go b/vendor/github.com/docker/docker/client/container_resize.go deleted file mode 100644 index a9d4c0c79..000000000 --- a/vendor/github.com/docker/docker/client/container_resize.go +++ /dev/null @@ -1,29 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "strconv" - - "github.com/docker/docker/api/types" -) - -// ContainerResize changes the size of the tty for a container. -func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error { - return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width) -} - -// ContainerExecResize changes the size of the tty for an exec process running inside a container. -func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error { - return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width) -} - -func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error { - query := url.Values{} - query.Set("h", strconv.Itoa(int(height))) - query.Set("w", strconv.Itoa(int(width))) - - resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go deleted file mode 100644 index 41e421969..000000000 --- a/vendor/github.com/docker/docker/client/container_restart.go +++ /dev/null @@ -1,22 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "time" - - timetypes "github.com/docker/docker/api/types/time" -) - -// ContainerRestart stops and starts a container again. -// It makes the daemon wait for the container to be up again for -// a specific amount of time, given the timeout.
-func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout *time.Duration) error { - query := url.Values{} - if timeout != nil { - query.Set("t", timetypes.DurationToSecondsString(*timeout)) - } - resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_start.go b/vendor/github.com/docker/docker/client/container_start.go deleted file mode 100644 index c2e0b15dc..000000000 --- a/vendor/github.com/docker/docker/client/container_start.go +++ /dev/null @@ -1,23 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// ContainerStart sends a request to the docker daemon to start a container. -func (cli *Client) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error { - query := url.Values{} - if len(options.CheckpointID) != 0 { - query.Set("checkpoint", options.CheckpointID) - } - if len(options.CheckpointDir) != 0 { - query.Set("checkpoint-dir", options.CheckpointDir) - } - - resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go deleted file mode 100644 index 6ef44c774..000000000 --- a/vendor/github.com/docker/docker/client/container_stats.go +++ /dev/null @@ -1,26 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// ContainerStats returns near realtime stats for a given container. -// It's up to the caller to close the io.ReadCloser returned. -func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { - query := url.Values{} - query.Set("stream", "0") - if stream { - query.Set("stream", "1") - } - - resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) - if err != nil { - return types.ContainerStats{}, err - } - - osType := getDockerOS(resp.header.Get("Server")) - return types.ContainerStats{Body: resp.body, OSType: osType}, err -} diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go deleted file mode 100644 index 629d7ab64..000000000 --- a/vendor/github.com/docker/docker/client/container_stop.go +++ /dev/null @@ -1,26 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "time" - - timetypes "github.com/docker/docker/api/types/time" -) - -// ContainerStop stops a container. In case the container fails to stop -// gracefully within a time frame specified by the timeout argument, -// it is forcefully terminated (killed). -// -// If the timeout is nil, the container's StopTimeout value is used, if set, -// otherwise the engine default. A negative timeout value can be specified, -// meaning no timeout, i.e. no forceful termination is performed. 
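(Illustrative note, not part of the patch: the nil-versus-explicit timeout semantics described above would be exercised roughly like this; "my-container" and client.NewEnvClient are assumptions.)

package main

import (
	"context"
	"time"

	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewEnvClient() // assumed env-based constructor
	if err != nil {
		panic(err)
	}
	// An explicit grace period; passing nil instead falls back to the
	// container's StopTimeout (or the engine default), as described above.
	timeout := 10 * time.Second
	if err := cli.ContainerStop(ctx, "my-container", &timeout); err != nil {
		panic(err)
	}
}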
-func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error { - query := url.Values{} - if timeout != nil { - query.Set("t", timetypes.DurationToSecondsString(*timeout)) - } - resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_top.go b/vendor/github.com/docker/docker/client/container_top.go deleted file mode 100644 index 9c9fce7a0..000000000 --- a/vendor/github.com/docker/docker/client/container_top.go +++ /dev/null @@ -1,28 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - "strings" - - "github.com/docker/docker/api/types/container" -) - -// ContainerTop shows process information from within a container. -func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (container.ContainerTopOKBody, error) { - var response container.ContainerTopOKBody - query := url.Values{} - if len(arguments) > 0 { - query.Set("ps_args", strings.Join(arguments, " ")) - } - - resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/container_unpause.go b/vendor/github.com/docker/docker/client/container_unpause.go deleted file mode 100644 index 1d8f87316..000000000 --- a/vendor/github.com/docker/docker/client/container_unpause.go +++ /dev/null @@ -1,10 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// ContainerUnpause resumes the process execution within a container -func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error { - resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/docker/docker/client/container_update.go deleted file mode 100644 index 14e7f23df..000000000 --- a/vendor/github.com/docker/docker/client/container_update.go +++ /dev/null @@ -1,22 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types/container" -) - -// ContainerUpdate updates resources of a container -func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) { - var response container.ContainerUpdateOKBody - serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) - if err != nil { - return response, err - } - - err = json.NewDecoder(serverResp.body).Decode(&response) - - ensureReaderClosed(serverResp) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go deleted file mode 100644 index 6ab8c1da9..000000000 --- a/vendor/github.com/docker/docker/client/container_wait.go +++ /dev/null @@ -1,83 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/versions" -) - -// ContainerWait waits until the specified container is in a certain 
state -// indicated by the given condition, either "not-running" (default), -// "next-exit", or "removed". -// -// If this client's API version is before 1.30, condition is ignored and -// ContainerWait will return immediately with the two channels, as the server -// will wait as if the condition were "not-running". -// -// If this client's API version is at least 1.30, ContainerWait blocks until -// the request has been acknowledged by the server (with a response header), -// then returns two channels on which the caller can wait for the exit status -// of the container or an error if there was a problem either beginning the -// wait request or in getting the response. This allows the caller to -// synchronize ContainerWait with other calls, such as specifying a -// "next-exit" condition before issuing a ContainerStart request. -func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) { - if versions.LessThan(cli.ClientVersion(), "1.30") { - return cli.legacyContainerWait(ctx, containerID) - } - - resultC := make(chan container.ContainerWaitOKBody) - errC := make(chan error, 1) - - query := url.Values{} - query.Set("condition", string(condition)) - - resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil) - if err != nil { - defer ensureReaderClosed(resp) - errC <- err - return resultC, errC - } - - go func() { - defer ensureReaderClosed(resp) - var res container.ContainerWaitOKBody - if err := json.NewDecoder(resp.body).Decode(&res); err != nil { - errC <- err - return - } - - resultC <- res - }() - - return resultC, errC -} - -// legacyContainerWait returns immediately and doesn't have an option to wait -// until the container is removed. 
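(Illustrative note, not part of the patch: the register-wait-before-start synchronization described in the ContainerWait comment above looks roughly like the sketch below; "my-container" and client.NewEnvClient are assumptions.)

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewEnvClient() // assumed env-based constructor
	if err != nil {
		panic(err)
	}
	// Register the wait before starting, so the "not-running" transition
	// cannot be missed.
	statusCh, errCh := cli.ContainerWait(ctx, "my-container", container.WaitConditionNotRunning)
	if err := cli.ContainerStart(ctx, "my-container", types.ContainerStartOptions{}); err != nil {
		panic(err)
	}
	select {
	case err := <-errCh:
		panic(err)
	case status := <-statusCh:
		fmt.Println("exit code:", status.StatusCode)
	}
}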
-func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.ContainerWaitOKBody, <-chan error) { - resultC := make(chan container.ContainerWaitOKBody) - errC := make(chan error) - - go func() { - resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) - if err != nil { - errC <- err - return - } - defer ensureReaderClosed(resp) - - var res container.ContainerWaitOKBody - if err := json.NewDecoder(resp.body).Decode(&res); err != nil { - errC <- err - return - } - - resultC <- res - }() - - return resultC, errC -} diff --git a/vendor/github.com/docker/docker/client/disk_usage.go b/vendor/github.com/docker/docker/client/disk_usage.go deleted file mode 100644 index 8eb30eb5d..000000000 --- a/vendor/github.com/docker/docker/client/disk_usage.go +++ /dev/null @@ -1,26 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types" -) - -// DiskUsage requests the current data usage from the daemon -func (cli *Client) DiskUsage(ctx context.Context) (types.DiskUsage, error) { - var du types.DiskUsage - - serverResp, err := cli.get(ctx, "/system/df", nil, nil) - if err != nil { - return du, err - } - defer ensureReaderClosed(serverResp) - - if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil { - return du, fmt.Errorf("Error retrieving disk usage: %v", err) - } - - return du, nil -} diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go deleted file mode 100644 index 7245bbeed..000000000 --- a/vendor/github.com/docker/docker/client/distribution_inspect.go +++ /dev/null @@ -1,38 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - registrytypes "github.com/docker/docker/api/types/registry" -) - -// DistributionInspect returns the image digest with full Manifest -func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registrytypes.DistributionInspect, error) { - // Contact the registry to retrieve digest and platform information - var distributionInspect registrytypes.DistributionInspect - if image == "" { - return distributionInspect, objectNotFoundError{object: "distribution", id: image} - } - - if err := cli.NewVersionError("1.30", "distribution inspect"); err != nil { - return distributionInspect, err - } - var headers map[string][]string - - if encodedRegistryAuth != "" { - headers = map[string][]string{ - "X-Registry-Auth": {encodedRegistryAuth}, - } - } - - resp, err := cli.get(ctx, "/distribution/"+image+"/json", url.Values{}, headers) - if err != nil { - return distributionInspect, err - } - - err = json.NewDecoder(resp.body).Decode(&distributionInspect) - ensureReaderClosed(resp) - return distributionInspect, err -} diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go deleted file mode 100644 index 0461af329..000000000 --- a/vendor/github.com/docker/docker/client/errors.go +++ /dev/null @@ -1,132 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "fmt" - "net/http" - - "github.com/docker/docker/api/types/versions" - "github.com/pkg/errors" -) - -// errConnectionFailed implements an error returned when connection failed. 
-type errConnectionFailed struct { - host string -} - -// Error returns a string representation of an errConnectionFailed -func (err errConnectionFailed) Error() string { - if err.host == "" { - return "Cannot connect to the Docker daemon. Is the docker daemon running on this host?" - } - return fmt.Sprintf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", err.host) -} - -// IsErrConnectionFailed returns true if the error is caused by a failed connection. -func IsErrConnectionFailed(err error) bool { - _, ok := errors.Cause(err).(errConnectionFailed) - return ok -} - -// ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed. -func ErrorConnectionFailed(host string) error { - return errConnectionFailed{host: host} -} - -type notFound interface { - error - NotFound() bool // Is the error a NotFound error -} - -// IsErrNotFound returns true if the error is a NotFound error, which is returned -// by the API when some object is not found. -func IsErrNotFound(err error) bool { - te, ok := err.(notFound) - return ok && te.NotFound() -} - -type objectNotFoundError struct { - object string - id string -} - -func (e objectNotFoundError) NotFound() bool { - return true -} - -func (e objectNotFoundError) Error() string { - return fmt.Sprintf("Error: No such %s: %s", e.object, e.id) -} - -func wrapResponseError(err error, resp serverResponse, object, id string) error { - switch { - case err == nil: - return nil - case resp.statusCode == http.StatusNotFound: - return objectNotFoundError{object: object, id: id} - case resp.statusCode == http.StatusNotImplemented: - return notImplementedError{message: err.Error()} - default: - return err - } -} - -// unauthorizedError represents an authorization error in a remote registry. -type unauthorizedError struct { - cause error -} - -// Error returns a string representation of an unauthorizedError -func (u unauthorizedError) Error() string { - return u.cause.Error() -} - -// IsErrUnauthorized returns true if the error is caused -// by a failed remote registry authentication -func IsErrUnauthorized(err error) bool { - _, ok := err.(unauthorizedError) - return ok -} - -type pluginPermissionDenied struct { - name string -} - -func (e pluginPermissionDenied) Error() string { - return "Permission denied while installing plugin " + e.name -} - -// IsErrPluginPermissionDenied returns true if the error is caused -// by a user denying a plugin's permissions -func IsErrPluginPermissionDenied(err error) bool { - _, ok := err.(pluginPermissionDenied) - return ok -} - -type notImplementedError struct { - message string -} - -func (e notImplementedError) Error() string { - return e.message -} - -func (e notImplementedError) NotImplemented() bool { - return true -} - -// IsErrNotImplemented returns true if the error is a NotImplemented error. -// This is returned by the API when a requested feature has not been -// implemented.
-func IsErrNotImplemented(err error) bool { - te, ok := err.(notImplementedError) - return ok && te.NotImplemented() -} - -// NewVersionError returns an error if the required API version -// is less than the current supported version -func (cli *Client) NewVersionError(APIrequired, feature string) error { - if cli.version != "" && versions.LessThan(cli.version, APIrequired) { - return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version) - } - return nil -} diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go deleted file mode 100644 index 6e5653895..000000000 --- a/vendor/github.com/docker/docker/client/events.go +++ /dev/null @@ -1,101 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/events" - "github.com/docker/docker/api/types/filters" - timetypes "github.com/docker/docker/api/types/time" -) - -// Events returns a stream of events in the daemon. It's up to the caller to close the stream -// by cancelling the context. Once the stream has been completely read an io.EOF error will -// be sent over the error channel. If an error is sent all processing will be stopped. It's up -// to the caller to reopen the stream in the event of an error by reinvoking this method. -func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) { - - messages := make(chan events.Message) - errs := make(chan error, 1) - - started := make(chan struct{}) - go func() { - defer close(errs) - - query, err := buildEventsQueryParams(cli.version, options) - if err != nil { - close(started) - errs <- err - return - } - - resp, err := cli.get(ctx, "/events", query, nil) - if err != nil { - close(started) - errs <- err - return - } - defer resp.body.Close() - - decoder := json.NewDecoder(resp.body) - - close(started) - for { - select { - case <-ctx.Done(): - errs <- ctx.Err() - return - default: - var event events.Message - if err := decoder.Decode(&event); err != nil { - errs <- err - return - } - - select { - case messages <- event: - case <-ctx.Done(): - errs <- ctx.Err() - return - } - } - } - }() - <-started - - return messages, errs -} - -func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url.Values, error) { - query := url.Values{} - ref := time.Now() - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, ref) - if err != nil { - return nil, err - } - query.Set("since", ts) - } - - if options.Until != "" { - ts, err := timetypes.GetTimestamp(options.Until, ref) - if err != nil { - return nil, err - } - query.Set("until", ts) - } - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters) - if err != nil { - return nil, err - } - query.Set("filters", filterJSON) - } - - return query, nil -} diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go deleted file mode 100644 index 35f5dd86d..000000000 --- a/vendor/github.com/docker/docker/client/hijack.go +++ /dev/null @@ -1,129 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bufio" - "context" - "crypto/tls" - "fmt" - "net" - "net/http" - "net/http/httputil" - "net/url" - "time" - - "github.com/docker/docker/api/types" -
"github.com/docker/go-connections/sockets" - "github.com/pkg/errors" -) - -// postHijacked sends a POST request and hijacks the connection. -func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) { - bodyEncoded, err := encodeData(body) - if err != nil { - return types.HijackedResponse{}, err - } - - apiPath := cli.getAPIPath(path, query) - req, err := http.NewRequest("POST", apiPath, bodyEncoded) - if err != nil { - return types.HijackedResponse{}, err - } - req = cli.addHeaders(req, headers) - - conn, err := cli.setupHijackConn(req, "tcp") - if err != nil { - return types.HijackedResponse{}, err - } - - return types.HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}, err -} - -func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { - if tlsConfig != nil && proto != "unix" && proto != "npipe" { - return tls.Dial(proto, addr, tlsConfig) - } - if proto == "npipe" { - return sockets.DialPipe(addr, 32*time.Second) - } - return net.Dial(proto, addr) -} - -func (cli *Client) setupHijackConn(req *http.Request, proto string) (net.Conn, error) { - req.Host = cli.addr - req.Header.Set("Connection", "Upgrade") - req.Header.Set("Upgrade", proto) - - conn, err := dial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport)) - if err != nil { - return nil, errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") - } - - // When we set up a TCP connection for hijack, there could be long periods - // of inactivity (a long running command with no output) that in certain - // network setups may cause ECONNTIMEOUT, leaving the client in an unknown - // state. Setting TCP KeepAlive on the socket connection will prohibit - // ECONNTIMEOUT unless the socket connection truly is broken - if tcpConn, ok := conn.(*net.TCPConn); ok { - tcpConn.SetKeepAlive(true) - tcpConn.SetKeepAlivePeriod(30 * time.Second) - } - - clientconn := httputil.NewClientConn(conn, nil) - defer clientconn.Close() - - // Server hijacks the connection, error 'connection closed' expected - resp, err := clientconn.Do(req) - if err != httputil.ErrPersistEOF { - if err != nil { - return nil, err - } - if resp.StatusCode != http.StatusSwitchingProtocols { - resp.Body.Close() - return nil, fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode) - } - } - - c, br := clientconn.Hijack() - if br.Buffered() > 0 { - // If there is buffered content, wrap the connection. We return an - // object that implements CloseWrite iff the underlying connection - // implements it. - if _, ok := c.(types.CloseWriter); ok { - c = &hijackedConnCloseWriter{&hijackedConn{c, br}} - } else { - c = &hijackedConn{c, br} - } - } else { - br.Reset(nil) - } - - return c, nil -} - -// hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case -// that a) there was already buffered data in the http layer when Hijack() was -// called, and b) the underlying net.Conn does *not* implement CloseWrite(). -// hijackedConn does not implement CloseWrite() either. -type hijackedConn struct { - net.Conn - r *bufio.Reader -} - -func (c *hijackedConn) Read(b []byte) (int, error) { - return c.r.Read(b) -} - -// hijackedConnCloseWriter is a hijackedConn which additionally implements -// CloseWrite(). 
It is returned by setupHijackConn in the case that a) there -// was already buffered data in the http layer when Hijack() was called, and b) -// the underlying net.Conn *does* implement CloseWrite(). -type hijackedConnCloseWriter struct { - *hijackedConn -} - -var _ types.CloseWriter = &hijackedConnCloseWriter{} - -func (c *hijackedConnCloseWriter) CloseWrite() error { - conn := c.Conn.(types.CloseWriter) - return conn.CloseWrite() -} diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go deleted file mode 100644 index 672146031..000000000 --- a/vendor/github.com/docker/docker/client/image_build.go +++ /dev/null @@ -1,137 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/base64" - "encoding/json" - "io" - "net/http" - "net/url" - "strconv" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" -) - -// ImageBuild sends a request to the daemon to build images. -// The Body in the response implements an io.ReadCloser and it's up to the caller to -// close it. -func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { - query, err := cli.imageBuildOptionsToQuery(options) - if err != nil { - return types.ImageBuildResponse{}, err - } - - headers := http.Header(make(map[string][]string)) - buf, err := json.Marshal(options.AuthConfigs) - if err != nil { - return types.ImageBuildResponse{}, err - } - headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) - - if options.Platform != "" { - if err := cli.NewVersionError("1.32", "platform"); err != nil { - return types.ImageBuildResponse{}, err - } - query.Set("platform", options.Platform) - } - headers.Set("Content-Type", "application/x-tar") - - serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) - if err != nil { - return types.ImageBuildResponse{}, err - } - - osType := getDockerOS(serverResp.header.Get("Server")) - - return types.ImageBuildResponse{ - Body: serverResp.body, - OSType: osType, - }, nil -} - -func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) { - query := url.Values{ - "t": options.Tags, - "securityopt": options.SecurityOpt, - "extrahosts": options.ExtraHosts, - } - if options.SuppressOutput { - query.Set("q", "1") - } - if options.RemoteContext != "" { - query.Set("remote", options.RemoteContext) - } - if options.NoCache { - query.Set("nocache", "1") - } - if options.Remove { - query.Set("rm", "1") - } else { - query.Set("rm", "0") - } - - if options.ForceRemove { - query.Set("forcerm", "1") - } - - if options.PullParent { - query.Set("pull", "1") - } - - if options.Squash { - if err := cli.NewVersionError("1.25", "squash"); err != nil { - return query, err - } - query.Set("squash", "1") - } - - if !container.Isolation.IsDefault(options.Isolation) { - query.Set("isolation", string(options.Isolation)) - } - - query.Set("cpusetcpus", options.CPUSetCPUs) - query.Set("networkmode", options.NetworkMode) - query.Set("cpusetmems", options.CPUSetMems) - query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) - query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) - query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10)) - query.Set("memory", strconv.FormatInt(options.Memory, 10)) - query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) - query.Set("cgroupparent",
options.CgroupParent) - query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) - query.Set("dockerfile", options.Dockerfile) - query.Set("target", options.Target) - - ulimitsJSON, err := json.Marshal(options.Ulimits) - if err != nil { - return query, err - } - query.Set("ulimits", string(ulimitsJSON)) - - buildArgsJSON, err := json.Marshal(options.BuildArgs) - if err != nil { - return query, err - } - query.Set("buildargs", string(buildArgsJSON)) - - labelsJSON, err := json.Marshal(options.Labels) - if err != nil { - return query, err - } - query.Set("labels", string(labelsJSON)) - - cacheFromJSON, err := json.Marshal(options.CacheFrom) - if err != nil { - return query, err - } - query.Set("cachefrom", string(cacheFromJSON)) - if options.SessionID != "" { - query.Set("session", options.SessionID) - } - if options.Platform != "" { - query.Set("platform", strings.ToLower(options.Platform)) - } - return query, nil -} diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go deleted file mode 100644 index 239380474..000000000 --- a/vendor/github.com/docker/docker/client/image_create.go +++ /dev/null @@ -1,37 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - "strings" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" -) - -// ImageCreate creates a new image based on the parent options. -// It returns the JSON content in the response body. -func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { - ref, err := reference.ParseNormalizedNamed(parentReference) - if err != nil { - return nil, err - } - - query := url.Values{} - query.Set("fromImage", reference.FamiliarName(ref)) - query.Set("tag", getAPITagFromNamedRef(ref)) - if options.Platform != "" { - query.Set("platform", strings.ToLower(options.Platform)) - } - resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) - if err != nil { - return nil, err - } - return resp.body, nil -} - -func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.post(ctx, "/images/create", query, nil, headers) -} diff --git a/vendor/github.com/docker/docker/client/image_history.go b/vendor/github.com/docker/docker/client/image_history.go deleted file mode 100644 index 0151b9517..000000000 --- a/vendor/github.com/docker/docker/client/image_history.go +++ /dev/null @@ -1,22 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/image" -) - -// ImageHistory returns the changes in an image in history format.
-func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]image.HistoryResponseItem, error) { - var history []image.HistoryResponseItem - serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil) - if err != nil { - return history, err - } - - err = json.NewDecoder(serverResp.body).Decode(&history) - ensureReaderClosed(serverResp) - return history, err -} diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go deleted file mode 100644 index c2972ea95..000000000 --- a/vendor/github.com/docker/docker/client/image_import.go +++ /dev/null @@ -1,40 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - "strings" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" -) - -// ImageImport creates a new image based on the source options. -// It returns the JSON content in the response body. -func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { - if ref != "" { - // Check if the given image name can be resolved - if _, err := reference.ParseNormalizedNamed(ref); err != nil { - return nil, err - } - } - - query := url.Values{} - query.Set("fromSrc", source.SourceName) - query.Set("repo", ref) - query.Set("tag", options.Tag) - query.Set("message", options.Message) - if options.Platform != "" { - query.Set("platform", strings.ToLower(options.Platform)) - } - for _, change := range options.Changes { - query.Add("changes", change) - } - - resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go deleted file mode 100644 index 2f8f6d2f1..000000000 --- a/vendor/github.com/docker/docker/client/image_inspect.go +++ /dev/null @@ -1,32 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io/ioutil" - - "github.com/docker/docker/api/types" -) - -// ImageInspectWithRaw returns the image information and its raw representation.
-func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) { - if imageID == "" { - return types.ImageInspect{}, nil, objectNotFoundError{object: "image", id: imageID} - } - serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) - if err != nil { - return types.ImageInspect{}, nil, wrapResponseError(err, serverResp, "image", imageID) - } - defer ensureReaderClosed(serverResp) - - body, err := ioutil.ReadAll(serverResp.body) - if err != nil { - return types.ImageInspect{}, nil, err - } - - var response types.ImageInspect - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go deleted file mode 100644 index 32fae27b3..000000000 --- a/vendor/github.com/docker/docker/client/image_list.go +++ /dev/null @@ -1,45 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/versions" -) - -// ImageList returns a list of images in the docker host. -func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) { - var images []types.ImageSummary - query := url.Values{} - - optionFilters := options.Filters - referenceFilters := optionFilters.Get("reference") - if versions.LessThan(cli.version, "1.25") && len(referenceFilters) > 0 { - query.Set("filter", referenceFilters[0]) - for _, filterValue := range referenceFilters { - optionFilters.Del("reference", filterValue) - } - } - if optionFilters.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters) - if err != nil { - return images, err - } - query.Set("filters", filterJSON) - } - if options.All { - query.Set("all", "1") - } - - serverResp, err := cli.get(ctx, "/images/json", query, nil) - if err != nil { - return images, err - } - - err = json.NewDecoder(serverResp.body).Decode(&images) - ensureReaderClosed(serverResp) - return images, err -} diff --git a/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/docker/docker/client/image_load.go deleted file mode 100644 index 91016e493..000000000 --- a/vendor/github.com/docker/docker/client/image_load.go +++ /dev/null @@ -1,29 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - - "github.com/docker/docker/api/types" -) - -// ImageLoad loads an image in the docker host from the client host. -// It's up to the caller to close the io.ReadCloser in the -// ImageLoadResponse returned by this function. 
-func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) { - v := url.Values{} - v.Set("quiet", "0") - if quiet { - v.Set("quiet", "1") - } - headers := map[string][]string{"Content-Type": {"application/x-tar"}} - resp, err := cli.postRaw(ctx, "/images/load", v, input, headers) - if err != nil { - return types.ImageLoadResponse{}, err - } - return types.ImageLoadResponse{ - Body: resp.body, - JSON: resp.header.Get("Content-Type") == "application/json", - }, nil -} diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go deleted file mode 100644 index 78ee3f6c4..000000000 --- a/vendor/github.com/docker/docker/client/image_prune.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" -) - -// ImagesPrune requests the daemon to delete unused data -func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (types.ImagesPruneReport, error) { - var report types.ImagesPruneReport - - if err := cli.NewVersionError("1.25", "image prune"); err != nil { - return report, err - } - - query, err := getFiltersQuery(pruneFilters) - if err != nil { - return report, err - } - - serverResp, err := cli.post(ctx, "/images/prune", query, nil, nil) - if err != nil { - return report, err - } - defer ensureReaderClosed(serverResp) - - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving disk usage: %v", err) - } - - return report, nil -} diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go deleted file mode 100644 index d97aacf8c..000000000 --- a/vendor/github.com/docker/docker/client/image_pull.go +++ /dev/null @@ -1,64 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/http" - "net/url" - "strings" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" -) - -// ImagePull requests the docker host to pull an image from a remote registry. -// It executes the privileged function if the operation is unauthorized -// and it tries one more time. -// It's up to the caller to handle the io.ReadCloser and close it properly. 
-// -// FIXME(vdemeester): this is currently used in a few ways in docker/docker -// - if not in trusted content, ref is used to pass the whole reference, and tag is empty -// - if in trusted content, ref is used to pass the reference name, and tag for the digest -func (cli *Client) ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) { - ref, err := reference.ParseNormalizedNamed(refStr) - if err != nil { - return nil, err - } - - query := url.Values{} - query.Set("fromImage", reference.FamiliarName(ref)) - if !options.All { - query.Set("tag", getAPITagFromNamedRef(ref)) - } - if options.Platform != "" { - query.Set("platform", strings.ToLower(options.Platform)) - } - - resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc() - if privilegeErr != nil { - return nil, privilegeErr - } - resp, err = cli.tryImageCreate(ctx, query, newAuthHeader) - } - if err != nil { - return nil, err - } - return resp.body, nil -} - -// getAPITagFromNamedRef returns a tag from the specified reference. -// This function is necessary as long as the docker "server" api expects -// digests to be sent as tags and makes a distinction between the name -// and tag/digest part of a reference. -func getAPITagFromNamedRef(ref reference.Named) string { - if digested, ok := ref.(reference.Digested); ok { - return digested.Digest().String() - } - ref = reference.TagNameOnly(ref) - if tagged, ok := ref.(reference.Tagged); ok { - return tagged.Tag() - } - return "" -} diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go deleted file mode 100644 index a15871c2b..000000000 --- a/vendor/github.com/docker/docker/client/image_push.go +++ /dev/null @@ -1,55 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "errors" - "io" - "net/http" - "net/url" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" -) - -// ImagePush requests the docker host to push an image to a remote registry. -// It executes the privileged function if the operation is unauthorized -// and it tries one more time. -// It's up to the caller to handle the io.ReadCloser and close it properly.
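A caller sketch for ImagePush. The X-Registry-Auth header carries a base64url-encoded JSON types.AuthConfig; the registry name and the credentials here are placeholders:

package main

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"io"
	"log"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv) // assumed constructor
	if err != nil {
		log.Fatal(err)
	}
	// Encode placeholder credentials the way the daemon expects them.
	auth, _ := json.Marshal(types.AuthConfig{Username: "user", Password: "secret"})
	rc, err := cli.ImagePush(context.Background(), "example.com/team/app:1.0",
		types.ImagePushOptions{RegistryAuth: base64.URLEncoding.EncodeToString(auth)})
	if err != nil {
		log.Fatal(err)
	}
	// The stream carries push-progress JSON; the caller must drain and close it.
	defer rc.Close()
	if _, err := io.Copy(os.Stdout, rc); err != nil {
		log.Fatal(err)
	}
}
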
-func (cli *Client) ImagePush(ctx context.Context, image string, options types.ImagePushOptions) (io.ReadCloser, error) { - ref, err := reference.ParseNormalizedNamed(image) - if err != nil { - return nil, err - } - - if _, isCanonical := ref.(reference.Canonical); isCanonical { - return nil, errors.New("cannot push a digest reference") - } - - tag := "" - name := reference.FamiliarName(ref) - - if nameTaggedRef, isNamedTagged := ref.(reference.NamedTagged); isNamedTagged { - tag = nameTaggedRef.Tag() - } - - query := url.Values{} - query.Set("tag", tag) - - resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc() - if privilegeErr != nil { - return nil, privilegeErr - } - resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader) - } - if err != nil { - return nil, err - } - return resp.body, nil -} - -func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (serverResponse, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers) -} diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go deleted file mode 100644 index 45d6e6f0d..000000000 --- a/vendor/github.com/docker/docker/client/image_remove.go +++ /dev/null @@ -1,31 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" -) - -// ImageRemove removes an image from the docker host. -func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { - query := url.Values{} - - if options.Force { - query.Set("force", "1") - } - if !options.PruneChildren { - query.Set("noprune", "1") - } - - var dels []types.ImageDeleteResponseItem - resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) - if err != nil { - return dels, wrapResponseError(err, resp, "image", imageID) - } - - err = json.NewDecoder(resp.body).Decode(&dels) - ensureReaderClosed(resp) - return dels, err -} diff --git a/vendor/github.com/docker/docker/client/image_save.go b/vendor/github.com/docker/docker/client/image_save.go deleted file mode 100644 index d1314e4b2..000000000 --- a/vendor/github.com/docker/docker/client/image_save.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" -) - -// ImageSave retrieves one or more images from the docker host as an io.ReadCloser. -// It's up to the caller to store the images and close the stream. 
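A usage sketch for ImageSave, writing the exported tar stream to disk; the image and file names are placeholders:

package main

import (
	"context"
	"io"
	"log"
	"os"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv) // assumed constructor
	if err != nil {
		log.Fatal(err)
	}
	rc, err := cli.ImageSave(context.Background(), []string{"alpine:latest"})
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	out, err := os.Create("alpine.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()
	// Persist the stream; the caller is responsible for storing and closing it.
	if _, err := io.Copy(out, rc); err != nil {
		log.Fatal(err)
	}
}
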
-func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) { - query := url.Values{ - "names": imageIDs, - } - - resp, err := cli.get(ctx, "/images/get", query, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go deleted file mode 100644 index 176de3c58..000000000 --- a/vendor/github.com/docker/docker/client/image_search.go +++ /dev/null @@ -1,51 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/registry" -) - -// ImageSearch makes the docker host to search by a term in a remote registry. -// The list of results is not sorted in any fashion. -func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) { - var results []registry.SearchResult - query := url.Values{} - query.Set("term", term) - query.Set("limit", fmt.Sprintf("%d", options.Limit)) - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return results, err - } - query.Set("filters", filterJSON) - } - - resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc() - if privilegeErr != nil { - return results, privilegeErr - } - resp, err = cli.tryImageSearch(ctx, query, newAuthHeader) - } - if err != nil { - return results, err - } - - err = json.NewDecoder(resp.body).Decode(&results) - ensureReaderClosed(resp) - return results, err -} - -func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.get(ctx, "/images/search", query, headers) -} diff --git a/vendor/github.com/docker/docker/client/image_tag.go b/vendor/github.com/docker/docker/client/image_tag.go deleted file mode 100644 index 5652bfc25..000000000 --- a/vendor/github.com/docker/docker/client/image_tag.go +++ /dev/null @@ -1,37 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/distribution/reference" - "github.com/pkg/errors" -) - -// ImageTag tags an image in the docker host -func (cli *Client) ImageTag(ctx context.Context, source, target string) error { - if _, err := reference.ParseAnyReference(source); err != nil { - return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", source) - } - - ref, err := reference.ParseNormalizedNamed(target) - if err != nil { - return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", target) - } - - if _, isCanonical := ref.(reference.Canonical); isCanonical { - return errors.New("refusing to create a tag with a digest reference") - } - - ref = reference.TagNameOnly(ref) - - query := url.Values{} - query.Set("repo", reference.FamiliarName(ref)) - if tagged, ok := ref.(reference.Tagged); ok { - query.Set("tag", tagged.Tag()) - } - - resp, err := cli.post(ctx, "/images/"+source+"/tag", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/info.go 
b/vendor/github.com/docker/docker/client/info.go deleted file mode 100644 index 121f256ab..000000000 --- a/vendor/github.com/docker/docker/client/info.go +++ /dev/null @@ -1,26 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - "net/url" - - "github.com/docker/docker/api/types" -) - -// Info returns information about the docker server. -func (cli *Client) Info(ctx context.Context) (types.Info, error) { - var info types.Info - serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) - if err != nil { - return info, err - } - defer ensureReaderClosed(serverResp) - - if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil { - return info, fmt.Errorf("Error reading remote info: %v", err) - } - - return info, nil -} diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go deleted file mode 100644 index 0487a0b9f..000000000 --- a/vendor/github.com/docker/docker/client/interface.go +++ /dev/null @@ -1,197 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net" - "net/http" - "time" - - "github.com/docker/docker/api/types" - containertypes "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/events" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/image" - networktypes "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/api/types/swarm" - volumetypes "github.com/docker/docker/api/types/volume" -) - -// CommonAPIClient is the common methods between stable and experimental versions of APIClient. -type CommonAPIClient interface { - ConfigAPIClient - ContainerAPIClient - DistributionAPIClient - ImageAPIClient - NodeAPIClient - NetworkAPIClient - PluginAPIClient - ServiceAPIClient - SwarmAPIClient - SecretAPIClient - SystemAPIClient - VolumeAPIClient - ClientVersion() string - DaemonHost() string - HTTPClient() *http.Client - ServerVersion(ctx context.Context) (types.Version, error) - NegotiateAPIVersion(ctx context.Context) - NegotiateAPIVersionPing(types.Ping) - DialSession(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) - Close() error -} - -// ContainerAPIClient defines API client methods for the containers -type ContainerAPIClient interface { - ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) - ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) - ContainerCreate(ctx context.Context, config *containertypes.Config, hostConfig *containertypes.HostConfig, networkingConfig *networktypes.NetworkingConfig, containerName string) (containertypes.ContainerCreateCreatedBody, error) - ContainerDiff(ctx context.Context, container string) ([]containertypes.ContainerChangeResponseItem, error) - ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) - ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) - ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) - ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error - ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error - 
ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) - ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) - ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) - ContainerKill(ctx context.Context, container, signal string) error - ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) - ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) - ContainerPause(ctx context.Context, container string) error - ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error - ContainerRename(ctx context.Context, container, newContainerName string) error - ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error - ContainerRestart(ctx context.Context, container string, timeout *time.Duration) error - ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) - ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error) - ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error - ContainerStop(ctx context.Context, container string, timeout *time.Duration) error - ContainerTop(ctx context.Context, container string, arguments []string) (containertypes.ContainerTopOKBody, error) - ContainerUnpause(ctx context.Context, container string) error - ContainerUpdate(ctx context.Context, container string, updateConfig containertypes.UpdateConfig) (containertypes.ContainerUpdateOKBody, error) - ContainerWait(ctx context.Context, container string, condition containertypes.WaitCondition) (<-chan containertypes.ContainerWaitOKBody, <-chan error) - CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) - CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error - ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) -} - -// DistributionAPIClient defines API client methods for the registry -type DistributionAPIClient interface { - DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error) -} - -// ImageAPIClient defines API client methods for the images -type ImageAPIClient interface { - ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) - BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error) - ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) - ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) - ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) - ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error) - ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) - ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) - ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) - ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) - 
ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) - ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) - ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) - ImageTag(ctx context.Context, image, ref string) error - ImagesPrune(ctx context.Context, pruneFilter filters.Args) (types.ImagesPruneReport, error) -} - -// NetworkAPIClient defines API client methods for the networks -type NetworkAPIClient interface { - NetworkConnect(ctx context.Context, network, container string, config *networktypes.EndpointSettings) error - NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) - NetworkDisconnect(ctx context.Context, network, container string, force bool) error - NetworkInspect(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, error) - NetworkInspectWithRaw(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) - NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) - NetworkRemove(ctx context.Context, network string) error - NetworksPrune(ctx context.Context, pruneFilter filters.Args) (types.NetworksPruneReport, error) -} - -// NodeAPIClient defines API client methods for the nodes -type NodeAPIClient interface { - NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) - NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) - NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error - NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error -} - -// PluginAPIClient defines API client methods for the plugins -type PluginAPIClient interface { - PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) - PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error - PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error - PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error - PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) - PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) - PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) - PluginSet(ctx context.Context, name string, args []string) error - PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) - PluginCreate(ctx context.Context, createContext io.Reader, options types.PluginCreateOptions) error -} - -// ServiceAPIClient defines API client methods for the services -type ServiceAPIClient interface { - ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) - ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) - ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) - ServiceRemove(ctx context.Context, serviceID string) error - ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) 
(types.ServiceUpdateResponse, error) - ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) - TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) - TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) - TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) -} - -// SwarmAPIClient defines API client methods for the swarm -type SwarmAPIClient interface { - SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) - SwarmJoin(ctx context.Context, req swarm.JoinRequest) error - SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) - SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error - SwarmLeave(ctx context.Context, force bool) error - SwarmInspect(ctx context.Context) (swarm.Swarm, error) - SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error -} - -// SystemAPIClient defines API client methods for the system -type SystemAPIClient interface { - Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) - Info(ctx context.Context) (types.Info, error) - RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) - DiskUsage(ctx context.Context) (types.DiskUsage, error) - Ping(ctx context.Context) (types.Ping, error) -} - -// VolumeAPIClient defines API client methods for the volumes -type VolumeAPIClient interface { - VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) - VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) - VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) - VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) - VolumeRemove(ctx context.Context, volumeID string, force bool) error - VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error) -} - -// SecretAPIClient defines API client methods for secrets -type SecretAPIClient interface { - SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) - SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) - SecretRemove(ctx context.Context, id string) error - SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error) - SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error -} - -// ConfigAPIClient defines API client methods for configs -type ConfigAPIClient interface { - ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) - ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) - ConfigRemove(ctx context.Context, id string) error - ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error) - ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error -} diff --git a/vendor/github.com/docker/docker/client/interface_experimental.go b/vendor/github.com/docker/docker/client/interface_experimental.go deleted file mode 100644 index 402ffb512..000000000 --- a/vendor/github.com/docker/docker/client/interface_experimental.go +++ /dev/null @@ -1,18 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - 
"github.com/docker/docker/api/types" -) - -type apiClientExperimental interface { - CheckpointAPIClient -} - -// CheckpointAPIClient defines API client methods for the checkpoints -type CheckpointAPIClient interface { - CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error - CheckpointDelete(ctx context.Context, container string, options types.CheckpointDeleteOptions) error - CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) -} diff --git a/vendor/github.com/docker/docker/client/interface_stable.go b/vendor/github.com/docker/docker/client/interface_stable.go deleted file mode 100644 index 5502cd742..000000000 --- a/vendor/github.com/docker/docker/client/interface_stable.go +++ /dev/null @@ -1,10 +0,0 @@ -package client // import "github.com/docker/docker/client" - -// APIClient is an interface that clients that talk with a docker server must implement. -type APIClient interface { - CommonAPIClient - apiClientExperimental -} - -// Ensure that Client always implements APIClient. -var _ APIClient = &Client{} diff --git a/vendor/github.com/docker/docker/client/login.go b/vendor/github.com/docker/docker/client/login.go deleted file mode 100644 index 7d6618190..000000000 --- a/vendor/github.com/docker/docker/client/login.go +++ /dev/null @@ -1,29 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/http" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/registry" -) - -// RegistryLogin authenticates the docker server with a given docker registry. -// It returns unauthorizedError when the authentication fails. -func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) { - resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) - - if resp.statusCode == http.StatusUnauthorized { - return registry.AuthenticateOKBody{}, unauthorizedError{err} - } - if err != nil { - return registry.AuthenticateOKBody{}, err - } - - var response registry.AuthenticateOKBody - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/network_connect.go b/vendor/github.com/docker/docker/client/network_connect.go deleted file mode 100644 index 571894613..000000000 --- a/vendor/github.com/docker/docker/client/network_connect.go +++ /dev/null @@ -1,19 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/network" -) - -// NetworkConnect connects a container to an existent network in the docker host. 
-func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error { - nc := types.NetworkConnect{ - Container: containerID, - EndpointConfig: config, - } - resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go deleted file mode 100644 index 41da2ac61..000000000 --- a/vendor/github.com/docker/docker/client/network_create.go +++ /dev/null @@ -1,25 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - - "github.com/docker/docker/api/types" -) - -// NetworkCreate creates a new network in the docker host. -func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) { - networkCreateRequest := types.NetworkCreateRequest{ - NetworkCreate: options, - Name: name, - } - var response types.NetworkCreateResponse - serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) - if err != nil { - return response, err - } - - json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/network_disconnect.go b/vendor/github.com/docker/docker/client/network_disconnect.go deleted file mode 100644 index dd1567665..000000000 --- a/vendor/github.com/docker/docker/client/network_disconnect.go +++ /dev/null @@ -1,15 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - - "github.com/docker/docker/api/types" -) - -// NetworkDisconnect disconnects a container from an existent network in the docker host. -func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error { - nd := types.NetworkDisconnect{Container: containerID, Force: force} - resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go deleted file mode 100644 index 025f6d875..000000000 --- a/vendor/github.com/docker/docker/client/network_inspect.go +++ /dev/null @@ -1,49 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io/ioutil" - "net/url" - - "github.com/docker/docker/api/types" -) - -// NetworkInspect returns the information for a specific network configured in the docker host. -func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) { - networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID, options) - return networkResource, err -} - -// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation. 
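A caller sketch for NetworkInspectWithRaw using the Verbose option handled in the query logic below; the network name is a placeholder:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv) // assumed constructor
	if err != nil {
		log.Fatal(err)
	}
	res, raw, err := cli.NetworkInspectWithRaw(context.Background(), "bridge",
		types.NetworkInspectOptions{Verbose: true})
	if err != nil {
		log.Fatal(err)
	}
	// res is the decoded types.NetworkResource; raw is the undecoded JSON body.
	fmt.Printf("%s (driver %s), %d bytes raw\n", res.Name, res.Driver, len(raw))
}
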
-func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) { - if networkID == "" { - return types.NetworkResource{}, nil, objectNotFoundError{object: "network", id: networkID} - } - var ( - networkResource types.NetworkResource - resp serverResponse - err error - ) - query := url.Values{} - if options.Verbose { - query.Set("verbose", "true") - } - if options.Scope != "" { - query.Set("scope", options.Scope) - } - resp, err = cli.get(ctx, "/networks/"+networkID, query, nil) - if err != nil { - return networkResource, nil, wrapResponseError(err, resp, "network", networkID) - } - defer ensureReaderClosed(resp) - - body, err := ioutil.ReadAll(resp.body) - if err != nil { - return networkResource, nil, err - } - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&networkResource) - return networkResource, body, err -} diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go deleted file mode 100644 index f16b2f562..000000000 --- a/vendor/github.com/docker/docker/client/network_list.go +++ /dev/null @@ -1,31 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" -) - -// NetworkList returns the list of networks configured in the docker host. -func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { - query := url.Values{} - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - var networkResources []types.NetworkResource - resp, err := cli.get(ctx, "/networks", query, nil) - if err != nil { - return networkResources, err - } - err = json.NewDecoder(resp.body).Decode(&networkResources) - ensureReaderClosed(resp) - return networkResources, err -} diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go deleted file mode 100644 index 6418b8b60..000000000 --- a/vendor/github.com/docker/docker/client/network_prune.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" -) - -// NetworksPrune requests the daemon to delete unused networks -func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (types.NetworksPruneReport, error) { - var report types.NetworksPruneReport - - if err := cli.NewVersionError("1.25", "network prune"); err != nil { - return report, err - } - - query, err := getFiltersQuery(pruneFilters) - if err != nil { - return report, err - } - - serverResp, err := cli.post(ctx, "/networks/prune", query, nil, nil) - if err != nil { - return report, err - } - defer ensureReaderClosed(serverResp) - - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving network prune report: %v", err) - } - - return report, nil -} diff --git a/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/docker/docker/client/network_remove.go deleted file mode 100644 index 12741437b..000000000 --- 
a/vendor/github.com/docker/docker/client/network_remove.go +++ /dev/null @@ -1,10 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "context" - -// NetworkRemove removes an existent network from the docker host. -func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { - resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) - ensureReaderClosed(resp) - return wrapResponseError(err, resp, "network", networkID) -} diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go deleted file mode 100644 index 593b2e9f0..000000000 --- a/vendor/github.com/docker/docker/client/node_inspect.go +++ /dev/null @@ -1,32 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io/ioutil" - - "github.com/docker/docker/api/types/swarm" -) - -// NodeInspectWithRaw returns the node information. -func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) { - if nodeID == "" { - return swarm.Node{}, nil, objectNotFoundError{object: "node", id: nodeID} - } - serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) - if err != nil { - return swarm.Node{}, nil, wrapResponseError(err, serverResp, "node", nodeID) - } - defer ensureReaderClosed(serverResp) - - body, err := ioutil.ReadAll(serverResp.body) - if err != nil { - return swarm.Node{}, nil, err - } - - var response swarm.Node - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go deleted file mode 100644 index 9883f6fc5..000000000 --- a/vendor/github.com/docker/docker/client/node_list.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" -) - -// NodeList returns the list of nodes. -func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/nodes", query, nil) - if err != nil { - return nil, err - } - - var nodes []swarm.Node - err = json.NewDecoder(resp.body).Decode(&nodes) - ensureReaderClosed(resp) - return nodes, err -} diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go deleted file mode 100644 index e7a750571..000000000 --- a/vendor/github.com/docker/docker/client/node_remove.go +++ /dev/null @@ -1,20 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// NodeRemove removes a Node. 
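A caller sketch for NodeRemove, using the Force option that maps to the force=1 query parameter below; the node ID is a placeholder:

package main

import (
	"context"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv) // assumed constructor
	if err != nil {
		log.Fatal(err)
	}
	// Force-remove a (hypothetical) worker node from the swarm.
	err = cli.NodeRemove(context.Background(), "node-id", types.NodeRemoveOptions{Force: true})
	if err != nil {
		log.Fatal(err)
	}
}
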
-func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error { - query := url.Values{} - if options.Force { - query.Set("force", "1") - } - - resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil) - ensureReaderClosed(resp) - return wrapResponseError(err, resp, "node", nodeID) -} diff --git a/vendor/github.com/docker/docker/client/node_update.go b/vendor/github.com/docker/docker/client/node_update.go deleted file mode 100644 index de32a617f..000000000 --- a/vendor/github.com/docker/docker/client/node_update.go +++ /dev/null @@ -1,18 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "strconv" - - "github.com/docker/docker/api/types/swarm" -) - -// NodeUpdate updates a Node. -func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error { - query := url.Values{} - query.Set("version", strconv.FormatUint(version.Index, 10)) - resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go deleted file mode 100644 index 85d38adb5..000000000 --- a/vendor/github.com/docker/docker/client/ping.go +++ /dev/null @@ -1,32 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "path" - - "github.com/docker/docker/api/types" -) - -// Ping pings the server and returns the value of the "Docker-Experimental", "OS-Type" & "API-Version" headers -func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { - var ping types.Ping - req, err := cli.buildRequest("GET", path.Join(cli.basePath, "/_ping"), nil, nil) - if err != nil { - return ping, err - } - serverResp, err := cli.doRequest(ctx, req) - if err != nil { - return ping, err - } - defer ensureReaderClosed(serverResp) - - if serverResp.header != nil { - ping.APIVersion = serverResp.header.Get("API-Version") - - if serverResp.header.Get("Docker-Experimental") == "true" { - ping.Experimental = true - } - ping.OSType = serverResp.header.Get("OSType") - } - return ping, cli.checkResponseErr(serverResp) -} diff --git a/vendor/github.com/docker/docker/client/plugin_create.go b/vendor/github.com/docker/docker/client/plugin_create.go deleted file mode 100644 index 4591db50f..000000000 --- a/vendor/github.com/docker/docker/client/plugin_create.go +++ /dev/null @@ -1,26 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/http" - "net/url" - - "github.com/docker/docker/api/types" -) - -// PluginCreate creates a plugin -func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, createOptions types.PluginCreateOptions) error { - headers := http.Header(make(map[string][]string)) - headers.Set("Content-Type", "application/x-tar") - - query := url.Values{} - query.Set("name", createOptions.RepoName) - - resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers) - if err != nil { - return err - } - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/plugin_disable.go b/vendor/github.com/docker/docker/client/plugin_disable.go deleted file mode 100644 index 01f6574f9..000000000 --- a/vendor/github.com/docker/docker/client/plugin_disable.go +++ /dev/null @@ -1,19 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - 
"github.com/docker/docker/api/types" -) - -// PluginDisable disables a plugin -func (cli *Client) PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error { - query := url.Values{} - if options.Force { - query.Set("force", "1") - } - resp, err := cli.post(ctx, "/plugins/"+name+"/disable", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/plugin_enable.go b/vendor/github.com/docker/docker/client/plugin_enable.go deleted file mode 100644 index 736da48bd..000000000 --- a/vendor/github.com/docker/docker/client/plugin_enable.go +++ /dev/null @@ -1,19 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - "strconv" - - "github.com/docker/docker/api/types" -) - -// PluginEnable enables a plugin -func (cli *Client) PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error { - query := url.Values{} - query.Set("timeout", strconv.Itoa(options.Timeout)) - - resp, err := cli.post(ctx, "/plugins/"+name+"/enable", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go deleted file mode 100644 index 0ab7beaee..000000000 --- a/vendor/github.com/docker/docker/client/plugin_inspect.go +++ /dev/null @@ -1,31 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "context" - "encoding/json" - "io/ioutil" - - "github.com/docker/docker/api/types" -) - -// PluginInspectWithRaw inspects an existing plugin -func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) { - if name == "" { - return nil, nil, objectNotFoundError{object: "plugin", id: name} - } - resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) - if err != nil { - return nil, nil, wrapResponseError(err, resp, "plugin", name) - } - - defer ensureReaderClosed(resp) - body, err := ioutil.ReadAll(resp.body) - if err != nil { - return nil, nil, err - } - var p types.Plugin - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&p) - return &p, body, err -} diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go deleted file mode 100644 index 13baa40a9..000000000 --- a/vendor/github.com/docker/docker/client/plugin_install.go +++ /dev/null @@ -1,113 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "io" - "net/http" - "net/url" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/pkg/errors" -) - -// PluginInstall installs a plugin -func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { - query := url.Values{} - if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { - return nil, errors.Wrap(err, "invalid remote reference") - } - query.Set("remote", options.RemoteRef) - - privileges, err := cli.checkPluginPermissions(ctx, query, options) - if err != nil { - return nil, err - } - - // set name for plugin pull, if empty should default to remote reference - query.Set("name", name) - - resp, err := cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth) - if err != nil { - return nil, err - } - - name = resp.header.Get("Docker-Plugin-Name") - - pr, pw := io.Pipe() - go 
func() { // todo: the client should probably be designed more around the actual api - _, err := io.Copy(pw, resp.body) - if err != nil { - pw.CloseWithError(err) - return - } - defer func() { - if err != nil { - delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil) - ensureReaderClosed(delResp) - } - }() - if len(options.Args) > 0 { - if err := cli.PluginSet(ctx, name, options.Args); err != nil { - pw.CloseWithError(err) - return - } - } - - if options.Disabled { - pw.Close() - return - } - - enableErr := cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0}) - pw.CloseWithError(enableErr) - }() - return pr, nil -} - -func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.get(ctx, "/plugins/privileges", query, headers) -} - -func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (serverResponse, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.post(ctx, "/plugins/pull", query, privileges, headers) -} - -func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) { - resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { - // todo: do inspect before to check existing name before checking privileges - newAuthHeader, privilegeErr := options.PrivilegeFunc() - if privilegeErr != nil { - ensureReaderClosed(resp) - return nil, privilegeErr - } - options.RegistryAuth = newAuthHeader - resp, err = cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) - } - if err != nil { - ensureReaderClosed(resp) - return nil, err - } - - var privileges types.PluginPrivileges - if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil { - ensureReaderClosed(resp) - return nil, err - } - ensureReaderClosed(resp) - - if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 { - accept, err := options.AcceptPermissionsFunc(privileges) - if err != nil { - return nil, err - } - if !accept { - return nil, pluginPermissionDenied{options.RemoteRef} - } - } - return privileges, nil -} diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go deleted file mode 100644 index ade1051a9..000000000 --- a/vendor/github.com/docker/docker/client/plugin_list.go +++ /dev/null @@ -1,32 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" -) - -// PluginList returns the installed plugins -func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) { - var plugins types.PluginsListResponse - query := url.Values{} - - if filter.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cli.version, filter) - if err != nil { - return plugins, err - } - query.Set("filters", filterJSON) - } - resp, err := cli.get(ctx, "/plugins", query, nil) - if err != nil { - return plugins, wrapResponseError(err, resp, "plugin", "") - } - - err = json.NewDecoder(resp.body).Decode(&plugins) - ensureReaderClosed(resp) - return plugins, err -} diff --git 
a/vendor/github.com/docker/docker/client/plugin_push.go b/vendor/github.com/docker/docker/client/plugin_push.go deleted file mode 100644 index d20bfe844..000000000 --- a/vendor/github.com/docker/docker/client/plugin_push.go +++ /dev/null @@ -1,16 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" -) - -// PluginPush pushes a plugin to a registry -func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go deleted file mode 100644 index 8563bab0d..000000000 --- a/vendor/github.com/docker/docker/client/plugin_remove.go +++ /dev/null @@ -1,20 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "net/url" - - "github.com/docker/docker/api/types" -) - -// PluginRemove removes a plugin -func (cli *Client) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error { - query := url.Values{} - if options.Force { - query.Set("force", "1") - } - - resp, err := cli.delete(ctx, "/plugins/"+name, query, nil) - ensureReaderClosed(resp) - return wrapResponseError(err, resp, "plugin", name) -} diff --git a/vendor/github.com/docker/docker/client/plugin_set.go b/vendor/github.com/docker/docker/client/plugin_set.go deleted file mode 100644 index dcf5752ca..000000000 --- a/vendor/github.com/docker/docker/client/plugin_set.go +++ /dev/null @@ -1,12 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" -) - -// PluginSet modifies settings for an existing plugin -func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error { - resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/plugin_upgrade.go b/vendor/github.com/docker/docker/client/plugin_upgrade.go deleted file mode 100644 index 115cea945..000000000 --- a/vendor/github.com/docker/docker/client/plugin_upgrade.go +++ /dev/null @@ -1,39 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "context" - "io" - "net/url" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/pkg/errors" -) - -// PluginUpgrade upgrades a plugin -func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { - if err := cli.NewVersionError("1.26", "plugin upgrade"); err != nil { - return nil, err - } - query := url.Values{} - if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { - return nil, errors.Wrap(err, "invalid remote reference") - } - query.Set("remote", options.RemoteRef) - - privileges, err := cli.checkPluginPermissions(ctx, query, options) - if err != nil { - return nil, err - } - - resp, err := cli.tryPluginUpgrade(ctx, query, privileges, name, options.RegistryAuth) - if err != nil { - return nil, err - } - return resp.body, nil -} - -func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) { - headers := 
map[string][]string{"X-Registry-Auth": {registryAuth}}
-	return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, headers)
-}
diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go
deleted file mode 100644
index a19d62aa5..000000000
--- a/vendor/github.com/docker/docker/client/request.go
+++ /dev/null
@@ -1,259 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"bytes"
-	"context"
-	"encoding/json"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net"
-	"net/http"
-	"net/url"
-	"os"
-	"strings"
-
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/versions"
-	"github.com/pkg/errors"
-	"golang.org/x/net/context/ctxhttp"
-)
-
-// serverResponse is a wrapper for http API responses.
-type serverResponse struct {
-	body       io.ReadCloser
-	header     http.Header
-	statusCode int
-	reqURL     *url.URL
-}
-
-// head sends an http request to the docker API using the method HEAD.
-func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) {
-	return cli.sendRequest(ctx, "HEAD", path, query, nil, headers)
-}
-
-// get sends an http request to the docker API using the method GET with a specific Go context.
-func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) {
-	return cli.sendRequest(ctx, "GET", path, query, nil, headers)
-}
-
-// post sends an http request to the docker API using the method POST with a specific Go context.
-func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) {
-	body, headers, err := encodeBody(obj, headers)
-	if err != nil {
-		return serverResponse{}, err
-	}
-	return cli.sendRequest(ctx, "POST", path, query, body, headers)
-}
-
-func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) {
-	return cli.sendRequest(ctx, "POST", path, query, body, headers)
-}
-
-// put sends an http request to the docker API using the method PUT.
-func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) {
-	body, headers, err := encodeBody(obj, headers)
-	if err != nil {
-		return serverResponse{}, err
-	}
-	return cli.sendRequest(ctx, "PUT", path, query, body, headers)
-}
-
-// putRaw sends an http request to the docker API using the method PUT.
-func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) {
-	return cli.sendRequest(ctx, "PUT", path, query, body, headers)
-}
-
-// delete sends an http request to the docker API using the method DELETE.
-func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) {
-	return cli.sendRequest(ctx, "DELETE", path, query, nil, headers)
-}
-
-type headers map[string][]string
-
-func encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) {
-	if obj == nil {
-		return nil, headers, nil
-	}
-
-	body, err := encodeData(obj)
-	if err != nil {
-		return nil, headers, err
-	}
-	if headers == nil {
-		headers = make(map[string][]string)
-	}
-	headers["Content-Type"] = []string{"application/json"}
-	return body, headers, nil
-}
-
-func (cli *Client) buildRequest(method, path string, body io.Reader, headers headers) (*http.Request, error) {
-	expectedPayload := (method == "POST" || method == "PUT")
-	if expectedPayload && body == nil {
-		body = bytes.NewReader([]byte{})
-	}
-
-	req, err := http.NewRequest(method, path, body)
-	if err != nil {
-		return nil, err
-	}
-	req = cli.addHeaders(req, headers)
-
-	if cli.proto == "unix" || cli.proto == "npipe" {
-		// For local communications, it doesn't matter what the host is. We just
-		// need a valid and meaningful host name. (See #189)
-		req.Host = "docker"
-	}
-
-	req.URL.Host = cli.addr
-	req.URL.Scheme = cli.scheme
-
-	if expectedPayload && req.Header.Get("Content-Type") == "" {
-		req.Header.Set("Content-Type", "text/plain")
-	}
-	return req, nil
-}
-
-func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) {
-	req, err := cli.buildRequest(method, cli.getAPIPath(path, query), body, headers)
-	if err != nil {
-		return serverResponse{}, err
-	}
-	resp, err := cli.doRequest(ctx, req)
-	if err != nil {
-		return resp, err
-	}
-	return resp, cli.checkResponseErr(resp)
-}
-
-func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) {
-	serverResp := serverResponse{statusCode: -1, reqURL: req.URL}
-
-	resp, err := ctxhttp.Do(ctx, cli.client, req)
-	if err != nil {
-		if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") {
-			return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err)
-		}
-
-		if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") {
-			return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certification settings: %v", err)
-		}
-
-		// Don't decorate context sentinel errors; users may be comparing to
-		// them directly.
-		switch err {
-		case context.Canceled, context.DeadlineExceeded:
-			return serverResp, err
-		}
-
-		if nErr, ok := err.(*url.Error); ok {
-			if nErr, ok := nErr.Err.(*net.OpError); ok {
-				if os.IsPermission(nErr.Err) {
-					return serverResp, errors.Wrapf(err, "Got permission denied while trying to connect to the Docker daemon socket at %v", cli.host)
-				}
-			}
-		}
-
-		if err, ok := err.(net.Error); ok {
-			if err.Timeout() {
-				return serverResp, ErrorConnectionFailed(cli.host)
-			}
-			if !err.Temporary() {
-				if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") {
-					return serverResp, ErrorConnectionFailed(cli.host)
-				}
-			}
-		}
-
-		// Although there's not a strongly typed error for this in go-winio,
-		// lots of people are using the default configuration for the docker
-		// daemon on Windows where the daemon is listening on a named pipe
-		// `//./pipe/docker_engine`, and the client must be running elevated.
-		// Give users a clue rather than the not-overly useful message
-		// such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.26/info:
-		// open //./pipe/docker_engine: The system cannot find the file specified.`.
-		// Note we can't string compare "The system cannot find the file specified" as
-		// this is localised - for example in French the error would be
-		// `open //./pipe/docker_engine: Le fichier spécifié est introuvable.`
-		if strings.Contains(err.Error(), `open //./pipe/docker_engine`) {
-			err = errors.New(err.Error() + " In the default daemon configuration on Windows, the docker client must be run elevated to connect. This error may also indicate that the docker daemon is not running.")
-		}
-
-		return serverResp, errors.Wrap(err, "error during connect")
-	}
-
-	if resp != nil {
-		serverResp.statusCode = resp.StatusCode
-		serverResp.body = resp.Body
-		serverResp.header = resp.Header
-	}
-	return serverResp, nil
-}
-
-func (cli *Client) checkResponseErr(serverResp serverResponse) error {
-	if serverResp.statusCode >= 200 && serverResp.statusCode < 400 {
-		return nil
-	}
-
-	body, err := ioutil.ReadAll(serverResp.body)
-	if err != nil {
-		return err
-	}
-	if len(body) == 0 {
-		return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL)
-	}
-
-	var ct string
-	if serverResp.header != nil {
-		ct = serverResp.header.Get("Content-Type")
-	}
-
-	var errorMessage string
-	if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && ct == "application/json" {
-		var errorResponse types.ErrorResponse
-		if err := json.Unmarshal(body, &errorResponse); err != nil {
-			return fmt.Errorf("Error reading JSON: %v", err)
-		}
-		errorMessage = errorResponse.Message
-	} else {
-		errorMessage = string(body)
-	}
-
-	return fmt.Errorf("Error response from daemon: %s", strings.TrimSpace(errorMessage))
-}
-
-func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request {
-	// Add CLI Config's HTTP Headers BEFORE we set the Docker headers
-	// so the user can't change OUR headers
-	for k, v := range cli.customHTTPHeaders {
-		if versions.LessThan(cli.version, "1.25") && k == "User-Agent" {
-			continue
-		}
-		req.Header.Set(k, v)
-	}
-
-	if headers != nil {
-		for k, v := range headers {
-			req.Header[k] = v
-		}
-	}
-	return req
-}
-
-func encodeData(data interface{}) (*bytes.Buffer, error) {
-	params := bytes.NewBuffer(nil)
-	if data != nil {
-		if err := json.NewEncoder(params).Encode(data); err != nil {
-			return nil, err
-		}
-	}
-	return params, nil
-}
-
-func ensureReaderClosed(response serverResponse) {
-	if response.body != nil {
-		// Drain up to 512 bytes and close the body to let the Transport reuse the connection
-		io.CopyN(ioutil.Discard, response.body, 512)
-		response.body.Close()
-	}
-}
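[Editor's note] Every verb helper above funnels into sendRequest, which owns API-path construction, header injection, and error mapping; the typed methods in the rest of this package only marshal their arguments and pick a path. A rough standalone sketch of that layering, for orientation only — all names below are invented for the illustration and are not part of the vendored client:

    package main

    import (
    	"bytes"
    	"encoding/json"
    	"fmt"
    	"net/http"
    )

    // sketchClient mirrors the shape above: one sendRequest that owns
    // transport-level concerns, thin per-verb wrappers on top of it.
    type sketchClient struct {
    	base string
    	http *http.Client
    }

    func (c *sketchClient) sendRequest(method, path string, body []byte) (*http.Response, error) {
    	req, err := http.NewRequest(method, c.base+path, bytes.NewReader(body))
    	if err != nil {
    		return nil, err
    	}
    	req.Header.Set("Content-Type", "application/json")
    	return c.http.Do(req)
    }

    func (c *sketchClient) get(path string) (*http.Response, error) {
    	return c.sendRequest("GET", path, nil)
    }

    func (c *sketchClient) post(path string, obj interface{}) (*http.Response, error) {
    	body, err := json.Marshal(obj) // analogous to encodeBody above
    	if err != nil {
    		return nil, err
    	}
    	return c.sendRequest("POST", path, body)
    }

    func main() {
    	c := &sketchClient{base: "http://localhost:2375", http: http.DefaultClient}
    	resp, err := c.get("/_ping")
    	fmt.Println(resp, err)
    }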
diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go
deleted file mode 100644
index 09fae82f2..000000000
--- a/vendor/github.com/docker/docker/client/secret_create.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"encoding/json"
-
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/swarm"
-)
-
-// SecretCreate creates a new Secret.
-func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) {
-	var response types.SecretCreateResponse
-	if err := cli.NewVersionError("1.25", "secret create"); err != nil {
-		return response, err
-	}
-	resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil)
-	if err != nil {
-		return response, err
-	}
-
-	err = json.NewDecoder(resp.body).Decode(&response)
-	ensureReaderClosed(resp)
-	return response, err
-}
diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go
deleted file mode 100644
index e8322f458..000000000
--- a/vendor/github.com/docker/docker/client/secret_inspect.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"bytes"
-	"context"
-	"encoding/json"
-	"io/ioutil"
-
-	"github.com/docker/docker/api/types/swarm"
-)
-
-// SecretInspectWithRaw returns the secret information with raw data
-func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) {
-	if err := cli.NewVersionError("1.25", "secret inspect"); err != nil {
-		return swarm.Secret{}, nil, err
-	}
-	if id == "" {
-		return swarm.Secret{}, nil, objectNotFoundError{object: "secret", id: id}
-	}
-	resp, err := cli.get(ctx, "/secrets/"+id, nil, nil)
-	if err != nil {
-		return swarm.Secret{}, nil, wrapResponseError(err, resp, "secret", id)
-	}
-	defer ensureReaderClosed(resp)
-
-	body, err := ioutil.ReadAll(resp.body)
-	if err != nil {
-		return swarm.Secret{}, nil, err
-	}
-
-	var secret swarm.Secret
-	rdr := bytes.NewReader(body)
-	err = json.NewDecoder(rdr).Decode(&secret)
-
-	return secret, body, err
-}
diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go
deleted file mode 100644
index f6bf7ba47..000000000
--- a/vendor/github.com/docker/docker/client/secret_list.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"encoding/json"
-	"net/url"
-
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/filters"
-	"github.com/docker/docker/api/types/swarm"
-)
-
-// SecretList returns the list of secrets.
-func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) {
-	if err := cli.NewVersionError("1.25", "secret list"); err != nil {
-		return nil, err
-	}
-	query := url.Values{}
-
-	if options.Filters.Len() > 0 {
-		filterJSON, err := filters.ToJSON(options.Filters)
-		if err != nil {
-			return nil, err
-		}
-
-		query.Set("filters", filterJSON)
-	}
-
-	resp, err := cli.get(ctx, "/secrets", query, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	var secrets []swarm.Secret
-	err = json.NewDecoder(resp.body).Decode(&secrets)
-	ensureReaderClosed(resp)
-	return secrets, err
-}
diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go
deleted file mode 100644
index e9d521829..000000000
--- a/vendor/github.com/docker/docker/client/secret_remove.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import "context"
-
-// SecretRemove removes a Secret.
-func (cli *Client) SecretRemove(ctx context.Context, id string) error {
-	if err := cli.NewVersionError("1.25", "secret remove"); err != nil {
-		return err
-	}
-	resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil)
-	ensureReaderClosed(resp)
-	return wrapResponseError(err, resp, "secret", id)
-}
diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go
deleted file mode 100644
index 164256bbc..000000000
--- a/vendor/github.com/docker/docker/client/secret_update.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"net/url"
-	"strconv"
-
-	"github.com/docker/docker/api/types/swarm"
-)
-
-// SecretUpdate attempts to update a Secret
-func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error {
-	if err := cli.NewVersionError("1.25", "secret update"); err != nil {
-		return err
-	}
-	query := url.Values{}
-	query.Set("version", strconv.FormatUint(version.Index, 10))
-	resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil)
-	ensureReaderClosed(resp)
-	return err
-}
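[Editor's note] The five files above make up the whole secret lifecycle (create, inspect, list, remove, update), each gated on API 1.25 via NewVersionError. A minimal caller-side sketch using the exported client as vendored here — the secret name and data are made up, and error handling is trimmed:

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/docker/docker/api/types/swarm"
    	"github.com/docker/docker/client"
    )

    func main() {
    	cli, err := client.NewEnvClient() // constructor from this vintage of the client
    	if err != nil {
    		panic(err)
    	}
    	ctx := context.Background()

    	created, err := cli.SecretCreate(ctx, swarm.SecretSpec{
    		Annotations: swarm.Annotations{Name: "db_password"}, // hypothetical secret
    		Data:        []byte("s3cret"),
    	})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println("created secret", created.ID)

    	if err := cli.SecretRemove(ctx, created.ID); err != nil {
    		panic(err)
    	}
    }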
diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go
deleted file mode 100644
index 8fadda4a9..000000000
--- a/vendor/github.com/docker/docker/client/service_create.go
+++ /dev/null
@@ -1,166 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"encoding/json"
-	"fmt"
-	"strings"
-
-	"github.com/docker/distribution/reference"
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/swarm"
-	"github.com/opencontainers/go-digest"
-	"github.com/pkg/errors"
-)
-
-// ServiceCreate creates a new Service.
-func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) {
-	var distErr error
-
-	headers := map[string][]string{
-		"version": {cli.version},
-	}
-
-	if options.EncodedRegistryAuth != "" {
-		headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth}
-	}
-
-	// Make sure containerSpec is not nil when no runtime is set or the runtime is set to container
-	if service.TaskTemplate.ContainerSpec == nil && (service.TaskTemplate.Runtime == "" || service.TaskTemplate.Runtime == swarm.RuntimeContainer) {
-		service.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{}
-	}
-
-	if err := validateServiceSpec(service); err != nil {
-		return types.ServiceCreateResponse{}, err
-	}
-
-	// ensure that the image is tagged
-	var imgPlatforms []swarm.Platform
-	if service.TaskTemplate.ContainerSpec != nil {
-		if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" {
-			service.TaskTemplate.ContainerSpec.Image = taggedImg
-		}
-		if options.QueryRegistry {
-			var img string
-			img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth)
-			if img != "" {
-				service.TaskTemplate.ContainerSpec.Image = img
-			}
-		}
-	}
-
-	// ensure that the image is tagged
-	if service.TaskTemplate.PluginSpec != nil {
-		if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" {
-			service.TaskTemplate.PluginSpec.Remote = taggedImg
-		}
-		if options.QueryRegistry {
-			var img string
-			img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.PluginSpec.Remote, options.EncodedRegistryAuth)
-			if img != "" {
-				service.TaskTemplate.PluginSpec.Remote = img
-			}
-		}
-	}
-
-	if service.TaskTemplate.Placement == nil && len(imgPlatforms) > 0 {
-		service.TaskTemplate.Placement = &swarm.Placement{}
-	}
-	if len(imgPlatforms) > 0 {
-		service.TaskTemplate.Placement.Platforms = imgPlatforms
-	}
-
-	var response types.ServiceCreateResponse
-	resp, err := cli.post(ctx, "/services/create", nil, service, headers)
-	if err != nil {
-		return response, err
-	}
-
-	err = json.NewDecoder(resp.body).Decode(&response)
-
-	if distErr != nil {
-		response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image))
-	}
-
-	ensureReaderClosed(resp)
-	return response, err
-}
-
-func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, image, encodedAuth string) (string, []swarm.Platform, error) {
-	distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth)
-	var platforms []swarm.Platform
-	if err != nil {
-		return "", nil, err
-	}
-
-	imageWithDigest := imageWithDigestString(image, distributionInspect.Descriptor.Digest)
-
-	if len(distributionInspect.Platforms) > 0 {
-		platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms))
-		for _, p := range distributionInspect.Platforms {
-			// clear architecture field for arm. This is a temporary patch to address
-			// https://github.com/docker/swarmkit/issues/2294. The issue is that while
-			// image manifests report "arm" as the architecture, the node reports
-			// something like "armv7l" (includes the variant), which causes arm images
-			// to stop working with swarm mode. This patch removes the architecture
-			// constraint for arm images to ensure tasks get scheduled.
-			arch := p.Architecture
-			if strings.ToLower(arch) == "arm" {
-				arch = ""
-			}
-			platforms = append(platforms, swarm.Platform{
-				Architecture: arch,
-				OS:           p.OS,
-			})
-		}
-	}
-	return imageWithDigest, platforms, err
-}
-
-// imageWithDigestString takes an image string and a digest, and updates
-// the image string if it didn't originally contain a digest. It returns
-// an empty string if there are no updates.
-func imageWithDigestString(image string, dgst digest.Digest) string {
-	namedRef, err := reference.ParseNormalizedNamed(image)
-	if err == nil {
-		if _, isCanonical := namedRef.(reference.Canonical); !isCanonical {
-			// ensure that image gets a default tag if none is provided
-			img, err := reference.WithDigest(namedRef, dgst)
-			if err == nil {
-				return reference.FamiliarString(img)
-			}
-		}
-	}
-	return ""
-}
-
-// imageWithTagString takes an image string, and returns a tagged image
-// string, adding a 'latest' tag if one was not provided. It returns an
-// empty string if a canonical reference was provided.
-func imageWithTagString(image string) string {
-	namedRef, err := reference.ParseNormalizedNamed(image)
-	if err == nil {
-		return reference.FamiliarString(reference.TagNameOnly(namedRef))
-	}
-	return ""
-}
-
-// digestWarning constructs a formatted warning string using the
-// image name that could not be pinned by digest. The formatting
-// is hardcoded, but could be made smarter in the future.
-func digestWarning(image string) string {
-	return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image)
-}
-
-func validateServiceSpec(s swarm.ServiceSpec) error {
-	if s.TaskTemplate.ContainerSpec != nil && s.TaskTemplate.PluginSpec != nil {
-		return errors.New("must not specify both a container spec and a plugin spec in the task template")
-	}
-	if s.TaskTemplate.PluginSpec != nil && s.TaskTemplate.Runtime != swarm.RuntimePlugin {
-		return errors.New("mismatched runtime with plugin spec")
-	}
-	if s.TaskTemplate.ContainerSpec != nil && (s.TaskTemplate.Runtime != "" && s.TaskTemplate.Runtime != swarm.RuntimeContainer) {
-		return errors.New("mismatched runtime with container spec")
-	}
-	return nil
-}
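[Editor's note] The two reference helpers above are easiest to follow with concrete inputs. A self-contained check using the same github.com/docker/distribution/reference calls, with expected output noted in comments:

    package main

    import (
    	"fmt"

    	"github.com/docker/distribution/reference"
    )

    func main() {
    	// A bare name is normalized and given the default "latest" tag.
    	ref, _ := reference.ParseNormalizedNamed("redis")
    	fmt.Println(reference.FamiliarString(reference.TagNameOnly(ref))) // redis:latest

    	// An explicitly tagged name passes through unchanged.
    	ref, _ = reference.ParseNormalizedNamed("redis:5")
    	fmt.Println(reference.FamiliarString(reference.TagNameOnly(ref))) // redis:5
    }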
diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go
deleted file mode 100644
index de6aa22de..000000000
--- a/vendor/github.com/docker/docker/client/service_inspect.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"bytes"
-	"context"
-	"encoding/json"
-	"fmt"
-	"io/ioutil"
-	"net/url"
-
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/swarm"
-)
-
-// ServiceInspectWithRaw returns the service information and the raw data.
-func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) {
-	if serviceID == "" {
-		return swarm.Service{}, nil, objectNotFoundError{object: "service", id: serviceID}
-	}
-	query := url.Values{}
-	query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults))
-	serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil)
-	if err != nil {
-		return swarm.Service{}, nil, wrapResponseError(err, serverResp, "service", serviceID)
-	}
-	defer ensureReaderClosed(serverResp)
-
-	body, err := ioutil.ReadAll(serverResp.body)
-	if err != nil {
-		return swarm.Service{}, nil, err
-	}
-
-	var response swarm.Service
-	rdr := bytes.NewReader(body)
-	err = json.NewDecoder(rdr).Decode(&response)
-	return response, body, err
-}
diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go
deleted file mode 100644
index 7d53e2b9b..000000000
--- a/vendor/github.com/docker/docker/client/service_list.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"encoding/json"
-	"net/url"
-
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/filters"
-	"github.com/docker/docker/api/types/swarm"
-)
-
-// ServiceList returns the list of services.
-func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) {
-	query := url.Values{}
-
-	if options.Filters.Len() > 0 {
-		filterJSON, err := filters.ToJSON(options.Filters)
-		if err != nil {
-			return nil, err
-		}
-
-		query.Set("filters", filterJSON)
-	}
-
-	resp, err := cli.get(ctx, "/services", query, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	var services []swarm.Service
-	err = json.NewDecoder(resp.body).Decode(&services)
-	ensureReaderClosed(resp)
-	return services, err
-}
diff --git a/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/docker/docker/client/service_logs.go
deleted file mode 100644
index 906fd4059..000000000
--- a/vendor/github.com/docker/docker/client/service_logs.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"io"
-	"net/url"
-	"time"
-
-	"github.com/docker/docker/api/types"
-	timetypes "github.com/docker/docker/api/types/time"
-	"github.com/pkg/errors"
-)
-
-// ServiceLogs returns the logs generated by a service in an io.ReadCloser.
-// It's up to the caller to close the stream.
-func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
-	query := url.Values{}
-	if options.ShowStdout {
-		query.Set("stdout", "1")
-	}
-
-	if options.ShowStderr {
-		query.Set("stderr", "1")
-	}
-
-	if options.Since != "" {
-		ts, err := timetypes.GetTimestamp(options.Since, time.Now())
-		if err != nil {
-			return nil, errors.Wrap(err, `invalid value for "since"`)
-		}
-		query.Set("since", ts)
-	}
-
-	if options.Timestamps {
-		query.Set("timestamps", "1")
-	}
-
-	if options.Details {
-		query.Set("details", "1")
-	}
-
-	if options.Follow {
-		query.Set("follow", "1")
-	}
-	query.Set("tail", options.Tail)
-
-	resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil)
-	if err != nil {
-		return nil, err
-	}
-	return resp.body, nil
-}
diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go
deleted file mode 100644
index fe3421bec..000000000
--- a/vendor/github.com/docker/docker/client/service_remove.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import "context"
-
-// ServiceRemove kills and removes a service.
-func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error {
-	resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil)
-	ensureReaderClosed(resp)
-	return wrapResponseError(err, resp, "service", serviceID)
-}
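[Editor's note] ServiceLogs above translates ContainerLogsOptions field by field into query parameters and hands the raw stream back to the caller to close. A short usage sketch against the vendored API — the service name is hypothetical:

    package main

    import (
    	"context"
    	"io"
    	"os"

    	"github.com/docker/docker/api/types"
    	"github.com/docker/docker/client"
    )

    func main() {
    	cli, err := client.NewEnvClient()
    	if err != nil {
    		panic(err)
    	}

    	rc, err := cli.ServiceLogs(context.Background(), "my_service", types.ContainerLogsOptions{
    		ShowStdout: true,
    		Timestamps: true,
    		Tail:       "100",
    	})
    	if err != nil {
    		panic(err)
    	}
    	defer rc.Close() // the stream is the caller's to close, per the doc comment

    	io.Copy(os.Stdout, rc)
    }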
diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go
deleted file mode 100644
index 5a7a61b01..000000000
--- a/vendor/github.com/docker/docker/client/service_update.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"encoding/json"
-	"net/url"
-	"strconv"
-
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/swarm"
-)
-
-// ServiceUpdate updates a Service.
-func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) {
-	var (
-		query   = url.Values{}
-		distErr error
-	)
-
-	headers := map[string][]string{
-		"version": {cli.version},
-	}
-
-	if options.EncodedRegistryAuth != "" {
-		headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth}
-	}
-
-	if options.RegistryAuthFrom != "" {
-		query.Set("registryAuthFrom", options.RegistryAuthFrom)
-	}
-
-	if options.Rollback != "" {
-		query.Set("rollback", options.Rollback)
-	}
-
-	query.Set("version", strconv.FormatUint(version.Index, 10))
-
-	if err := validateServiceSpec(service); err != nil {
-		return types.ServiceUpdateResponse{}, err
-	}
-
-	var imgPlatforms []swarm.Platform
-	// ensure that the image is tagged
-	if service.TaskTemplate.ContainerSpec != nil {
-		if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" {
-			service.TaskTemplate.ContainerSpec.Image = taggedImg
-		}
-		if options.QueryRegistry {
-			var img string
-			img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth)
-			if img != "" {
-				service.TaskTemplate.ContainerSpec.Image = img
-			}
-		}
-	}
-
-	// ensure that the image is tagged
-	if service.TaskTemplate.PluginSpec != nil {
-		if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" {
-			service.TaskTemplate.PluginSpec.Remote = taggedImg
-		}
-		if options.QueryRegistry {
-			var img string
-			img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.PluginSpec.Remote, options.EncodedRegistryAuth)
-			if img != "" {
-				service.TaskTemplate.PluginSpec.Remote = img
-			}
-		}
-	}
-
-	if service.TaskTemplate.Placement == nil && len(imgPlatforms) > 0 {
-		service.TaskTemplate.Placement = &swarm.Placement{}
-	}
-	if len(imgPlatforms) > 0 {
-		service.TaskTemplate.Placement.Platforms = imgPlatforms
-	}
-
-	var response types.ServiceUpdateResponse
-	resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers)
-	if err != nil {
-		return response, err
-	}
-
-	err = json.NewDecoder(resp.body).Decode(&response)
-
-	if distErr != nil {
-		response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image))
-	}
-
-	ensureReaderClosed(resp)
-	return response, err
-}
diff --git a/vendor/github.com/docker/docker/client/session.go b/vendor/github.com/docker/docker/client/session.go
deleted file mode 100644
index c247123b4..000000000
--- a/vendor/github.com/docker/docker/client/session.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"net"
-	"net/http"
-)
-
-// DialSession returns a connection that can be used for communication with the daemon
-func (cli *Client) DialSession(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) {
-	req, err := http.NewRequest("POST", "/session", nil)
-	if err != nil {
-		return nil, err
-	}
-	req = cli.addHeaders(req, meta)
-
-	return cli.setupHijackConn(req, proto)
-}
diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go
deleted file mode 100644
index 0c50c01a8..000000000
--- a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"encoding/json"
-
-	"github.com/docker/docker/api/types"
-)
-
-// SwarmGetUnlockKey retrieves the swarm's unlock key.
-func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) {
-	serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil)
-	if err != nil {
-		return types.SwarmUnlockKeyResponse{}, err
-	}
-
-	var response types.SwarmUnlockKeyResponse
-	err = json.NewDecoder(serverResp.body).Decode(&response)
-	ensureReaderClosed(serverResp)
-	return response, err
-}
diff --git a/vendor/github.com/docker/docker/client/swarm_init.go b/vendor/github.com/docker/docker/client/swarm_init.go
deleted file mode 100644
index 742ca0f04..000000000
--- a/vendor/github.com/docker/docker/client/swarm_init.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"encoding/json"
-
-	"github.com/docker/docker/api/types/swarm"
-)
-
-// SwarmInit initializes the swarm.
-func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) {
-	serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil)
-	if err != nil {
-		return "", err
-	}
-
-	var response string
-	err = json.NewDecoder(serverResp.body).Decode(&response)
-	ensureReaderClosed(serverResp)
-	return response, err
-}
diff --git a/vendor/github.com/docker/docker/client/swarm_inspect.go b/vendor/github.com/docker/docker/client/swarm_inspect.go
deleted file mode 100644
index cfaabb25b..000000000
--- a/vendor/github.com/docker/docker/client/swarm_inspect.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"encoding/json"
-
-	"github.com/docker/docker/api/types/swarm"
-)
-
-// SwarmInspect inspects the swarm.
-func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) {
-	serverResp, err := cli.get(ctx, "/swarm", nil, nil)
-	if err != nil {
-		return swarm.Swarm{}, err
-	}
-
-	var response swarm.Swarm
-	err = json.NewDecoder(serverResp.body).Decode(&response)
-	ensureReaderClosed(serverResp)
-	return response, err
-}
diff --git a/vendor/github.com/docker/docker/client/swarm_join.go b/vendor/github.com/docker/docker/client/swarm_join.go
deleted file mode 100644
index a1cf0455d..000000000
--- a/vendor/github.com/docker/docker/client/swarm_join.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-
-	"github.com/docker/docker/api/types/swarm"
-)
-
-// SwarmJoin joins the swarm.
-func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error {
-	resp, err := cli.post(ctx, "/swarm/join", nil, req, nil)
-	ensureReaderClosed(resp)
-	return err
-}
diff --git a/vendor/github.com/docker/docker/client/swarm_leave.go b/vendor/github.com/docker/docker/client/swarm_leave.go
deleted file mode 100644
index 90ca84b36..000000000
--- a/vendor/github.com/docker/docker/client/swarm_leave.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"net/url"
-)
-
-// SwarmLeave leaves the swarm.
-func (cli *Client) SwarmLeave(ctx context.Context, force bool) error {
-	query := url.Values{}
-	if force {
-		query.Set("force", "1")
-	}
-	resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil)
-	ensureReaderClosed(resp)
-	return err
-}
diff --git a/vendor/github.com/docker/docker/client/swarm_unlock.go b/vendor/github.com/docker/docker/client/swarm_unlock.go
deleted file mode 100644
index d2412f7d4..000000000
--- a/vendor/github.com/docker/docker/client/swarm_unlock.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-
-	"github.com/docker/docker/api/types/swarm"
-)
-
-// SwarmUnlock unlocks a locked swarm.
-func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error {
-	serverResp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil)
-	ensureReaderClosed(serverResp)
-	return err
-}
diff --git a/vendor/github.com/docker/docker/client/swarm_update.go b/vendor/github.com/docker/docker/client/swarm_update.go
deleted file mode 100644
index 56a5bea76..000000000
--- a/vendor/github.com/docker/docker/client/swarm_update.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"fmt"
-	"net/url"
-	"strconv"
-
-	"github.com/docker/docker/api/types/swarm"
-)
-
-// SwarmUpdate updates the swarm.
-func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error {
-	query := url.Values{}
-	query.Set("version", strconv.FormatUint(version.Index, 10))
-	query.Set("rotateWorkerToken", fmt.Sprintf("%v", flags.RotateWorkerToken))
-	query.Set("rotateManagerToken", fmt.Sprintf("%v", flags.RotateManagerToken))
-	query.Set("rotateManagerUnlockKey", fmt.Sprintf("%v", flags.RotateManagerUnlockKey))
-	resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil)
-	ensureReaderClosed(resp)
-	return err
-}
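[Editor's note] The swarm mutation endpoints above share an optimistic-locking convention: the caller passes the last known swarm.Version index and the daemon rejects out-of-sequence updates. A sketch of rotating the worker join token with the vendored client:

    package main

    import (
    	"context"

    	"github.com/docker/docker/api/types/swarm"
    	"github.com/docker/docker/client"
    )

    func main() {
    	cli, err := client.NewEnvClient()
    	if err != nil {
    		panic(err)
    	}
    	ctx := context.Background()

    	// Read the current spec and version index first...
    	sw, err := cli.SwarmInspect(ctx)
    	if err != nil {
    		panic(err)
    	}

    	// ...then update against that version so concurrent writers are detected.
    	if err := cli.SwarmUpdate(ctx, sw.Version, sw.Spec, swarm.UpdateFlags{
    		RotateWorkerToken: true,
    	}); err != nil {
    		panic(err)
    	}
    }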
diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go
deleted file mode 100644
index e1c0a736d..000000000
--- a/vendor/github.com/docker/docker/client/task_inspect.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"bytes"
-	"context"
-	"encoding/json"
-	"io/ioutil"
-
-	"github.com/docker/docker/api/types/swarm"
-)
-
-// TaskInspectWithRaw returns the task information and its raw representation.
-func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) {
-	if taskID == "" {
-		return swarm.Task{}, nil, objectNotFoundError{object: "task", id: taskID}
-	}
-	serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil)
-	if err != nil {
-		return swarm.Task{}, nil, wrapResponseError(err, serverResp, "task", taskID)
-	}
-	defer ensureReaderClosed(serverResp)
-
-	body, err := ioutil.ReadAll(serverResp.body)
-	if err != nil {
-		return swarm.Task{}, nil, err
-	}
-
-	var response swarm.Task
-	rdr := bytes.NewReader(body)
-	err = json.NewDecoder(rdr).Decode(&response)
-	return response, body, err
-}
diff --git a/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/docker/docker/client/task_list.go
deleted file mode 100644
index 42d20c1b8..000000000
--- a/vendor/github.com/docker/docker/client/task_list.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"encoding/json"
-	"net/url"
-
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/filters"
-	"github.com/docker/docker/api/types/swarm"
-)
-
-// TaskList returns the list of tasks.
-func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) {
-	query := url.Values{}
-
-	if options.Filters.Len() > 0 {
-		filterJSON, err := filters.ToJSON(options.Filters)
-		if err != nil {
-			return nil, err
-		}
-
-		query.Set("filters", filterJSON)
-	}
-
-	resp, err := cli.get(ctx, "/tasks", query, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	var tasks []swarm.Task
-	err = json.NewDecoder(resp.body).Decode(&tasks)
-	ensureReaderClosed(resp)
-	return tasks, err
-}
diff --git a/vendor/github.com/docker/docker/client/task_logs.go b/vendor/github.com/docker/docker/client/task_logs.go
deleted file mode 100644
index 6222fab57..000000000
--- a/vendor/github.com/docker/docker/client/task_logs.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"io"
-	"net/url"
-	"time"
-
-	"github.com/docker/docker/api/types"
-	timetypes "github.com/docker/docker/api/types/time"
-)
-
-// TaskLogs returns the logs generated by a task in an io.ReadCloser.
-// It's up to the caller to close the stream.
-func (cli *Client) TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) {
-	query := url.Values{}
-	if options.ShowStdout {
-		query.Set("stdout", "1")
-	}
-
-	if options.ShowStderr {
-		query.Set("stderr", "1")
-	}
-
-	if options.Since != "" {
-		ts, err := timetypes.GetTimestamp(options.Since, time.Now())
-		if err != nil {
-			return nil, err
-		}
-		query.Set("since", ts)
-	}
-
-	if options.Timestamps {
-		query.Set("timestamps", "1")
-	}
-
-	if options.Details {
-		query.Set("details", "1")
-	}
-
-	if options.Follow {
-		query.Set("follow", "1")
-	}
-	query.Set("tail", options.Tail)
-
-	resp, err := cli.get(ctx, "/tasks/"+taskID+"/logs", query, nil)
-	if err != nil {
-		return nil, err
-	}
-	return resp.body, nil
-}
diff --git a/vendor/github.com/docker/docker/client/transport.go b/vendor/github.com/docker/docker/client/transport.go
deleted file mode 100644
index 554134436..000000000
--- a/vendor/github.com/docker/docker/client/transport.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"crypto/tls"
-	"net/http"
-)
-
-// resolveTLSConfig attempts to resolve the TLS configuration from the
-// RoundTripper.
-func resolveTLSConfig(transport http.RoundTripper) *tls.Config {
-	switch tr := transport.(type) {
-	case *http.Transport:
-		return tr.TLSClientConfig
-	default:
-		return nil
-	}
-}
diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/docker/docker/client/utils.go
deleted file mode 100644
index 7f3ff44eb..000000000
--- a/vendor/github.com/docker/docker/client/utils.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"net/url"
-	"regexp"
-
-	"github.com/docker/docker/api/types/filters"
-)
-
-var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`)
-
-// getDockerOS returns the operating system based on the server header from the daemon.
-func getDockerOS(serverHeader string) string {
-	var osType string
-	matches := headerRegexp.FindStringSubmatch(serverHeader)
-	if len(matches) > 0 {
-		osType = matches[1]
-	}
-	return osType
-}
-
-// getFiltersQuery returns a url query with "filters" query term, based on the
-// filters provided.
-func getFiltersQuery(f filters.Args) (url.Values, error) {
-	query := url.Values{}
-	if f.Len() > 0 {
-		filterJSON, err := filters.ToJSON(f)
-		if err != nil {
-			return query, err
-		}
-		query.Set("filters", filterJSON)
-	}
-	return query, nil
-}
diff --git a/vendor/github.com/docker/docker/client/version.go b/vendor/github.com/docker/docker/client/version.go
deleted file mode 100644
index 1989f6d6d..000000000
--- a/vendor/github.com/docker/docker/client/version.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"encoding/json"
-
-	"github.com/docker/docker/api/types"
-)
-
-// ServerVersion returns information about the docker client and server host.
-func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) {
-	resp, err := cli.get(ctx, "/version", nil, nil)
-	if err != nil {
-		return types.Version{}, err
-	}
-
-	var server types.Version
-	err = json.NewDecoder(resp.body).Decode(&server)
-	ensureReaderClosed(resp)
-	return server, err
-}
diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go
deleted file mode 100644
index f1f6fcdc4..000000000
--- a/vendor/github.com/docker/docker/client/volume_create.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"encoding/json"
-
-	"github.com/docker/docker/api/types"
-	volumetypes "github.com/docker/docker/api/types/volume"
-)
-
-// VolumeCreate creates a volume in the docker host.
-func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) {
-	var volume types.Volume
-	resp, err := cli.post(ctx, "/volumes/create", nil, options, nil)
-	if err != nil {
-		return volume, err
-	}
-	err = json.NewDecoder(resp.body).Decode(&volume)
-	ensureReaderClosed(resp)
-	return volume, err
-}
diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go
deleted file mode 100644
index f840682d2..000000000
--- a/vendor/github.com/docker/docker/client/volume_inspect.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"bytes"
-	"context"
-	"encoding/json"
-	"io/ioutil"
-
-	"github.com/docker/docker/api/types"
-)
-
-// VolumeInspect returns the information about a specific volume in the docker host.
-func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) {
-	volume, _, err := cli.VolumeInspectWithRaw(ctx, volumeID)
-	return volume, err
-}
-
-// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation
-func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) {
-	if volumeID == "" {
-		return types.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID}
-	}
-
-	var volume types.Volume
-	resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil)
-	if err != nil {
-		return volume, nil, wrapResponseError(err, resp, "volume", volumeID)
-	}
-	defer ensureReaderClosed(resp)
-
-	body, err := ioutil.ReadAll(resp.body)
-	if err != nil {
-		return volume, nil, err
-	}
-	rdr := bytes.NewReader(body)
-	err = json.NewDecoder(rdr).Decode(&volume)
-	return volume, body, err
-}
diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go
deleted file mode 100644
index 284554d67..000000000
--- a/vendor/github.com/docker/docker/client/volume_list.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"encoding/json"
-	"net/url"
-
-	"github.com/docker/docker/api/types/filters"
-	volumetypes "github.com/docker/docker/api/types/volume"
-)
-
-// VolumeList returns the volumes configured in the docker host.
-func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) {
-	var volumes volumetypes.VolumeListOKBody
-	query := url.Values{}
-
-	if filter.Len() > 0 {
-		filterJSON, err := filters.ToParamWithVersion(cli.version, filter)
-		if err != nil {
-			return volumes, err
-		}
-		query.Set("filters", filterJSON)
-	}
-	resp, err := cli.get(ctx, "/volumes", query, nil)
-	if err != nil {
-		return volumes, err
-	}
-
-	err = json.NewDecoder(resp.body).Decode(&volumes)
-	ensureReaderClosed(resp)
-	return volumes, err
-}
diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go
deleted file mode 100644
index 70041efed..000000000
--- a/vendor/github.com/docker/docker/client/volume_prune.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"encoding/json"
-	"fmt"
-
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/filters"
-)
-
-// VolumesPrune requests the daemon to delete unused data
-func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (types.VolumesPruneReport, error) {
-	var report types.VolumesPruneReport
-
-	if err := cli.NewVersionError("1.25", "volume prune"); err != nil {
-		return report, err
-	}
-
-	query, err := getFiltersQuery(pruneFilters)
-	if err != nil {
-		return report, err
-	}
-
-	serverResp, err := cli.post(ctx, "/volumes/prune", query, nil, nil)
-	if err != nil {
-		return report, err
-	}
-	defer ensureReaderClosed(serverResp)
-
-	if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
-		return report, fmt.Errorf("Error retrieving volume prune report: %v", err)
-	}
-
-	return report, nil
-}
diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go
deleted file mode 100644
index fc5a71d33..000000000
--- a/vendor/github.com/docker/docker/client/volume_remove.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client // import "github.com/docker/docker/client"
-
-import (
-	"context"
-	"net/url"
-
-	"github.com/docker/docker/api/types/versions"
-)
-
-// VolumeRemove removes a volume from the docker host.
-func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error {
-	query := url.Values{}
-	if versions.GreaterThanOrEqualTo(cli.version, "1.25") {
-		if force {
-			query.Set("force", "1")
-		}
-	}
-	resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil)
-	ensureReaderClosed(resp)
-	return wrapResponseError(err, resp, "volume", volumeID)
-}
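[Editor's note] VolumeRemove above shows the client's version-gating idiom in miniature: query parameters that only exist in newer API versions are added conditionally, using the comparison helpers from api/types/versions. The same helpers in isolation:

    package main

    import (
    	"fmt"
    	"net/url"

    	"github.com/docker/docker/api/types/versions"
    )

    func main() {
    	clientVersion := "1.24" // e.g. as negotiated with the daemon

    	query := url.Values{}
    	if versions.GreaterThanOrEqualTo(clientVersion, "1.25") {
    		query.Set("force", "1") // "force" is only understood by API >= 1.25
    	}
    	fmt.Printf("query: %q\n", query.Encode()) // empty for 1.24, "force=1" for 1.25+
    }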
diff --git a/vendor/github.com/docker/docker/cmd/dockerd/config.go b/vendor/github.com/docker/docker/cmd/dockerd/config.go
deleted file mode 100644
index abdac9a7f..000000000
--- a/vendor/github.com/docker/docker/cmd/dockerd/config.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package main
-
-import (
-	"runtime"
-
-	"github.com/docker/docker/daemon/config"
-	"github.com/docker/docker/opts"
-	"github.com/docker/docker/registry"
-	"github.com/spf13/pflag"
-)
-
-const (
-	// defaultShutdownTimeout is the default shutdown timeout for the daemon
-	defaultShutdownTimeout = 15
-	// defaultTrustKeyFile is the default filename for the trust key
-	defaultTrustKeyFile = "key.json"
-)
-
-// installCommonConfigFlags adds flags to the pflag.FlagSet to configure the daemon
-func installCommonConfigFlags(conf *config.Config, flags *pflag.FlagSet) {
-	var maxConcurrentDownloads, maxConcurrentUploads int
-
-	installRegistryServiceFlags(&conf.ServiceOptions, flags)
-
-	flags.Var(opts.NewNamedListOptsRef("storage-opts", &conf.GraphOptions, nil), "storage-opt", "Storage driver options")
-	flags.Var(opts.NewNamedListOptsRef("authorization-plugins", &conf.AuthorizationPlugins, nil), "authorization-plugin", "Authorization plugins to load")
-	flags.Var(opts.NewNamedListOptsRef("exec-opts", &conf.ExecOptions, nil), "exec-opt", "Runtime execution options")
-	flags.StringVarP(&conf.Pidfile, "pidfile", "p", defaultPidFile, "Path to use for daemon PID file")
-	flags.StringVarP(&conf.Root, "graph", "g", defaultDataRoot, "Root of the Docker runtime")
-	flags.StringVar(&conf.ExecRoot, "exec-root", defaultExecRoot, "Root directory for execution state files")
-	flags.StringVar(&conf.ContainerdAddr, "containerd", "", "containerd grpc address")
-
-	// "--graph" is "soft-deprecated" in favor of "data-root". This flag was added
-	// before Docker 1.0, so won't be removed, only hidden, to discourage its usage.
-	flags.MarkHidden("graph")
-
-	flags.StringVar(&conf.Root, "data-root", defaultDataRoot, "Root directory of persistent Docker state")
-
-	flags.BoolVarP(&conf.AutoRestart, "restart", "r", true, "--restart on the daemon has been deprecated in favor of --restart policies on docker run")
-	flags.MarkDeprecated("restart", "Please use a restart policy on docker run")
-
-	// Windows doesn't support setting the storage driver - there is no choice as to which ones to use.
-	if runtime.GOOS != "windows" {
-		flags.StringVarP(&conf.GraphDriver, "storage-driver", "s", "", "Storage driver to use")
-	}
-
-	flags.IntVar(&conf.Mtu, "mtu", 0, "Set the containers network MTU")
-	flags.BoolVar(&conf.RawLogs, "raw-logs", false, "Full timestamps without ANSI coloring")
-	flags.Var(opts.NewListOptsRef(&conf.DNS, opts.ValidateIPAddress), "dns", "DNS server to use")
-	flags.Var(opts.NewNamedListOptsRef("dns-opts", &conf.DNSOptions, nil), "dns-opt", "DNS options to use")
-	flags.Var(opts.NewListOptsRef(&conf.DNSSearch, opts.ValidateDNSSearch), "dns-search", "DNS search domains to use")
-	flags.Var(opts.NewNamedListOptsRef("labels", &conf.Labels, opts.ValidateLabel), "label", "Set key=value labels to the daemon")
-	flags.StringVar(&conf.LogConfig.Type, "log-driver", "json-file", "Default driver for container logs")
-	flags.Var(opts.NewNamedMapOpts("log-opts", conf.LogConfig.Config, nil), "log-opt", "Default log driver options for containers")
-	flags.StringVar(&conf.ClusterAdvertise, "cluster-advertise", "", "Address or interface name to advertise")
-	flags.StringVar(&conf.ClusterStore, "cluster-store", "", "URL of the distributed storage backend")
-	flags.Var(opts.NewNamedMapOpts("cluster-store-opts", conf.ClusterOpts, nil), "cluster-store-opt", "Set cluster store options")
-	flags.StringVar(&conf.CorsHeaders, "api-cors-header", "", "Set CORS headers in the Engine API")
-	flags.IntVar(&maxConcurrentDownloads, "max-concurrent-downloads", config.DefaultMaxConcurrentDownloads, "Set the max concurrent downloads for each pull")
-	flags.IntVar(&maxConcurrentUploads, "max-concurrent-uploads", config.DefaultMaxConcurrentUploads, "Set the max concurrent uploads for each push")
-	flags.IntVar(&conf.ShutdownTimeout, "shutdown-timeout", defaultShutdownTimeout, "Set the default shutdown timeout")
-	flags.IntVar(&conf.NetworkDiagnosticPort, "network-diagnostic-port", 0, "TCP port number of the network diagnostic server")
-	flags.MarkHidden("network-diagnostic-port")
-
-	flags.StringVar(&conf.SwarmDefaultAdvertiseAddr, "swarm-default-advertise-addr", "", "Set default address or interface for swarm advertised address")
-	flags.BoolVar(&conf.Experimental, "experimental", false, "Enable experimental features")
-
-	flags.StringVar(&conf.MetricsAddress, "metrics-addr", "", "Set default address and port to serve the metrics api on")
-
-	flags.Var(opts.NewNamedListOptsRef("node-generic-resources", &conf.NodeGenericResources, opts.ValidateSingleGenericResource), "node-generic-resource", "Advertise user-defined resource")
-
-	flags.IntVar(&conf.NetworkControlPlaneMTU, "network-control-plane-mtu", config.DefaultNetworkMtu, "Network Control plane MTU")
-
-	// "--deprecated-key-path" is to allow configuration of the key used
-	// for the daemon ID and the deprecated image signing. It was never
-	// exposed as a command line option but is added here to allow
-	// overriding the default path in configuration.
-	flags.Var(opts.NewQuotedString(&conf.TrustKeyPath), "deprecated-key-path", "Path to key file for ID and image signing")
-	flags.MarkHidden("deprecated-key-path")
-
-	conf.MaxConcurrentDownloads = &maxConcurrentDownloads
-	conf.MaxConcurrentUploads = &maxConcurrentUploads
-}
-
-func installRegistryServiceFlags(options *registry.ServiceOptions, flags *pflag.FlagSet) {
-	ana := opts.NewNamedListOptsRef("allow-nondistributable-artifacts", &options.AllowNondistributableArtifacts, registry.ValidateIndexName)
-	mirrors := opts.NewNamedListOptsRef("registry-mirrors", &options.Mirrors, registry.ValidateMirror)
-	insecureRegistries := opts.NewNamedListOptsRef("insecure-registries", &options.InsecureRegistries, registry.ValidateIndexName)
-
-	flags.Var(ana, "allow-nondistributable-artifacts", "Allow push of nondistributable artifacts to registry")
-	flags.Var(mirrors, "registry-mirror", "Preferred Docker registry mirror")
-	flags.Var(insecureRegistries, "insecure-registry", "Enable insecure registry communication")
-
-	if runtime.GOOS != "windows" {
-		// TODO: Remove this flag after 3 release cycles (18.03)
-		flags.BoolVar(&options.V2Only, "disable-legacy-registry", true, "Disable contacting legacy registries")
-		flags.MarkHidden("disable-legacy-registry")
-	}
-}
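[Editor's note] installRegistryServiceFlags repeats one pattern three times: a named list option whose elements are validated as they are parsed. A stripped-down pflag sketch of that pattern — the flag name is real, but the inline validator merely stands in for registry.ValidateMirror:

    package main

    import (
    	"fmt"
    	"strings"

    	"github.com/spf13/pflag"
    )

    func main() {
    	flags := pflag.NewFlagSet("dockerd-sketch", pflag.ContinueOnError)

    	var mirrors []string
    	flags.StringArrayVar(&mirrors, "registry-mirror", nil, "Preferred Docker registry mirror")

    	_ = flags.Parse([]string{"--registry-mirror=https://mirror.example.com"})

    	for _, m := range mirrors {
    		if !strings.HasPrefix(m, "https://") { // stand-in for registry.ValidateMirror
    			fmt.Println("invalid mirror:", m)
    			continue
    		}
    		fmt.Println("mirror:", m)
    	}
    }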
diff --git a/vendor/github.com/docker/docker/cmd/dockerd/config_common_unix.go b/vendor/github.com/docker/docker/cmd/dockerd/config_common_unix.go
deleted file mode 100644
index febf30ae9..000000000
--- a/vendor/github.com/docker/docker/cmd/dockerd/config_common_unix.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// +build linux freebsd
-
-package main
-
-import (
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/daemon/config"
-	"github.com/docker/docker/opts"
-	"github.com/spf13/pflag"
-)
-
-var (
-	defaultPidFile  = "/var/run/docker.pid"
-	defaultDataRoot = "/var/lib/docker"
-	defaultExecRoot = "/var/run/docker"
-)
-
-// installUnixConfigFlags adds command-line options to the top-level flag parser for
-// the current process that are common across Unix platforms.
-func installUnixConfigFlags(conf *config.Config, flags *pflag.FlagSet) {
-	conf.Runtimes = make(map[string]types.Runtime)
-
-	flags.StringVarP(&conf.SocketGroup, "group", "G", "docker", "Group for the unix socket")
-	flags.StringVar(&conf.BridgeConfig.IP, "bip", "", "Specify network bridge IP")
-	flags.StringVarP(&conf.BridgeConfig.Iface, "bridge", "b", "", "Attach containers to a network bridge")
-	flags.StringVar(&conf.BridgeConfig.FixedCIDR, "fixed-cidr", "", "IPv4 subnet for fixed IPs")
-	flags.Var(opts.NewIPOpt(&conf.BridgeConfig.DefaultGatewayIPv4, ""), "default-gateway", "Container default gateway IPv4 address")
-	flags.Var(opts.NewIPOpt(&conf.BridgeConfig.DefaultGatewayIPv6, ""), "default-gateway-v6", "Container default gateway IPv6 address")
-	flags.BoolVar(&conf.BridgeConfig.InterContainerCommunication, "icc", true, "Enable inter-container communication")
-	flags.Var(opts.NewIPOpt(&conf.BridgeConfig.DefaultIP, "0.0.0.0"), "ip", "Default IP when binding container ports")
-	flags.Var(opts.NewNamedRuntimeOpt("runtimes", &conf.Runtimes, config.StockRuntimeName), "add-runtime", "Register an additional OCI compatible runtime")
-	flags.StringVar(&conf.DefaultRuntime, "default-runtime", config.StockRuntimeName, "Default OCI runtime for containers")
-
-}
diff --git a/vendor/github.com/docker/docker/cmd/dockerd/config_unix.go b/vendor/github.com/docker/docker/cmd/dockerd/config_unix.go
deleted file mode 100644
index 2dbd84b1d..000000000
--- a/vendor/github.com/docker/docker/cmd/dockerd/config_unix.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// +build linux freebsd
-
-package main
-
-import (
-	"github.com/docker/docker/daemon/config"
-	"github.com/docker/docker/opts"
-	"github.com/docker/go-units"
-	"github.com/spf13/pflag"
-)
-
-// installConfigFlags adds flags to the pflag.FlagSet to configure the daemon
-func installConfigFlags(conf *config.Config, flags *pflag.FlagSet) {
-	// First handle install flags which are consistent cross-platform
-	installCommonConfigFlags(conf, flags)
-
-	// Then install flags common to unix platforms
-	installUnixConfigFlags(conf, flags)
-
-	conf.Ulimits = make(map[string]*units.Ulimit)
-	conf.NetworkConfig.DefaultAddressPools = opts.PoolsOpt{}
-
-	// Set default value for `--default-shm-size`
-	conf.ShmSize = opts.MemBytes(config.DefaultShmSize)
-
-	// Then platform-specific install flags
-	flags.BoolVar(&conf.EnableSelinuxSupport, "selinux-enabled", false, "Enable selinux support")
-	flags.Var(opts.NewNamedUlimitOpt("default-ulimits", &conf.Ulimits), "default-ulimit", "Default ulimits for containers")
-	flags.BoolVar(&conf.BridgeConfig.EnableIPTables, "iptables", true, "Enable addition of iptables rules")
-	flags.BoolVar(&conf.BridgeConfig.EnableIPForward, "ip-forward", true, "Enable net.ipv4.ip_forward")
-	flags.BoolVar(&conf.BridgeConfig.EnableIPMasq, "ip-masq", true, "Enable IP masquerading")
-	flags.BoolVar(&conf.BridgeConfig.EnableIPv6, "ipv6", false, "Enable IPv6 networking")
-	flags.StringVar(&conf.BridgeConfig.FixedCIDRv6, "fixed-cidr-v6", "", "IPv6 subnet for fixed IPs")
-	flags.BoolVar(&conf.BridgeConfig.EnableUserlandProxy, "userland-proxy", true, "Use userland proxy for loopback traffic")
-	flags.StringVar(&conf.BridgeConfig.UserlandProxyPath, "userland-proxy-path", "", "Path to the userland proxy binary")
-	flags.StringVar(&conf.CgroupParent, "cgroup-parent", "", "Set parent cgroup for all containers")
-	flags.StringVar(&conf.RemappedRoot, "userns-remap", "", "User/Group setting for user namespaces")
-	flags.BoolVar(&conf.LiveRestoreEnabled, "live-restore", false, "Enable live restore of docker when containers are still running")
-	flags.IntVar(&conf.OOMScoreAdjust, "oom-score-adjust", -500, "Set the oom_score_adj for the daemon")
-	flags.BoolVar(&conf.Init, "init", false, "Run an init in the container to forward signals and reap processes")
-	flags.StringVar(&conf.InitPath, "init-path", "", "Path to the docker-init binary")
-	flags.Int64Var(&conf.CPURealtimePeriod, "cpu-rt-period", 0, "Limit the CPU real-time period in microseconds")
-	flags.Int64Var(&conf.CPURealtimeRuntime, "cpu-rt-runtime", 0, "Limit the CPU real-time runtime in microseconds")
-	flags.StringVar(&conf.SeccompProfile, "seccomp-profile", "", "Path to seccomp profile")
-	flags.Var(&conf.ShmSize, "default-shm-size", "Default shm size for containers")
-	flags.BoolVar(&conf.NoNewPrivileges, "no-new-privileges", false, "Set no-new-privileges by default for new containers")
-	flags.StringVar(&conf.IpcMode, "default-ipc-mode", config.DefaultIpcMode, `Default mode for containers ipc ("shareable" | "private")`)
-	flags.Var(&conf.NetworkConfig.DefaultAddressPools, "default-address-pool", "Default address pools for node specific local networks")
-
-}
diff --git a/vendor/github.com/docker/docker/cmd/dockerd/config_windows.go b/vendor/github.com/docker/docker/cmd/dockerd/config_windows.go
deleted file mode 100644
index 36af76645..000000000
--- a/vendor/github.com/docker/docker/cmd/dockerd/config_windows.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package main
-
-import (
-	"os"
-	"path/filepath"
-
-	"github.com/docker/docker/daemon/config"
-	"github.com/spf13/pflag"
-)
-
-var (
-	defaultPidFile  string
-	defaultDataRoot = filepath.Join(os.Getenv("programdata"), "docker")
-	defaultExecRoot = filepath.Join(os.Getenv("programdata"), "docker", "exec-root")
-)
-
-// installConfigFlags adds flags to the pflag.FlagSet to configure the daemon
-func installConfigFlags(conf *config.Config, flags *pflag.FlagSet) {
-	// First handle install flags which are consistent cross-platform
-	installCommonConfigFlags(conf, flags)
-
-	// Then platform-specific install flags.
-	flags.StringVar(&conf.BridgeConfig.FixedCIDR, "fixed-cidr", "", "IPv4 subnet for fixed IPs")
-	flags.StringVarP(&conf.BridgeConfig.Iface, "bridge", "b", "", "Attach containers to a virtual switch")
-	flags.StringVarP(&conf.SocketGroup, "group", "G", "", "Users or groups that can access the named pipe")
-}
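[Editor's note] The three config files above select per-platform defaults through build constraints: the Unix variants hardcode /var/run and /var/lib paths, while the Windows variant derives everything from the programdata environment variable and leaves the PID file unset. The same split, reduced to a skeleton (two files shown in one listing; the values are copied from the code above, the file names are invented):

    // defaults_unix.go
    // +build linux freebsd

    package main

    var (
    	defaultPidFile  = "/var/run/docker.pid"
    	defaultDataRoot = "/var/lib/docker"
    )

    // defaults_windows.go (constrained to Windows by its _windows file suffix)
    package main

    import (
    	"os"
    	"path/filepath"
    )

    var (
    	defaultPidFile  string // no PID file by default on Windows
    	defaultDataRoot = filepath.Join(os.Getenv("programdata"), "docker")
    )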
-type DaemonCli struct { - *config.Config - configFile *string - flags *pflag.FlagSet - - api *apiserver.Server - d *daemon.Daemon - authzMiddleware *authorization.Middleware // authzMiddleware enables to dynamically reload the authorization plugins -} - -// NewDaemonCli returns a daemon CLI -func NewDaemonCli() *DaemonCli { - return &DaemonCli{} -} - -func (cli *DaemonCli) start(opts *daemonOptions) (err error) { - stopc := make(chan bool) - defer close(stopc) - - // warn from uuid package when running the daemon - uuid.Loggerf = logrus.Warnf - - opts.SetDefaultOptions(opts.flags) - - if cli.Config, err = loadDaemonCliConfig(opts); err != nil { - return err - } - cli.configFile = &opts.configFile - cli.flags = opts.flags - - if cli.Config.Debug { - debug.Enable() - } - - if cli.Config.Experimental { - logrus.Warn("Running experimental build") - } - - logrus.SetFormatter(&logrus.TextFormatter{ - TimestampFormat: jsonmessage.RFC3339NanoFixed, - DisableColors: cli.Config.RawLogs, - FullTimestamp: true, - }) - - system.InitLCOW(cli.Config.Experimental) - - if err := setDefaultUmask(); err != nil { - return fmt.Errorf("Failed to set umask: %v", err) - } - - // Create the daemon root before we create ANY other files (PID, or migrate keys) - // to ensure the appropriate ACL is set (particularly relevant on Windows) - if err := daemon.CreateDaemonRoot(cli.Config); err != nil { - return err - } - - if cli.Pidfile != "" { - pf, err := pidfile.New(cli.Pidfile) - if err != nil { - return fmt.Errorf("Error starting daemon: %v", err) - } - defer func() { - if err := pf.Remove(); err != nil { - logrus.Error(err) - } - }() - } - - serverConfig, err := newAPIServerConfig(cli) - if err != nil { - return fmt.Errorf("Failed to create API server: %v", err) - } - cli.api = apiserver.New(serverConfig) - - hosts, err := loadListeners(cli, serverConfig) - if err != nil { - return fmt.Errorf("Failed to load listeners: %v", err) - } - - registryService, err := registry.NewService(cli.Config.ServiceOptions) - if err != nil { - return err - } - - rOpts, err := cli.getRemoteOptions() - if err != nil { - return fmt.Errorf("Failed to generate containerd options: %v", err) - } - containerdRemote, err := libcontainerd.New(filepath.Join(cli.Config.Root, "containerd"), filepath.Join(cli.Config.ExecRoot, "containerd"), rOpts...) - if err != nil { - return err - } - signal.Trap(func() { - cli.stop() - <-stopc // wait for daemonCli.start() to return - }, logrus.StandardLogger()) - - // Notify that the API is active, but before daemon is set up. - preNotifySystem() - - pluginStore := plugin.NewStore() - - if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil { - logrus.Fatalf("Error creating middlewares: %v", err) - } - - d, err := daemon.NewDaemon(cli.Config, registryService, containerdRemote, pluginStore) - if err != nil { - return fmt.Errorf("Error starting daemon: %v", err) - } - - d.StoreHosts(hosts) - - // validate after NewDaemon has restored enabled plugins. Dont change order. 
- if err := validateAuthzPlugins(cli.Config.AuthorizationPlugins, pluginStore); err != nil { - return fmt.Errorf("Error validating authorization plugin: %v", err) - } - - // TODO: move into startMetricsServer() - if cli.Config.MetricsAddress != "" { - if !d.HasExperimental() { - return fmt.Errorf("metrics-addr is only supported when experimental is enabled") - } - if err := startMetricsServer(cli.Config.MetricsAddress); err != nil { - return err - } - } - - c, err := createAndStartCluster(cli, d) - if err != nil { - logrus.Fatalf("Error starting cluster component: %v", err) - } - - // Restart all autostart containers which has a swarm endpoint - // and is not yet running now that we have successfully - // initialized the cluster. - d.RestartSwarmContainers() - - logrus.Info("Daemon has completed initialization") - - cli.d = d - - routerOptions, err := newRouterOptions(cli.Config, d) - if err != nil { - return err - } - routerOptions.api = cli.api - routerOptions.cluster = c - - initRouter(routerOptions) - - // process cluster change notifications - watchCtx, cancel := context.WithCancel(context.Background()) - defer cancel() - go d.ProcessClusterNotifications(watchCtx, c.GetWatchStream()) - - cli.setupConfigReloadTrap() - - // The serve API routine never exits unless an error occurs - // We need to start it as a goroutine and wait on it so - // daemon doesn't exit - serveAPIWait := make(chan error) - go cli.api.Wait(serveAPIWait) - - // after the daemon is done setting up we can notify systemd api - notifySystem() - - // Daemon is fully initialized and handling API traffic - // Wait for serve API to complete - errAPI := <-serveAPIWait - c.Cleanup() - shutdownDaemon(d) - containerdRemote.Cleanup() - if errAPI != nil { - return fmt.Errorf("Shutting down due to ServeAPI error: %v", errAPI) - } - - return nil -} - -type routerOptions struct { - sessionManager *session.Manager - buildBackend *buildbackend.Backend - buildCache *fscache.FSCache - daemon *daemon.Daemon - api *apiserver.Server - cluster *cluster.Cluster -} - -func newRouterOptions(config *config.Config, daemon *daemon.Daemon) (routerOptions, error) { - opts := routerOptions{} - sm, err := session.NewManager() - if err != nil { - return opts, errors.Wrap(err, "failed to create sessionmanager") - } - - builderStateDir := filepath.Join(config.Root, "builder") - - buildCache, err := fscache.NewFSCache(fscache.Opt{ - Backend: fscache.NewNaiveCacheBackend(builderStateDir), - Root: builderStateDir, - GCPolicy: fscache.GCPolicy{ // TODO: expose this in config - MaxSize: 1024 * 1024 * 512, // 512MB - MaxKeepDuration: 7 * 24 * time.Hour, // 1 week - }, - }) - if err != nil { - return opts, errors.Wrap(err, "failed to create fscache") - } - - manager, err := dockerfile.NewBuildManager(daemon.BuilderBackend(), sm, buildCache, daemon.IDMappings()) - if err != nil { - return opts, err - } - - bb, err := buildbackend.NewBackend(daemon.ImageService(), manager, buildCache) - if err != nil { - return opts, errors.Wrap(err, "failed to create buildmanager") - } - - return routerOptions{ - sessionManager: sm, - buildBackend: bb, - buildCache: buildCache, - daemon: daemon, - }, nil -} - -func (cli *DaemonCli) reloadConfig() { - reload := func(c *config.Config) { - - // Revalidate and reload the authorization plugins - if err := validateAuthzPlugins(c.AuthorizationPlugins, cli.d.PluginStore); err != nil { - logrus.Fatalf("Error validating authorization plugin: %v", err) - return - } - cli.authzMiddleware.SetPlugins(c.AuthorizationPlugins) - - // The 
namespaces com.docker.*, io.docker.*, org.dockerproject.* have been documented - // to be reserved for Docker's internal use, but this was never enforced. Allowing - // configured labels to use these namespaces is deprecated for 18.05. - // - // The following will check the usage of such labels, and report a warning for deprecation. - // - // TODO: At the next stable release, the validation should be folded into the other - // configuration validation functions and an error will be returned instead, and this - // block should be deleted. - if err := config.ValidateReservedNamespaceLabels(c.Labels); err != nil { - logrus.Warnf("Configured labels using reserved namespaces is deprecated: %s", err) - } - - if err := cli.d.Reload(c); err != nil { - logrus.Errorf("Error reconfiguring the daemon: %v", err) - return - } - - if c.IsValueSet("debug") { - debugEnabled := debug.IsEnabled() - switch { - case debugEnabled && !c.Debug: // disable debug - debug.Disable() - case c.Debug && !debugEnabled: // enable debug - debug.Enable() - } - } - } - - if err := config.Reload(*cli.configFile, cli.flags, reload); err != nil { - logrus.Error(err) - } -} - -func (cli *DaemonCli) stop() { - cli.api.Close() -} - -// shutdownDaemon wraps daemon.Shutdown() to handle a timeout in case -// d.Shutdown() waits too long to kill a container or, worse, is -// blocked there -func shutdownDaemon(d *daemon.Daemon) { - shutdownTimeout := d.ShutdownTimeout() - ch := make(chan struct{}) - go func() { - d.Shutdown() - close(ch) - }() - if shutdownTimeout < 0 { - <-ch - logrus.Debug("Clean shutdown succeeded") - return - } - select { - case <-ch: - logrus.Debug("Clean shutdown succeeded") - case <-time.After(time.Duration(shutdownTimeout) * time.Second): - logrus.Error("Force shutdown daemon") - } -} - -func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) { - conf := opts.daemonConfig - flags := opts.flags - conf.Debug = opts.Debug - conf.Hosts = opts.Hosts - conf.LogLevel = opts.LogLevel - conf.TLS = opts.TLS - conf.TLSVerify = opts.TLSVerify - conf.CommonTLSOptions = config.CommonTLSOptions{} - - if opts.TLSOptions != nil { - conf.CommonTLSOptions.CAFile = opts.TLSOptions.CAFile - conf.CommonTLSOptions.CertFile = opts.TLSOptions.CertFile - conf.CommonTLSOptions.KeyFile = opts.TLSOptions.KeyFile - } - - if conf.TrustKeyPath == "" { - conf.TrustKeyPath = filepath.Join( - getDaemonConfDir(conf.Root), - defaultTrustKeyFile) - } - - if flags.Changed("graph") && flags.Changed("data-root") { - return nil, fmt.Errorf(`cannot specify both "--graph" and "--data-root" option`) - } - - if opts.configFile != "" { - c, err := config.MergeDaemonConfigurations(conf, flags, opts.configFile) - if err != nil { - if flags.Changed("config-file") || !os.IsNotExist(err) { - return nil, fmt.Errorf("unable to configure the Docker daemon with file %s: %v", opts.configFile, err) - } - } - // the merged configuration can be nil if the config file didn't exist. - // leave the current configuration as it is when that happens. - if c != nil { - conf = c - } - } - - if err := config.Validate(conf); err != nil { - return nil, err - } - - if runtime.GOOS != "windows" { - if flags.Changed("disable-legacy-registry") { - // TODO: Remove this error after 3 release cycles (18.03) - return nil, errors.New("ERROR: The '--disable-legacy-registry' flag has been removed. 
Interacting with legacy (v1) registries is no longer supported") - } - if !conf.V2Only { - // TODO: Remove this error after 3 release cycles (18.03) - return nil, errors.New("ERROR: The 'disable-legacy-registry' configuration option has been removed. Interacting with legacy (v1) registries is no longer supported") - } - } - - if flags.Changed("graph") { - logrus.Warnf(`The "-g / --graph" flag is deprecated. Please use "--data-root" instead`) - } - - // Check if duplicate label-keys with different values are found - newLabels, err := config.GetConflictFreeLabels(conf.Labels) - if err != nil { - return nil, err - } - // The namespaces com.docker.*, io.docker.*, org.dockerproject.* have been documented - // to be reserved for Docker's internal use, but this was never enforced. Allowing - // configured labels to use these namespaces are deprecated for 18.05. - // - // The following will check the usage of such labels, and report a warning for deprecation. - // - // TODO: At the next stable release, the validation should be folded into the other - // configuration validation functions and an error will be returned instead, and this - // block should be deleted. - if err := config.ValidateReservedNamespaceLabels(newLabels); err != nil { - logrus.Warnf("Configured labels using reserved namespaces is deprecated: %s", err) - } - conf.Labels = newLabels - - // Regardless of whether the user sets it to true or false, if they - // specify TLSVerify at all then we need to turn on TLS - if conf.IsValueSet(FlagTLSVerify) { - conf.TLS = true - } - - // ensure that the log level is the one set after merging configurations - setLogLevel(conf.LogLevel) - - return conf, nil -} - -func initRouter(opts routerOptions) { - decoder := runconfig.ContainerDecoder{} - - routers := []router.Router{ - // we need to add the checkpoint router before the container router or the DELETE gets masked - checkpointrouter.NewRouter(opts.daemon, decoder), - container.NewRouter(opts.daemon, decoder), - image.NewRouter(opts.daemon.ImageService()), - systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildCache), - volume.NewRouter(opts.daemon.VolumesService()), - build.NewRouter(opts.buildBackend, opts.daemon), - sessionrouter.NewRouter(opts.sessionManager), - swarmrouter.NewRouter(opts.cluster), - pluginrouter.NewRouter(opts.daemon.PluginManager()), - distributionrouter.NewRouter(opts.daemon.ImageService()), - } - - if opts.daemon.NetworkControllerEnabled() { - routers = append(routers, network.NewRouter(opts.daemon, opts.cluster)) - } - - if opts.daemon.HasExperimental() { - for _, r := range routers { - for _, route := range r.Routes() { - if experimental, ok := route.(router.ExperimentalRoute); ok { - experimental.Enable() - } - } - } - } - - opts.api.InitRouter(routers...) 
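initRouter registers one router per API subsystem and flips experimental routes on only when the daemon itself runs in experimental mode. A minimal, self-contained sketch of that registration pattern; the types here are illustrative stand-ins, not the vendored api/server/router interfaces:

package main

import "fmt"

// Route is a single API route; ExperimentalRoute is a route that stays
// disabled unless explicitly enabled. Both are hypothetical stand-ins.
type Route interface{ Path() string }

type ExperimentalRoute interface {
	Route
	Enable()
}

type Router interface{ Routes() []Route }

type pingRoute struct{ enabled bool }

func (p *pingRoute) Path() string { return "/_ping" }
func (p *pingRoute) Enable()      { p.enabled = true }

type systemRouter struct{ routes []Route }

func (s *systemRouter) Routes() []Route { return s.routes }

func main() {
	routers := []Router{&systemRouter{routes: []Route{&pingRoute{}}}}
	experimental := true

	// Mirror of the loop in initRouter: enable any route that opts in
	// to experimental mode when the daemon itself is experimental.
	if experimental {
		for _, r := range routers {
			for _, route := range r.Routes() {
				if exp, ok := route.(ExperimentalRoute); ok {
					exp.Enable()
				}
			}
		}
	}
	fmt.Println("routers registered:", len(routers))
}

The type assertion keeps the experimental flag out of the common Route interface, so stable routes never need to know the mechanism exists.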
-} - -// TODO: remove this from cli and return the authzMiddleware -func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore plugingetter.PluginGetter) error { - v := cfg.Version - - exp := middleware.NewExperimentalMiddleware(cli.Config.Experimental) - s.UseMiddleware(exp) - - vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) - s.UseMiddleware(vm) - - if cfg.CorsHeaders != "" { - c := middleware.NewCORSMiddleware(cfg.CorsHeaders) - s.UseMiddleware(c) - } - - cli.authzMiddleware = authorization.NewMiddleware(cli.Config.AuthorizationPlugins, pluginStore) - cli.Config.AuthzMiddleware = cli.authzMiddleware - s.UseMiddleware(cli.authzMiddleware) - return nil -} - -func (cli *DaemonCli) getRemoteOptions() ([]libcontainerd.RemoteOption, error) { - opts := []libcontainerd.RemoteOption{} - - pOpts, err := cli.getPlatformRemoteOptions() - if err != nil { - return nil, err - } - opts = append(opts, pOpts...) - return opts, nil -} - -func newAPIServerConfig(cli *DaemonCli) (*apiserver.Config, error) { - serverConfig := &apiserver.Config{ - Logging: true, - SocketGroup: cli.Config.SocketGroup, - Version: dockerversion.Version, - CorsHeaders: cli.Config.CorsHeaders, - } - - if cli.Config.TLS { - tlsOptions := tlsconfig.Options{ - CAFile: cli.Config.CommonTLSOptions.CAFile, - CertFile: cli.Config.CommonTLSOptions.CertFile, - KeyFile: cli.Config.CommonTLSOptions.KeyFile, - ExclusiveRootPools: true, - } - - if cli.Config.TLSVerify { - // server requires and verifies client's certificate - tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert - } - tlsConfig, err := tlsconfig.Server(tlsOptions) - if err != nil { - return nil, err - } - serverConfig.TLSConfig = tlsConfig - } - - if len(cli.Config.Hosts) == 0 { - cli.Config.Hosts = make([]string, 1) - } - - return serverConfig, nil -} - -func loadListeners(cli *DaemonCli, serverConfig *apiserver.Config) ([]string, error) { - var hosts []string - for i := 0; i < len(cli.Config.Hosts); i++ { - var err error - if cli.Config.Hosts[i], err = dopts.ParseHost(cli.Config.TLS, cli.Config.Hosts[i]); err != nil { - return nil, fmt.Errorf("error parsing -H %s : %v", cli.Config.Hosts[i], err) - } - - protoAddr := cli.Config.Hosts[i] - protoAddrParts := strings.SplitN(protoAddr, "://", 2) - if len(protoAddrParts) != 2 { - return nil, fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) - } - - proto := protoAddrParts[0] - addr := protoAddrParts[1] - - // It's a bad idea to bind to TCP without tlsverify. - if proto == "tcp" && (serverConfig.TLSConfig == nil || serverConfig.TLSConfig.ClientAuth != tls.RequireAndVerifyClientCert) { - logrus.Warn("[!] DON'T BIND ON ANY IP ADDRESS WITHOUT setting --tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING [!]") - } - ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) - if err != nil { - return nil, err - } - ls = wrapListeners(proto, ls) - // If we're binding to a TCP port, make sure that a container doesn't try to use it. - if proto == "tcp" { - if err := allocateDaemonPort(addr); err != nil { - return nil, err - } - } - logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr) - hosts = append(hosts, protoAddrParts[1]) - cli.api.Accept(addr, ls...) 
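loadListeners accepts hosts only in PROTO://ADDR form, and the validation is a single strings.SplitN. A self-contained sketch of the same parsing; parseProtoAddr is an illustrative helper, since the vendored code inlines the split:

package main

import (
	"fmt"
	"strings"
)

// parseProtoAddr splits "tcp://0.0.0.0:2376" into ("tcp", "0.0.0.0:2376"),
// mirroring the check in loadListeners.
func parseProtoAddr(protoAddr string) (proto, addr string, err error) {
	parts := strings.SplitN(protoAddr, "://", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr)
	}
	return parts[0], parts[1], nil
}

func main() {
	for _, h := range []string{"unix:///var/run/docker.sock", "tcp://0.0.0.0:2376", "bogus"} {
		proto, addr, err := parseProtoAddr(h)
		if err != nil {
			fmt.Println("error:", err)
			continue
		}
		fmt.Printf("proto=%s addr=%s\n", proto, addr)
	}
}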
- } - - return hosts, nil -} - -func createAndStartCluster(cli *DaemonCli, d *daemon.Daemon) (*cluster.Cluster, error) { - name, _ := os.Hostname() - - // Use a buffered channel to pass changes from store watch API to daemon - // A buffer allows store watch API and daemon processing to not wait for each other - watchStream := make(chan *swarmapi.WatchMessage, 32) - - c, err := cluster.New(cluster.Config{ - Root: cli.Config.Root, - Name: name, - Backend: d, - VolumeBackend: d.VolumesService(), - ImageBackend: d.ImageService(), - PluginBackend: d.PluginManager(), - NetworkSubnetsProvider: d, - DefaultAdvertiseAddr: cli.Config.SwarmDefaultAdvertiseAddr, - RaftHeartbeatTick: cli.Config.SwarmRaftHeartbeatTick, - RaftElectionTick: cli.Config.SwarmRaftElectionTick, - RuntimeRoot: cli.getSwarmRunRoot(), - WatchStream: watchStream, - }) - if err != nil { - return nil, err - } - d.SetCluster(c) - err = c.Start() - - return c, err -} - -// validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver -// plugins present on the host and available to the daemon -func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error { - for _, reqPlugin := range requestedPlugins { - if _, err := pg.Get(reqPlugin, authorization.AuthZApiImplements, plugingetter.Lookup); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon_freebsd.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon_freebsd.go deleted file mode 100644 index 6d013b810..000000000 --- a/vendor/github.com/docker/docker/cmd/dockerd/daemon_freebsd.go +++ /dev/null @@ -1,9 +0,0 @@ -package main - -// preNotifySystem sends a message to the host when the API is active, but before the daemon is -func preNotifySystem() { -} - -// notifySystem sends a message to the host when the server is ready to be used -func notifySystem() { -} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon_linux.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon_linux.go deleted file mode 100644 index cf2d65275..000000000 --- a/vendor/github.com/docker/docker/cmd/dockerd/daemon_linux.go +++ /dev/null @@ -1,13 +0,0 @@ -package main - -import systemdDaemon "github.com/coreos/go-systemd/daemon" - -// preNotifySystem sends a message to the host when the API is active, but before the daemon is -func preNotifySystem() { -} - -// notifySystem sends a message to the host when the server is ready to be used -func notifySystem() { - // Tell the init daemon we are accepting requests - go systemdDaemon.SdNotify(false, systemdDaemon.SdNotifyReady) -} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix.go deleted file mode 100644 index 2561baa77..000000000 --- a/vendor/github.com/docker/docker/cmd/dockerd/daemon_unix.go +++ /dev/null @@ -1,117 +0,0 @@ -// +build !windows - -package main - -import ( - "fmt" - "net" - "os" - "os/signal" - "path/filepath" - "strconv" - - "github.com/containerd/containerd/runtime/linux" - "github.com/docker/docker/cmd/dockerd/hack" - "github.com/docker/docker/daemon" - "github.com/docker/docker/libcontainerd" - "github.com/docker/libnetwork/portallocator" - "golang.org/x/sys/unix" -) - -const defaultDaemonConfigFile = "/etc/docker/daemon.json" - -// setDefaultUmask sets the umask to 0022 to avoid problems -// caused by custom umask -func setDefaultUmask() error { - desiredUmask := 0022 - unix.Umask(desiredUmask) - if umask := unix.Umask(desiredUmask); 
umask != desiredUmask { - return fmt.Errorf("failed to set umask: expected %#o, got %#o", desiredUmask, umask) - } - - return nil -} - -func getDaemonConfDir(_ string) string { - return "/etc/docker" -} - -func (cli *DaemonCli) getPlatformRemoteOptions() ([]libcontainerd.RemoteOption, error) { - opts := []libcontainerd.RemoteOption{ - libcontainerd.WithOOMScore(cli.Config.OOMScoreAdjust), - libcontainerd.WithPlugin("linux", &linux.Config{ - Shim: daemon.DefaultShimBinary, - Runtime: daemon.DefaultRuntimeBinary, - RuntimeRoot: filepath.Join(cli.Config.Root, "runc"), - ShimDebug: cli.Config.Debug, - }), - } - if cli.Config.Debug { - opts = append(opts, libcontainerd.WithLogLevel("debug")) - } - if cli.Config.ContainerdAddr != "" { - opts = append(opts, libcontainerd.WithRemoteAddr(cli.Config.ContainerdAddr)) - } else { - opts = append(opts, libcontainerd.WithStartDaemon(true)) - } - - return opts, nil -} - -// setupConfigReloadTrap configures the USR2 signal to reload the configuration. -func (cli *DaemonCli) setupConfigReloadTrap() { - c := make(chan os.Signal, 1) - signal.Notify(c, unix.SIGHUP) - go func() { - for range c { - cli.reloadConfig() - } - }() -} - -// getSwarmRunRoot gets the root directory for swarm to store runtime state -// For example, the control socket -func (cli *DaemonCli) getSwarmRunRoot() string { - return filepath.Join(cli.Config.ExecRoot, "swarm") -} - -// allocateDaemonPort ensures that there are no containers -// that try to use any port allocated for the docker server. -func allocateDaemonPort(addr string) error { - host, port, err := net.SplitHostPort(addr) - if err != nil { - return err - } - - intPort, err := strconv.Atoi(port) - if err != nil { - return err - } - - var hostIPs []net.IP - if parsedIP := net.ParseIP(host); parsedIP != nil { - hostIPs = append(hostIPs, parsedIP) - } else if hostIPs, err = net.LookupIP(host); err != nil { - return fmt.Errorf("failed to lookup %s address in host specification", host) - } - - pa := portallocator.Get() - for _, hostIP := range hostIPs { - if _, err := pa.RequestPort(hostIP, "tcp", intPort); err != nil { - return fmt.Errorf("failed to allocate daemon listening port %d (err: %v)", intPort, err) - } - } - return nil -} - -func wrapListeners(proto string, ls []net.Listener) []net.Listener { - switch proto { - case "unix": - ls[0] = &hack.MalformedHostHeaderOverride{Listener: ls[0]} - case "fd": - for i := range ls { - ls[i] = &hack.MalformedHostHeaderOverride{Listener: ls[i]} - } - } - return ls -} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/daemon_windows.go b/vendor/github.com/docker/docker/cmd/dockerd/daemon_windows.go deleted file mode 100644 index 224c50945..000000000 --- a/vendor/github.com/docker/docker/cmd/dockerd/daemon_windows.go +++ /dev/null @@ -1,85 +0,0 @@ -package main - -import ( - "fmt" - "net" - "os" - "path/filepath" - - "github.com/docker/docker/libcontainerd" - "github.com/sirupsen/logrus" - "golang.org/x/sys/windows" -) - -var defaultDaemonConfigFile = "" - -// setDefaultUmask doesn't do anything on windows -func setDefaultUmask() error { - return nil -} - -func getDaemonConfDir(root string) string { - return filepath.Join(root, `\config`) -} - -// preNotifySystem sends a message to the host when the API is active, but before the daemon is -func preNotifySystem() { - // start the service now to prevent timeouts waiting for daemon to start - // but still (eventually) complete all requests that are sent after this - if service != nil { - err := service.started() - if err != nil { - 
logrus.Fatal(err) - } - } -} - -// notifySystem sends a message to the host when the server is ready to be used -func notifySystem() { -} - -// notifyShutdown is called after the daemon shuts down but before the process exits. -func notifyShutdown(err error) { - if service != nil { - if err != nil { - logrus.Fatal(err) - } - service.stopped(err) - } -} - -func (cli *DaemonCli) getPlatformRemoteOptions() ([]libcontainerd.RemoteOption, error) { - return nil, nil -} - -// setupConfigReloadTrap configures a Win32 event to reload the configuration. -func (cli *DaemonCli) setupConfigReloadTrap() { - go func() { - sa := windows.SecurityAttributes{ - Length: 0, - } - event := "Global\\docker-daemon-config-" + fmt.Sprint(os.Getpid()) - ev, _ := windows.UTF16PtrFromString(event) - if h, _ := windows.CreateEvent(&sa, 0, 0, ev); h != 0 { - logrus.Debugf("Config reload - waiting signal at %s", event) - for { - windows.WaitForSingleObject(h, windows.INFINITE) - cli.reloadConfig() - } - } - }() -} - -// getSwarmRunRoot gets the root directory for swarm to store runtime state -// For example, the control socket -func (cli *DaemonCli) getSwarmRunRoot() string { - return "" -} - -func allocateDaemonPort(addr string) error { - return nil -} - -func wrapListeners(proto string, ls []net.Listener) []net.Listener { - return ls -} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/docker.go b/vendor/github.com/docker/docker/cmd/dockerd/docker.go deleted file mode 100644 index 463482e93..000000000 --- a/vendor/github.com/docker/docker/cmd/dockerd/docker.go +++ /dev/null @@ -1,67 +0,0 @@ -package main - -import ( - "fmt" - "os" - "runtime" - - "github.com/docker/docker/cli" - "github.com/docker/docker/daemon/config" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/pkg/reexec" - "github.com/docker/docker/pkg/term" - "github.com/sirupsen/logrus" - "github.com/spf13/cobra" -) - -func newDaemonCommand() *cobra.Command { - opts := newDaemonOptions(config.New()) - - cmd := &cobra.Command{ - Use: "dockerd [OPTIONS]", - Short: "A self-sufficient runtime for containers.", - SilenceUsage: true, - SilenceErrors: true, - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - opts.flags = cmd.Flags() - return runDaemon(opts) - }, - DisableFlagsInUseLine: true, - Version: fmt.Sprintf("%s, build %s", dockerversion.Version, dockerversion.GitCommit), - } - cli.SetupRootCommand(cmd) - - flags := cmd.Flags() - flags.BoolP("version", "v", false, "Print version information and quit") - flags.StringVar(&opts.configFile, "config-file", defaultDaemonConfigFile, "Daemon configuration file") - opts.InstallFlags(flags) - installConfigFlags(opts.daemonConfig, flags) - installServiceFlags(flags) - - return cmd -} - -func main() { - if reexec.Init() { - return - } - - // Set terminal emulation based on platform as required. - _, stdout, stderr := term.StdStreams() - - // @jhowardmsft - maybe there is a historic reason why on non-Windows, stderr is used - // here. However, on Windows it makes no sense and there is no need. 
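newDaemonCommand assembles the dockerd entry point on spf13/cobra, with flag installation split between the common options and platform-specific config. A stripped-down sketch of the same cobra wiring; the RunE body and flag set here are placeholders, not Docker's:

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func newSketchCommand() *cobra.Command {
	var configFile string
	cmd := &cobra.Command{
		Use:           "dockerd-sketch [OPTIONS]",
		Short:         "Sketch of the dockerd command wiring",
		SilenceUsage:  true,
		SilenceErrors: true,
		RunE: func(cmd *cobra.Command, args []string) error {
			// In dockerd this dispatches to runDaemon(opts).
			fmt.Println("would start daemon with config:", configFile)
			return nil
		},
	}
	cmd.Flags().StringVar(&configFile, "config-file", "/etc/docker/daemon.json", "Daemon configuration file")
	cmd.Flags().BoolP("version", "v", false, "Print version information and quit")
	return cmd
}

func main() {
	if err := newSketchCommand().Execute(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}

SilenceUsage and SilenceErrors matter for a daemon: error reporting stays in the caller's hands instead of cobra printing usage text on every failure.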
- if runtime.GOOS == "windows" { - logrus.SetOutput(stdout) - } else { - logrus.SetOutput(stderr) - } - - cmd := newDaemonCommand() - cmd.SetOutput(stdout) - if err := cmd.Execute(); err != nil { - fmt.Fprintf(stderr, "%s\n", err) - os.Exit(1) - } -} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/docker_unix.go b/vendor/github.com/docker/docker/cmd/dockerd/docker_unix.go deleted file mode 100644 index 0dec48663..000000000 --- a/vendor/github.com/docker/docker/cmd/dockerd/docker_unix.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !windows - -package main - -func runDaemon(opts *daemonOptions) error { - daemonCli := NewDaemonCli() - return daemonCli.start(opts) -} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/docker_windows.go b/vendor/github.com/docker/docker/cmd/dockerd/docker_windows.go deleted file mode 100644 index bd8bc5a58..000000000 --- a/vendor/github.com/docker/docker/cmd/dockerd/docker_windows.go +++ /dev/null @@ -1,38 +0,0 @@ -package main - -import ( - "path/filepath" - - _ "github.com/docker/docker/autogen/winresources/dockerd" - "github.com/sirupsen/logrus" -) - -func runDaemon(opts *daemonOptions) error { - daemonCli := NewDaemonCli() - - // On Windows, this may be launching as a service or with an option to - // register the service. - stop, runAsService, err := initService(daemonCli) - if err != nil { - logrus.Fatal(err) - } - - if stop { - return nil - } - - // Windows specific settings as these are not defaulted. - if opts.configFile == "" { - opts.configFile = filepath.Join(opts.daemonConfig.Root, `config\daemon.json`) - } - if runAsService { - // If Windows SCM manages the service - no need for PID files - opts.daemonConfig.Pidfile = "" - } else if opts.daemonConfig.Pidfile == "" { - opts.daemonConfig.Pidfile = filepath.Join(opts.daemonConfig.Root, "docker.pid") - } - - err = daemonCli.start(opts) - notifyShutdown(err) - return err -} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/hack/malformed_host_override.go b/vendor/github.com/docker/docker/cmd/dockerd/hack/malformed_host_override.go deleted file mode 100644 index ddd5eb9d8..000000000 --- a/vendor/github.com/docker/docker/cmd/dockerd/hack/malformed_host_override.go +++ /dev/null @@ -1,121 +0,0 @@ -// +build !windows - -package hack // import "github.com/docker/docker/cmd/dockerd/hack" - -import "net" - -// MalformedHostHeaderOverride is a wrapper to be able -// to overcome the 400 Bad request coming from old docker -// clients that send an invalid Host header. -type MalformedHostHeaderOverride struct { - net.Listener -} - -// MalformedHostHeaderOverrideConn wraps the underlying unix -// connection and keeps track of the first read from http.Server -// which just reads the headers. -type MalformedHostHeaderOverrideConn struct { - net.Conn - first bool -} - -var closeConnHeader = []byte("\r\nConnection: close\r") - -// Read reads the first *read* request from http.Server to inspect -// the Host header. If the Host starts with / then we're talking to -// an old docker client which send an invalid Host header. To not -// error out in http.Server we rewrite the first bytes of the request -// to sanitize the Host header itself. -// In case we're not dealing with old docker clients the data is just passed -// to the server w/o modification. 
-func (l *MalformedHostHeaderOverrideConn) Read(b []byte) (n int, err error) { - // http.Server uses a 4k buffer - if l.first && len(b) == 4096 { - // This keeps track of the first read from http.Server which just reads - // the headers - l.first = false - // The first read of the connection by http.Server is done limited to - // DefaultMaxHeaderBytes (usually 1 << 20) + 4096. - // Here we do the first read which gets us all the http headers to - // be inspected and modified below. - c, err := l.Conn.Read(b) - if err != nil { - return c, err - } - - var ( - start, end int - firstLineFeed = -1 - buf []byte - ) - for i := 0; i <= c-1-7; i++ { - if b[i] == '\n' && firstLineFeed == -1 { - firstLineFeed = i - } - if b[i] != '\n' { - continue - } - - if b[i+1] == '\r' && b[i+2] == '\n' { - return c, nil - } - - if b[i+1] != 'H' { - continue - } - if b[i+2] != 'o' { - continue - } - if b[i+3] != 's' { - continue - } - if b[i+4] != 't' { - continue - } - if b[i+5] != ':' { - continue - } - if b[i+6] != ' ' { - continue - } - if b[i+7] != '/' { - continue - } - // ensure clients other than the docker clients do not get this hack - if i != firstLineFeed { - return c, nil - } - start = i + 7 - // now find where the value ends - for ii, bbb := range b[start:c] { - if bbb == '\n' { - end = start + ii - break - } - } - buf = make([]byte, 0, c+len(closeConnHeader)-(end-start)) - // strip the value of the host header and - // inject `Connection: close` to ensure we don't reuse this connection - buf = append(buf, b[:start]...) - buf = append(buf, closeConnHeader...) - buf = append(buf, b[end:c]...) - copy(b, buf) - break - } - if len(buf) == 0 { - return c, nil - } - return len(buf), nil - } - return l.Conn.Read(b) -} - -// Accept makes the listener accepts connections and wraps the connection -// in a MalformedHostHeaderOverrideConn initializing first to true. 
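The rewrite in Read only applies because Accept (below) hands every connection to the server pre-wrapped. Wrapping a net.Listener this way is a small, reusable pattern; a sketch with a no-op connection wrapper, using illustrative names rather than the vendored types:

package main

import "net"

// loggingConn is a stand-in for MalformedHostHeaderOverrideConn: it embeds
// net.Conn so Read/Write can be intercepted. Here it changes nothing.
type loggingConn struct {
	net.Conn
	first bool
}

// wrappedListener mirrors MalformedHostHeaderOverride: Accept returns the
// wrapped connection with first set to true.
type wrappedListener struct {
	net.Listener
}

func (l *wrappedListener) Accept() (net.Conn, error) {
	c, err := l.Listener.Accept()
	if err != nil {
		return c, err
	}
	return &loggingConn{Conn: c, first: true}, nil
}

func main() {
	inner, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer inner.Close()
	var ln net.Listener = &wrappedListener{Listener: inner}
	_ = ln // hand ln to http.Serve or similar
}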
-func (l *MalformedHostHeaderOverride) Accept() (net.Conn, error) { - c, err := l.Listener.Accept() - if err != nil { - return c, err - } - return &MalformedHostHeaderOverrideConn{c, true}, nil -} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/metrics.go b/vendor/github.com/docker/docker/cmd/dockerd/metrics.go deleted file mode 100644 index 20ceaf846..000000000 --- a/vendor/github.com/docker/docker/cmd/dockerd/metrics.go +++ /dev/null @@ -1,27 +0,0 @@ -package main - -import ( - "net" - "net/http" - - "github.com/docker/go-metrics" - "github.com/sirupsen/logrus" -) - -func startMetricsServer(addr string) error { - if err := allocateDaemonPort(addr); err != nil { - return err - } - l, err := net.Listen("tcp", addr) - if err != nil { - return err - } - mux := http.NewServeMux() - mux.Handle("/metrics", metrics.Handler()) - go func() { - if err := http.Serve(l, mux); err != nil { - logrus.Errorf("serve metrics api: %s", err) - } - }() - return nil -} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/options.go b/vendor/github.com/docker/docker/cmd/dockerd/options.go deleted file mode 100644 index a6276add5..000000000 --- a/vendor/github.com/docker/docker/cmd/dockerd/options.go +++ /dev/null @@ -1,122 +0,0 @@ -package main - -import ( - "fmt" - "os" - "path/filepath" - - cliconfig "github.com/docker/docker/cli/config" - "github.com/docker/docker/daemon/config" - "github.com/docker/docker/opts" - "github.com/docker/go-connections/tlsconfig" - "github.com/sirupsen/logrus" - "github.com/spf13/pflag" -) - -const ( - // DefaultCaFile is the default filename for the CA pem file - DefaultCaFile = "ca.pem" - // DefaultKeyFile is the default filename for the key pem file - DefaultKeyFile = "key.pem" - // DefaultCertFile is the default filename for the cert pem file - DefaultCertFile = "cert.pem" - // FlagTLSVerify is the flag name for the TLS verification option - FlagTLSVerify = "tlsverify" -) - -var ( - dockerCertPath = os.Getenv("DOCKER_CERT_PATH") - dockerTLSVerify = os.Getenv("DOCKER_TLS_VERIFY") != "" -) - -type daemonOptions struct { - configFile string - daemonConfig *config.Config - flags *pflag.FlagSet - Debug bool - Hosts []string - LogLevel string - TLS bool - TLSVerify bool - TLSOptions *tlsconfig.Options -} - -// newDaemonOptions returns a new daemonFlags -func newDaemonOptions(config *config.Config) *daemonOptions { - return &daemonOptions{ - daemonConfig: config, - } -} - -// InstallFlags adds flags for the common options on the FlagSet -func (o *daemonOptions) InstallFlags(flags *pflag.FlagSet) { - if dockerCertPath == "" { - dockerCertPath = cliconfig.Dir() - } - - flags.BoolVarP(&o.Debug, "debug", "D", false, "Enable debug mode") - flags.StringVarP(&o.LogLevel, "log-level", "l", "info", `Set the logging level ("debug"|"info"|"warn"|"error"|"fatal")`) - flags.BoolVar(&o.TLS, "tls", false, "Use TLS; implied by --tlsverify") - flags.BoolVar(&o.TLSVerify, FlagTLSVerify, dockerTLSVerify, "Use TLS and verify the remote") - - // TODO use flag flags.String("identity"}, "i", "", "Path to libtrust key file") - - o.TLSOptions = &tlsconfig.Options{ - CAFile: filepath.Join(dockerCertPath, DefaultCaFile), - CertFile: filepath.Join(dockerCertPath, DefaultCertFile), - KeyFile: filepath.Join(dockerCertPath, DefaultKeyFile), - } - tlsOptions := o.TLSOptions - flags.Var(opts.NewQuotedString(&tlsOptions.CAFile), "tlscacert", "Trust certs signed only by this CA") - flags.Var(opts.NewQuotedString(&tlsOptions.CertFile), "tlscert", "Path to TLS certificate file") - 
flags.Var(opts.NewQuotedString(&tlsOptions.KeyFile), "tlskey", "Path to TLS key file") - - hostOpt := opts.NewNamedListOptsRef("hosts", &o.Hosts, opts.ValidateHost) - flags.VarP(hostOpt, "host", "H", "Daemon socket(s) to connect to") -} - -// SetDefaultOptions sets default values for options after flag parsing is -// complete -func (o *daemonOptions) SetDefaultOptions(flags *pflag.FlagSet) { - // Regardless of whether the user sets it to true or false, if they - // specify --tlsverify at all then we need to turn on TLS - // TLSVerify can be true even if not set due to DOCKER_TLS_VERIFY env var, so we need - // to check that here as well - if flags.Changed(FlagTLSVerify) || o.TLSVerify { - o.TLS = true - } - - if !o.TLS { - o.TLSOptions = nil - } else { - tlsOptions := o.TLSOptions - tlsOptions.InsecureSkipVerify = !o.TLSVerify - - // Reset CertFile and KeyFile to empty string if the user did not specify - // the respective flags and the respective default files were not found. - if !flags.Changed("tlscert") { - if _, err := os.Stat(tlsOptions.CertFile); os.IsNotExist(err) { - tlsOptions.CertFile = "" - } - } - if !flags.Changed("tlskey") { - if _, err := os.Stat(tlsOptions.KeyFile); os.IsNotExist(err) { - tlsOptions.KeyFile = "" - } - } - } -} - -// setLogLevel sets the logrus logging level -func setLogLevel(logLevel string) { - if logLevel != "" { - lvl, err := logrus.ParseLevel(logLevel) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to parse logging level: %s\n", logLevel) - os.Exit(1) - } - logrus.SetLevel(lvl) - } else { - logrus.SetLevel(logrus.InfoLevel) - } -} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/service_unsupported.go b/vendor/github.com/docker/docker/cmd/dockerd/service_unsupported.go deleted file mode 100644 index bbcb7f3f3..000000000 --- a/vendor/github.com/docker/docker/cmd/dockerd/service_unsupported.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !windows - -package main - -import ( - "github.com/spf13/pflag" -) - -func installServiceFlags(flags *pflag.FlagSet) { -} diff --git a/vendor/github.com/docker/docker/cmd/dockerd/service_windows.go b/vendor/github.com/docker/docker/cmd/dockerd/service_windows.go deleted file mode 100644 index 00432af64..000000000 --- a/vendor/github.com/docker/docker/cmd/dockerd/service_windows.go +++ /dev/null @@ -1,430 +0,0 @@ -package main - -import ( - "bytes" - "errors" - "fmt" - "io/ioutil" - "log" - "os" - "os/exec" - "path/filepath" - "time" - "unsafe" - - "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" - "github.com/spf13/pflag" - "golang.org/x/sys/windows" - "golang.org/x/sys/windows/svc" - "golang.org/x/sys/windows/svc/debug" - "golang.org/x/sys/windows/svc/eventlog" - "golang.org/x/sys/windows/svc/mgr" -) - -var ( - flServiceName *string - flRegisterService *bool - flUnregisterService *bool - flRunService *bool - - setStdHandle = windows.NewLazySystemDLL("kernel32.dll").NewProc("SetStdHandle") - oldStderr windows.Handle - panicFile *os.File - - service *handler -) - -const ( - // These should match the values in event_messages.mc. 
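SetDefaultOptions keys off flag presence rather than flag value: supplying --tlsverify at all, even as --tlsverify=false, forces TLS on. pflag exposes presence via Changed; a sketch of that rule, where the flag names match dockerd's but the program itself is illustrative:

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	flags := pflag.NewFlagSet("sketch", pflag.ExitOnError)
	tls := flags.Bool("tls", false, "Use TLS; implied by --tlsverify")
	tlsVerify := flags.Bool("tlsverify", false, "Use TLS and verify the remote")

	// e.g. dockerd --tlsverify=false : TLS must still be enabled.
	_ = flags.Parse([]string{"--tlsverify=false"})

	// Presence of the flag, not its value, is what forces TLS on.
	if flags.Changed("tlsverify") || *tlsVerify {
		*tls = true
	}
	fmt.Println("tls enabled:", *tls)
}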
- eventInfo = 1 - eventWarn = 1 - eventError = 1 - eventDebug = 2 - eventPanic = 3 - eventFatal = 4 - - eventExtraOffset = 10 // Add this to any event to get a string that supports extended data -) - -func installServiceFlags(flags *pflag.FlagSet) { - flServiceName = flags.String("service-name", "docker", "Set the Windows service name") - flRegisterService = flags.Bool("register-service", false, "Register the service and exit") - flUnregisterService = flags.Bool("unregister-service", false, "Unregister the service and exit") - flRunService = flags.Bool("run-service", false, "") - flags.MarkHidden("run-service") -} - -type handler struct { - tosvc chan bool - fromsvc chan error - daemonCli *DaemonCli -} - -type etwHook struct { - log *eventlog.Log -} - -func (h *etwHook) Levels() []logrus.Level { - return []logrus.Level{ - logrus.PanicLevel, - logrus.FatalLevel, - logrus.ErrorLevel, - logrus.WarnLevel, - logrus.InfoLevel, - logrus.DebugLevel, - } -} - -func (h *etwHook) Fire(e *logrus.Entry) error { - var ( - etype uint16 - eid uint32 - ) - - switch e.Level { - case logrus.PanicLevel: - etype = windows.EVENTLOG_ERROR_TYPE - eid = eventPanic - case logrus.FatalLevel: - etype = windows.EVENTLOG_ERROR_TYPE - eid = eventFatal - case logrus.ErrorLevel: - etype = windows.EVENTLOG_ERROR_TYPE - eid = eventError - case logrus.WarnLevel: - etype = windows.EVENTLOG_WARNING_TYPE - eid = eventWarn - case logrus.InfoLevel: - etype = windows.EVENTLOG_INFORMATION_TYPE - eid = eventInfo - case logrus.DebugLevel: - etype = windows.EVENTLOG_INFORMATION_TYPE - eid = eventDebug - default: - return errors.New("unknown level") - } - - // If there is additional data, include it as a second string. - exts := "" - if len(e.Data) > 0 { - fs := bytes.Buffer{} - for k, v := range e.Data { - fs.WriteString(k) - fs.WriteByte('=') - fmt.Fprint(&fs, v) - fs.WriteByte(' ') - } - - exts = fs.String()[:fs.Len()-1] - eid += eventExtraOffset - } - - if h.log == nil { - fmt.Fprintf(os.Stderr, "%s [%s]\n", e.Message, exts) - return nil - } - - var ( - ss [2]*uint16 - err error - ) - - ss[0], err = windows.UTF16PtrFromString(e.Message) - if err != nil { - return err - } - - count := uint16(1) - if exts != "" { - ss[1], err = windows.UTF16PtrFromString(exts) - if err != nil { - return err - } - - count++ - } - - return windows.ReportEvent(h.log.Handle, etype, 0, eid, 0, count, 0, &ss[0], nil) -} - -func getServicePath() (string, error) { - p, err := exec.LookPath(os.Args[0]) - if err != nil { - return "", err - } - return filepath.Abs(p) -} - -func registerService() error { - p, err := getServicePath() - if err != nil { - return err - } - m, err := mgr.Connect() - if err != nil { - return err - } - defer m.Disconnect() - - depends := []string{} - - // This dependency is required on build 14393 (RS1) - // it is added to the platform in newer builds - if system.GetOSVersion().Build == 14393 { - depends = append(depends, "ConDrv") - } - - c := mgr.Config{ - ServiceType: windows.SERVICE_WIN32_OWN_PROCESS, - StartType: mgr.StartAutomatic, - ErrorControl: mgr.ErrorNormal, - Dependencies: depends, - DisplayName: "Docker Engine", - } - - // Configure the service to launch with the arguments that were just passed. - args := []string{"--run-service"} - for _, a := range os.Args[1:] { - if a != "--register-service" && a != "--unregister-service" { - args = append(args, a) - } - } - - s, err := m.CreateService(*flServiceName, p, c, args...) 
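The etwHook above adapts logrus levels to Windows event-log event types; any logrus hook is just the Levels/Fire pair. A portable sketch that keeps the same shape but writes to stderr instead of the event log:

package main

import (
	"fmt"
	"os"

	"github.com/sirupsen/logrus"
)

// stderrHook is a minimal logrus.Hook, shaped like etwHook but with the
// event-log call replaced by a plain fmt.Fprintf.
type stderrHook struct{}

func (h *stderrHook) Levels() []logrus.Level {
	return []logrus.Level{logrus.ErrorLevel, logrus.WarnLevel}
}

func (h *stderrHook) Fire(e *logrus.Entry) error {
	_, err := fmt.Fprintf(os.Stderr, "[hook] %s: %s\n", e.Level, e.Message)
	return err
}

func main() {
	logrus.AddHook(&stderrHook{})
	logrus.Warn("disk space low") // reaches both the formatter and the hook
}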
- if err != nil { - return err - } - defer s.Close() - - // See http://stackoverflow.com/questions/35151052/how-do-i-configure-failure-actions-of-a-windows-service-written-in-go - const ( - scActionNone = 0 - scActionRestart = 1 - scActionReboot = 2 - scActionRunCommand = 3 - - serviceConfigFailureActions = 2 - ) - - type serviceFailureActions struct { - ResetPeriod uint32 - RebootMsg *uint16 - Command *uint16 - ActionsCount uint32 - Actions uintptr - } - - type scAction struct { - Type uint32 - Delay uint32 - } - t := []scAction{ - {Type: scActionRestart, Delay: uint32(60 * time.Second / time.Millisecond)}, - {Type: scActionRestart, Delay: uint32(60 * time.Second / time.Millisecond)}, - {Type: scActionNone}, - } - lpInfo := serviceFailureActions{ResetPeriod: uint32(24 * time.Hour / time.Second), ActionsCount: uint32(3), Actions: uintptr(unsafe.Pointer(&t[0]))} - err = windows.ChangeServiceConfig2(s.Handle, serviceConfigFailureActions, (*byte)(unsafe.Pointer(&lpInfo))) - if err != nil { - return err - } - - return eventlog.Install(*flServiceName, p, false, eventlog.Info|eventlog.Warning|eventlog.Error) -} - -func unregisterService() error { - m, err := mgr.Connect() - if err != nil { - return err - } - defer m.Disconnect() - - s, err := m.OpenService(*flServiceName) - if err != nil { - return err - } - defer s.Close() - - eventlog.Remove(*flServiceName) - err = s.Delete() - if err != nil { - return err - } - return nil -} - -// initService is the entry point for running the daemon as a Windows -// service. It returns an indication to stop (if registering/un-registering); -// an indication of whether it is running as a service; and an error. -func initService(daemonCli *DaemonCli) (bool, bool, error) { - if *flUnregisterService { - if *flRegisterService { - return true, false, errors.New("--register-service and --unregister-service cannot be used together") - } - return true, false, unregisterService() - } - - if *flRegisterService { - return true, false, registerService() - } - - if !*flRunService { - return false, false, nil - } - - interactive, err := svc.IsAnInteractiveSession() - if err != nil { - return false, false, err - } - - h := &handler{ - tosvc: make(chan bool), - fromsvc: make(chan error), - daemonCli: daemonCli, - } - - var log *eventlog.Log - if !interactive { - log, err = eventlog.Open(*flServiceName) - if err != nil { - return false, false, err - } - } - - logrus.AddHook(&etwHook{log}) - logrus.SetOutput(ioutil.Discard) - - service = h - go func() { - if interactive { - err = debug.Run(*flServiceName, h) - } else { - err = svc.Run(*flServiceName, h) - } - - h.fromsvc <- err - }() - - // Wait for the first signal from the service handler. - err = <-h.fromsvc - if err != nil { - return false, false, err - } - return false, true, nil -} - -func (h *handler) started() error { - // This must be delayed until daemonCli initializes Config.Root - err := initPanicFile(filepath.Join(h.daemonCli.Config.Root, "panic.log")) - if err != nil { - return err - } - - h.tosvc <- false - return nil -} - -func (h *handler) stopped(err error) { - logrus.Debugf("Stopping service: %v", err) - h.tosvc <- err != nil - <-h.fromsvc -} - -func (h *handler) Execute(_ []string, r <-chan svc.ChangeRequest, s chan<- svc.Status) (bool, uint32) { - s <- svc.Status{State: svc.StartPending, Accepts: 0} - // Unblock initService() - h.fromsvc <- nil - - // Wait for initialization to complete. 
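The channel handshake that initService, started, stopped, and Execute rely on is easiest to see in isolation: fromsvc signals that the service callback is live, and tosvc later carries the initialization result. A stand-alone sketch of the same two-channel protocol; the channel names mirror the handler struct, the rest is illustrative:

package main

import "fmt"

func main() {
	tosvc := make(chan bool)
	fromsvc := make(chan error)
	done := make(chan struct{})

	// Stand-in for svc.Run driving handler.Execute.
	go func() {
		defer close(done)
		fromsvc <- nil    // service accepted; unblocks initService
		failed := <-tosvc // wait for the daemon init result
		fmt.Println("service saw init failure:", failed)
	}()

	// Stand-in for initService followed by daemon startup.
	if err := <-fromsvc; err != nil {
		panic(err)
	}
	tosvc <- false // false means initialization succeeded
	<-done
}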
- failed := <-h.tosvc - if failed { - logrus.Debug("Aborting service start due to failure during initialization") - return true, 1 - } - - s <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown | svc.Accepted(windows.SERVICE_ACCEPT_PARAMCHANGE)} - logrus.Debug("Service running") -Loop: - for { - select { - case failed = <-h.tosvc: - break Loop - case c := <-r: - switch c.Cmd { - case svc.Cmd(windows.SERVICE_CONTROL_PARAMCHANGE): - h.daemonCli.reloadConfig() - case svc.Interrogate: - s <- c.CurrentStatus - case svc.Stop, svc.Shutdown: - s <- svc.Status{State: svc.StopPending, Accepts: 0} - h.daemonCli.stop() - } - } - } - - removePanicFile() - if failed { - return true, 1 - } - return false, 0 -} - -func initPanicFile(path string) error { - var err error - panicFile, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0) - if err != nil { - return err - } - - st, err := panicFile.Stat() - if err != nil { - return err - } - - // If there are contents in the file already, move the file out of the way - // and replace it. - if st.Size() > 0 { - panicFile.Close() - os.Rename(path, path+".old") - panicFile, err = os.Create(path) - if err != nil { - return err - } - } - - // Update STD_ERROR_HANDLE to point to the panic file so that Go writes to - // it when it panics. Remember the old stderr to restore it before removing - // the panic file. - sh := windows.STD_ERROR_HANDLE - h, err := windows.GetStdHandle(uint32(sh)) - if err != nil { - return err - } - - oldStderr = h - - r, _, err := setStdHandle.Call(uintptr(sh), uintptr(panicFile.Fd())) - if r == 0 && err != nil { - return err - } - - // Reset os.Stderr to the panic file (so fmt.Fprintf(os.Stderr,...) actually gets redirected) - os.Stderr = os.NewFile(uintptr(panicFile.Fd()), "/dev/stderr") - - // Force threads that panic to write to stderr (the panicFile handle now), otherwise it will go into the ether - log.SetOutput(os.Stderr) - - return nil -} - -func removePanicFile() { - if st, err := panicFile.Stat(); err == nil { - if st.Size() == 0 { - sh := windows.STD_ERROR_HANDLE - setStdHandle.Call(uintptr(sh), uintptr(oldStderr)) - panicFile.Close() - os.Remove(panicFile.Name()) - } - } -} diff --git a/vendor/github.com/docker/docker/container/archive.go b/vendor/github.com/docker/docker/container/archive.go deleted file mode 100644 index ed72c4a40..000000000 --- a/vendor/github.com/docker/docker/container/archive.go +++ /dev/null @@ -1,86 +0,0 @@ -package container // import "github.com/docker/docker/container" - -import ( - "os" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/system" - "github.com/pkg/errors" -) - -// ResolvePath resolves the given path in the container to a resource on the -// host. Returns a resolved path (absolute path to the resource on the host), -// the absolute path to the resource relative to the container's rootfs, and -// an error if the path points to outside the container's rootfs. -func (container *Container) ResolvePath(path string) (resolvedPath, absPath string, err error) { - if container.BaseFS == nil { - return "", "", errors.New("ResolvePath: BaseFS of container " + container.ID + " is unexpectedly nil") - } - // Check if a drive letter supplied, it must be the system drive. No-op except on Windows - path, err = system.CheckSystemDriveAndRemoveDriveLetter(path, container.BaseFS) - if err != nil { - return "", "", err - } - - // Consider the given path as an absolute path in the container. 
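ResolvePath forces the user-supplied path to be absolute inside the rootfs before splitting off the directory part for in-scope resolution. The absolute-forcing step is a plain join under the separator; a stdlib-only sketch showing why traversal cannot escape the root (symlink evaluation, which the real code performs via the container filesystem, is deliberately omitted):

package main

import (
	"fmt"
	"path/filepath"
)

// scopedPath forces path to be interpreted as absolute inside root,
// so "../../etc/passwd" cannot escape. Symlinks are NOT resolved here;
// the daemon additionally evaluates them in container scope.
func scopedPath(root, path string) string {
	abs := filepath.Join(string(filepath.Separator), path) // "/..." inside the container
	return filepath.Join(root, abs)
}

func main() {
	root := "/var/lib/docker/rootfs/abc123"
	for _, p := range []string{"etc/hosts", "../../etc/passwd", "/tmp/x"} {
		fmt.Println(p, "->", scopedPath(root, p))
	}
}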
- absPath = archive.PreserveTrailingDotOrSeparator( - container.BaseFS.Join(string(container.BaseFS.Separator()), path), - path, - container.BaseFS.Separator()) - - // Split the absPath into its Directory and Base components. We will - // resolve the dir in the scope of the container then append the base. - dirPath, basePath := container.BaseFS.Split(absPath) - - resolvedDirPath, err := container.GetResourcePath(dirPath) - if err != nil { - return "", "", err - } - - // resolvedDirPath will have been cleaned (no trailing path separators) so - // we can manually join it with the base path element. - resolvedPath = resolvedDirPath + string(container.BaseFS.Separator()) + basePath - return resolvedPath, absPath, nil -} - -// StatPath is the unexported version of StatPath. Locks and mounts should -// be acquired before calling this method and the given path should be fully -// resolved to a path on the host corresponding to the given absolute path -// inside the container. -func (container *Container) StatPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) { - if container.BaseFS == nil { - return nil, errors.New("StatPath: BaseFS of container " + container.ID + " is unexpectedly nil") - } - driver := container.BaseFS - - lstat, err := driver.Lstat(resolvedPath) - if err != nil { - return nil, err - } - - var linkTarget string - if lstat.Mode()&os.ModeSymlink != 0 { - // Fully evaluate the symlink in the scope of the container rootfs. - hostPath, err := container.GetResourcePath(absPath) - if err != nil { - return nil, err - } - - linkTarget, err = driver.Rel(driver.Path(), hostPath) - if err != nil { - return nil, err - } - - // Make it an absolute path. - linkTarget = driver.Join(string(driver.Separator()), linkTarget) - } - - return &types.ContainerPathStat{ - Name: driver.Base(absPath), - Size: lstat.Size(), - Mode: lstat.Mode(), - Mtime: lstat.ModTime(), - LinkTarget: linkTarget, - }, nil -} diff --git a/vendor/github.com/docker/docker/container/container.go b/vendor/github.com/docker/docker/container/container.go deleted file mode 100644 index 5f31d8df1..000000000 --- a/vendor/github.com/docker/docker/container/container.go +++ /dev/null @@ -1,720 +0,0 @@ -package container // import "github.com/docker/docker/container" - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "os" - "path/filepath" - "runtime" - "strings" - "sync" - "syscall" - "time" - - "github.com/containerd/containerd/cio" - containertypes "github.com/docker/docker/api/types/container" - mounttypes "github.com/docker/docker/api/types/mount" - swarmtypes "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/container/stream" - "github.com/docker/docker/daemon/exec" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/daemon/logger/jsonfilelog" - "github.com/docker/docker/daemon/network" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/containerfs" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/signal" - "github.com/docker/docker/pkg/symlink" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/restartmanager" - "github.com/docker/docker/volume" - volumemounts "github.com/docker/docker/volume/mounts" - "github.com/docker/go-units" - agentexec "github.com/docker/swarmkit/agent/exec" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const configFileName = "config.v2.json" - -// ExitStatus provides exit 
reasons for a container. -type ExitStatus struct { - // The exit code with which the container exited. - ExitCode int - - // Whether the container encountered an OOM. - OOMKilled bool - - // Time at which the container died - ExitedAt time.Time -} - -// Container holds the structure defining a container object. -type Container struct { - StreamConfig *stream.Config - // embed for Container to support states directly. - *State `json:"State"` // Needed for Engine API version <= 1.11 - Root string `json:"-"` // Path to the "home" of the container, including metadata. - BaseFS containerfs.ContainerFS `json:"-"` // interface containing graphdriver mount - RWLayer layer.RWLayer `json:"-"` - ID string - Created time.Time - Managed bool - Path string - Args []string - Config *containertypes.Config - ImageID image.ID `json:"Image"` - NetworkSettings *network.Settings - LogPath string - Name string - Driver string - OS string - // MountLabel contains the options for the 'mount' command - MountLabel string - ProcessLabel string - RestartCount int - HasBeenStartedBefore bool - HasBeenManuallyStopped bool // used for unless-stopped restart policy - MountPoints map[string]*volumemounts.MountPoint - HostConfig *containertypes.HostConfig `json:"-"` // do not serialize the host config in the json, otherwise we'll make the container unportable - ExecCommands *exec.Store `json:"-"` - DependencyStore agentexec.DependencyGetter `json:"-"` - SecretReferences []*swarmtypes.SecretReference - ConfigReferences []*swarmtypes.ConfigReference - // logDriver for closing - LogDriver logger.Logger `json:"-"` - LogCopier *logger.Copier `json:"-"` - restartManager restartmanager.RestartManager - attachContext *attachContext - - // Fields here are specific to Unix platforms - AppArmorProfile string - HostnamePath string - HostsPath string - ShmPath string - ResolvConfPath string - SeccompProfile string - NoNewPrivileges bool - - // Fields here are specific to Windows - NetworkSharedContainerID string `json:"-"` - SharedEndpointList []string `json:"-"` -} - -// NewBaseContainer creates a new container with its -// basic configuration. -func NewBaseContainer(id, root string) *Container { - return &Container{ - ID: id, - State: NewState(), - ExecCommands: exec.NewStore(), - Root: root, - MountPoints: make(map[string]*volumemounts.MountPoint), - StreamConfig: stream.NewConfig(), - attachContext: &attachContext{}, - } -} - -// FromDisk loads the container configuration stored in the host. -func (container *Container) FromDisk() error { - pth, err := container.ConfigPath() - if err != nil { - return err - } - - jsonSource, err := os.Open(pth) - if err != nil { - return err - } - defer jsonSource.Close() - - dec := json.NewDecoder(jsonSource) - - // Load container settings - if err := dec.Decode(container); err != nil { - return err - } - - // Ensure the operating system is set if blank. Assume it is the OS of the - // host OS if not, to ensure containers created before multiple-OS - // support are migrated - if container.OS == "" { - container.OS = runtime.GOOS - } - - return container.readHostConfig() -} - -// toDisk saves the container configuration on disk and returns a deep copy. 
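FromDisk decodes config.v2.json straight into the Container struct and then backfills OS for containers created before multiple-OS support. A cut-down sketch of that load-and-default step; miniContainer is a hypothetical two-field stand-in:

package main

import (
	"encoding/json"
	"fmt"
	"runtime"
	"strings"
)

// miniContainer is a two-field stand-in for the persisted container config.
type miniContainer struct {
	ID string `json:"ID"`
	OS string `json:"OS"`
}

func main() {
	// A pre-multi-OS config has no OS field at all.
	src := strings.NewReader(`{"ID":"abc123"}`)

	var c miniContainer
	if err := json.NewDecoder(src).Decode(&c); err != nil {
		panic(err)
	}
	// Same migration rule as FromDisk: blank OS means "host OS".
	if c.OS == "" {
		c.OS = runtime.GOOS
	}
	fmt.Printf("%+v\n", c)
}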
-func (container *Container) toDisk() (*Container, error) { - var ( - buf bytes.Buffer - deepCopy Container - ) - pth, err := container.ConfigPath() - if err != nil { - return nil, err - } - - // Save container settings - f, err := ioutils.NewAtomicFileWriter(pth, 0600) - if err != nil { - return nil, err - } - defer f.Close() - - w := io.MultiWriter(&buf, f) - if err := json.NewEncoder(w).Encode(container); err != nil { - return nil, err - } - - if err := json.NewDecoder(&buf).Decode(&deepCopy); err != nil { - return nil, err - } - deepCopy.HostConfig, err = container.WriteHostConfig() - if err != nil { - return nil, err - } - - return &deepCopy, nil -} - -// CheckpointTo makes the Container's current state visible to queries, and persists state. -// Callers must hold a Container lock. -func (container *Container) CheckpointTo(store ViewDB) error { - deepCopy, err := container.toDisk() - if err != nil { - return err - } - return store.Save(deepCopy) -} - -// readHostConfig reads the host configuration from disk for the container. -func (container *Container) readHostConfig() error { - container.HostConfig = &containertypes.HostConfig{} - // If the hostconfig file does not exist, do not read it. - // (We still have to initialize container.HostConfig, - // but that's OK, since we just did that above.) - pth, err := container.HostConfigPath() - if err != nil { - return err - } - - f, err := os.Open(pth) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - defer f.Close() - - if err := json.NewDecoder(f).Decode(&container.HostConfig); err != nil { - return err - } - - container.InitDNSHostConfig() - - return nil -} - -// WriteHostConfig saves the host configuration on disk for the container, -// and returns a deep copy of the saved object. Callers must hold a Container lock. -func (container *Container) WriteHostConfig() (*containertypes.HostConfig, error) { - var ( - buf bytes.Buffer - deepCopy containertypes.HostConfig - ) - - pth, err := container.HostConfigPath() - if err != nil { - return nil, err - } - - f, err := ioutils.NewAtomicFileWriter(pth, 0644) - if err != nil { - return nil, err - } - defer f.Close() - - w := io.MultiWriter(&buf, f) - if err := json.NewEncoder(w).Encode(&container.HostConfig); err != nil { - return nil, err - } - - if err := json.NewDecoder(&buf).Decode(&deepCopy); err != nil { - return nil, err - } - return &deepCopy, nil -} - -// SetupWorkingDirectory sets up the container's working directory as set in container.Config.WorkingDir -func (container *Container) SetupWorkingDirectory(rootIDs idtools.IDPair) error { - // TODO @jhowardmsft, @gupta-ak LCOW Support. This will need revisiting. - // We will need to do remote filesystem operations here. - if container.OS != runtime.GOOS { - return nil - } - - if container.Config.WorkingDir == "" { - return nil - } - - container.Config.WorkingDir = filepath.Clean(container.Config.WorkingDir) - pth, err := container.GetResourcePath(container.Config.WorkingDir) - if err != nil { - return err - } - - if err := idtools.MkdirAllAndChownNew(pth, 0755, rootIDs); err != nil { - pthInfo, err2 := os.Stat(pth) - if err2 == nil && pthInfo != nil && !pthInfo.IsDir() { - return errors.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir) - } - - return err - } - - return nil -} - -// GetResourcePath evaluates `path` in the scope of the container's BaseFS, with proper path -// sanitisation. 
Symlinks are all scoped to the BaseFS of the container, as -// though the container's BaseFS was `/`. -// -// The BaseFS of a container is the host-facing path which is bind-mounted as -// `/` inside the container. This method is essentially used to access a -// particular path inside the container as though you were a process in that -// container. -// -// NOTE: The returned path is *only* safely scoped inside the container's BaseFS -// if no component of the returned path changes (such as a component -// symlinking to a different path) between using this method and using the -// path. See symlink.FollowSymlinkInScope for more details. -func (container *Container) GetResourcePath(path string) (string, error) { - if container.BaseFS == nil { - return "", errors.New("GetResourcePath: BaseFS of container " + container.ID + " is unexpectedly nil") - } - // IMPORTANT - These are paths on the OS where the daemon is running, hence - // any filepath operations must be done in an OS agnostic way. - r, e := container.BaseFS.ResolveScopedPath(path, false) - - // Log this here on the daemon side as there's otherwise no indication apart - // from the error being propagated all the way back to the client. This makes - // debugging significantly easier and clearly indicates the error comes from the daemon. - if e != nil { - logrus.Errorf("Failed to ResolveScopedPath BaseFS %s path %s %s\n", container.BaseFS.Path(), path, e) - } - return r, e -} - -// GetRootResourcePath evaluates `path` in the scope of the container's root, with proper path -// sanitisation. Symlinks are all scoped to the root of the container, as -// though the container's root was `/`. -// -// The root of a container is the host-facing configuration metadata directory. -// Only use this method to safely access the container's `container.json` or -// other metadata files. If in doubt, use container.GetResourcePath. -// -// NOTE: The returned path is *only* safely scoped inside the container's root -// if no component of the returned path changes (such as a component -// symlinking to a different path) between using this method and using the -// path. See symlink.FollowSymlinkInScope for more details. -func (container *Container) GetRootResourcePath(path string) (string, error) { - // IMPORTANT - These are paths on the OS where the daemon is running, hence - // any filepath operations must be done in an OS agnostic way. - cleanPath := filepath.Join(string(os.PathSeparator), path) - return symlink.FollowSymlinkInScope(filepath.Join(container.Root, cleanPath), container.Root) -} - -// ExitOnNext signals to the monitor that it should not restart the container -// after we send the kill signal. -func (container *Container) ExitOnNext() { - container.RestartManager().Cancel() -} - -// HostConfigPath returns the path to the container's JSON hostconfig -func (container *Container) HostConfigPath() (string, error) { - return container.GetRootResourcePath("hostconfig.json") -} - -// ConfigPath returns the path to the container's JSON config -func (container *Container) ConfigPath() (string, error) { - return container.GetRootResourcePath(configFileName) -} - -// CheckpointDir returns the directory checkpoints are stored in -func (container *Container) CheckpointDir() string { - return filepath.Join(container.Root, "checkpoints") -} - -// StartLogger starts a new logger driver for the container. 
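toDisk and WriteHostConfig above get their deep copies by encoding once through an io.MultiWriter and decoding the buffered bytes back into a fresh value. The same round trip in isolation; hostConfig here is a one-field stand-in, with os.Stdout in place of the atomic file writer:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"os"
)

type hostConfig struct {
	DNS []string
}

// writeAndCopy encodes cfg to w while capturing the same bytes in a
// buffer, then decodes the buffer back into a fresh value: a JSON-level
// deep copy, as in WriteHostConfig.
func writeAndCopy(w io.Writer, cfg *hostConfig) (*hostConfig, error) {
	var buf bytes.Buffer
	if err := json.NewEncoder(io.MultiWriter(&buf, w)).Encode(cfg); err != nil {
		return nil, err
	}
	var cp hostConfig
	if err := json.NewDecoder(&buf).Decode(&cp); err != nil {
		return nil, err
	}
	return &cp, nil
}

func main() {
	cfg := &hostConfig{DNS: []string{"8.8.8.8"}}
	cp, err := writeAndCopy(os.Stdout, cfg) // stdout stands in for the atomic file writer
	if err != nil {
		panic(err)
	}
	cp.DNS[0] = "1.1.1.1" // mutating the copy leaves the original intact
	fmt.Println(cfg.DNS[0], cp.DNS[0])
}

One encode pass serves both persistence and copying, which keeps the on-disk bytes and the in-memory copy guaranteed to match.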
-func (container *Container) StartLogger() (logger.Logger, error) { - cfg := container.HostConfig.LogConfig - initDriver, err := logger.GetLogDriver(cfg.Type) - if err != nil { - return nil, errors.Wrap(err, "failed to get logging factory") - } - info := logger.Info{ - Config: cfg.Config, - ContainerID: container.ID, - ContainerName: container.Name, - ContainerEntrypoint: container.Path, - ContainerArgs: container.Args, - ContainerImageID: container.ImageID.String(), - ContainerImageName: container.Config.Image, - ContainerCreated: container.Created, - ContainerEnv: container.Config.Env, - ContainerLabels: container.Config.Labels, - DaemonName: "docker", - } - - // Set logging file for "json-logger" - if cfg.Type == jsonfilelog.Name { - info.LogPath, err = container.GetRootResourcePath(fmt.Sprintf("%s-json.log", container.ID)) - if err != nil { - return nil, err - } - - container.LogPath = info.LogPath - } - - l, err := initDriver(info) - if err != nil { - return nil, err - } - - if containertypes.LogMode(cfg.Config["mode"]) == containertypes.LogModeNonBlock { - bufferSize := int64(-1) - if s, exists := cfg.Config["max-buffer-size"]; exists { - bufferSize, err = units.RAMInBytes(s) - if err != nil { - return nil, err - } - } - l = logger.NewRingLogger(l, info, bufferSize) - } - return l, nil -} - -// GetProcessLabel returns the process label for the container. -func (container *Container) GetProcessLabel() string { - // even if we have a process label return "" if we are running - // in privileged mode - if container.HostConfig.Privileged { - return "" - } - return container.ProcessLabel -} - -// GetMountLabel returns the mounting label for the container. -// This label is empty if the container is privileged. -func (container *Container) GetMountLabel() string { - return container.MountLabel -} - -// GetExecIDs returns the list of exec commands running on the container. -func (container *Container) GetExecIDs() []string { - return container.ExecCommands.List() -} - -// ShouldRestart decides whether the daemon should restart the container or not. -// This is based on the container's restart policy. -func (container *Container) ShouldRestart() bool { - shouldRestart, _, _ := container.RestartManager().ShouldRestart(uint32(container.ExitCode()), container.HasBeenManuallyStopped, container.FinishedAt.Sub(container.StartedAt)) - return shouldRestart -} - -// AddMountPointWithVolume adds a new mount point configured with a volume to the container. 
-func (container *Container) AddMountPointWithVolume(destination string, vol volume.Volume, rw bool) {
-	operatingSystem := container.OS
-	if operatingSystem == "" {
-		operatingSystem = runtime.GOOS
-	}
-	volumeParser := volumemounts.NewParser(operatingSystem)
-	container.MountPoints[destination] = &volumemounts.MountPoint{
-		Type:        mounttypes.TypeVolume,
-		Name:        vol.Name(),
-		Driver:      vol.DriverName(),
-		Destination: destination,
-		RW:          rw,
-		Volume:      vol,
-		CopyData:    volumeParser.DefaultCopyMode(),
-	}
-}
-
-// UnmountVolumes unmounts all volumes
-func (container *Container) UnmountVolumes(volumeEventLog func(name, action string, attributes map[string]string)) error {
-	var errors []string
-	for _, volumeMount := range container.MountPoints {
-		if volumeMount.Volume == nil {
-			continue
-		}
-
-		if err := volumeMount.Cleanup(); err != nil {
-			errors = append(errors, err.Error())
-			continue
-		}
-
-		attributes := map[string]string{
-			"driver":    volumeMount.Volume.DriverName(),
-			"container": container.ID,
-		}
-		volumeEventLog(volumeMount.Volume.Name(), "unmount", attributes)
-	}
-	if len(errors) > 0 {
-		return fmt.Errorf("error while unmounting volumes for container %s: %s", container.ID, strings.Join(errors, "; "))
-	}
-	return nil
-}
-
-// IsDestinationMounted checks whether a path is mounted on the container or not.
-func (container *Container) IsDestinationMounted(destination string) bool {
-	return container.MountPoints[destination] != nil
-}
-
-// StopSignal returns the signal used to stop the container.
-func (container *Container) StopSignal() int {
-	var stopSignal syscall.Signal
-	if container.Config.StopSignal != "" {
-		stopSignal, _ = signal.ParseSignal(container.Config.StopSignal)
-	}
-
-	if int(stopSignal) == 0 {
-		stopSignal, _ = signal.ParseSignal(signal.DefaultStopSignal)
-	}
-	return int(stopSignal)
-}
-
-// StopTimeout returns the timeout (in seconds) used to stop the container.
-func (container *Container) StopTimeout() int {
-	if container.Config.StopTimeout != nil {
-		return *container.Config.StopTimeout
-	}
-	return DefaultStopTimeout
-}
-
-// InitDNSHostConfig ensures that the DNS fields are never nil.
-// New containers don't ever have those fields nil,
-// but pre-created containers can still have those nil values.
-// The non-recommended host configuration in the start API can
-// make these fields nil again; this corrects that issue until
-// we remove that behavior for good.
-// See https://github.com/docker/docker/pull/17779
-// for a more detailed explanation on why we don't want that.
-func (container *Container) InitDNSHostConfig() {
-	container.Lock()
-	defer container.Unlock()
-	if container.HostConfig.DNS == nil {
-		container.HostConfig.DNS = make([]string, 0)
-	}
-
-	if container.HostConfig.DNSSearch == nil {
-		container.HostConfig.DNSSearch = make([]string, 0)
-	}
-
-	if container.HostConfig.DNSOptions == nil {
-		container.HostConfig.DNSOptions = make([]string, 0)
-	}
-}
-
-// UpdateMonitor updates the monitor configuration for a running container.
-func (container *Container) UpdateMonitor(restartPolicy containertypes.RestartPolicy) {
-	type policySetter interface {
-		SetPolicy(containertypes.RestartPolicy)
-	}
-
-	if rm, ok := container.RestartManager().(policySetter); ok {
-		rm.SetPolicy(restartPolicy)
-	}
-}
-
-// FullHostname returns the hostname, with the optional domain name appended to it.
-func (container *Container) FullHostname() string { - fullHostname := container.Config.Hostname - if container.Config.Domainname != "" { - fullHostname = fmt.Sprintf("%s.%s", fullHostname, container.Config.Domainname) - } - return fullHostname -} - -// RestartManager returns the current restartmanager instance connected to container. -func (container *Container) RestartManager() restartmanager.RestartManager { - if container.restartManager == nil { - container.restartManager = restartmanager.New(container.HostConfig.RestartPolicy, container.RestartCount) - } - return container.restartManager -} - -// ResetRestartManager initializes new restartmanager based on container config -func (container *Container) ResetRestartManager(resetCount bool) { - if container.restartManager != nil { - container.restartManager.Cancel() - } - if resetCount { - container.RestartCount = 0 - } - container.restartManager = nil -} - -type attachContext struct { - ctx context.Context - cancel context.CancelFunc - mu sync.Mutex -} - -// InitAttachContext initializes or returns existing context for attach calls to -// track container liveness. -func (container *Container) InitAttachContext() context.Context { - container.attachContext.mu.Lock() - defer container.attachContext.mu.Unlock() - if container.attachContext.ctx == nil { - container.attachContext.ctx, container.attachContext.cancel = context.WithCancel(context.Background()) - } - return container.attachContext.ctx -} - -// CancelAttachContext cancels attach context. All attach calls should detach -// after this call. -func (container *Container) CancelAttachContext() { - container.attachContext.mu.Lock() - if container.attachContext.ctx != nil { - container.attachContext.cancel() - container.attachContext.ctx = nil - } - container.attachContext.mu.Unlock() -} - -func (container *Container) startLogging() error { - if container.HostConfig.LogConfig.Type == "none" { - return nil // do not start logging routines - } - - l, err := container.StartLogger() - if err != nil { - return fmt.Errorf("failed to initialize logging driver: %v", err) - } - - copier := logger.NewCopier(map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l) - container.LogCopier = copier - copier.Run() - container.LogDriver = l - - return nil -} - -// StdinPipe gets the stdin stream of the container -func (container *Container) StdinPipe() io.WriteCloser { - return container.StreamConfig.StdinPipe() -} - -// StdoutPipe gets the stdout stream of the container -func (container *Container) StdoutPipe() io.ReadCloser { - return container.StreamConfig.StdoutPipe() -} - -// StderrPipe gets the stderr stream of the container -func (container *Container) StderrPipe() io.ReadCloser { - return container.StreamConfig.StderrPipe() -} - -// CloseStreams closes the container's stdio streams -func (container *Container) CloseStreams() error { - return container.StreamConfig.CloseStreams() -} - -// InitializeStdio is called by libcontainerd to connect the stdio. 
-func (container *Container) InitializeStdio(iop *cio.DirectIO) (cio.IO, error) { - if err := container.startLogging(); err != nil { - container.Reset(false) - return nil, err - } - - container.StreamConfig.CopyToPipe(iop) - - if container.StreamConfig.Stdin() == nil && !container.Config.Tty { - if iop.Stdin != nil { - if err := iop.Stdin.Close(); err != nil { - logrus.Warnf("error closing stdin: %+v", err) - } - } - } - - return &rio{IO: iop, sc: container.StreamConfig}, nil -} - -// MountsResourcePath returns the path where mounts are stored for the given mount -func (container *Container) MountsResourcePath(mount string) (string, error) { - return container.GetRootResourcePath(filepath.Join("mounts", mount)) -} - -// SecretMountPath returns the path of the secret mount for the container -func (container *Container) SecretMountPath() (string, error) { - return container.MountsResourcePath("secrets") -} - -// SecretFilePath returns the path to the location of a secret on the host. -func (container *Container) SecretFilePath(secretRef swarmtypes.SecretReference) (string, error) { - secrets, err := container.SecretMountPath() - if err != nil { - return "", err - } - return filepath.Join(secrets, secretRef.SecretID), nil -} - -func getSecretTargetPath(r *swarmtypes.SecretReference) string { - if filepath.IsAbs(r.File.Name) { - return r.File.Name - } - - return filepath.Join(containerSecretMountPath, r.File.Name) -} - -// CreateDaemonEnvironment creates a new environment variable slice for this container. -func (container *Container) CreateDaemonEnvironment(tty bool, linkedEnv []string) []string { - // Setup environment - os := container.OS - if os == "" { - os = runtime.GOOS - } - env := []string{} - if runtime.GOOS != "windows" || (runtime.GOOS == "windows" && os == "linux") { - env = []string{ - "PATH=" + system.DefaultPathEnv(os), - "HOSTNAME=" + container.Config.Hostname, - } - if tty { - env = append(env, "TERM=xterm") - } - env = append(env, linkedEnv...) - } - - // because the env on the container can override certain default values - // we need to replace the 'env' keys where they match and append anything - // else. 
- env = ReplaceOrAppendEnvValues(env, container.Config.Env) - return env -} - -type rio struct { - cio.IO - - sc *stream.Config -} - -func (i *rio) Close() error { - i.IO.Close() - - return i.sc.CloseStreams() -} - -func (i *rio) Wait() { - i.sc.Wait() - - i.IO.Wait() -} diff --git a/vendor/github.com/docker/docker/container/container_unix.go b/vendor/github.com/docker/docker/container/container_unix.go deleted file mode 100644 index ed664f3ee..000000000 --- a/vendor/github.com/docker/docker/container/container_unix.go +++ /dev/null @@ -1,463 +0,0 @@ -// +build !windows - -package container // import "github.com/docker/docker/container" - -import ( - "io/ioutil" - "os" - "path/filepath" - - "github.com/containerd/continuity/fs" - "github.com/docker/docker/api/types" - containertypes "github.com/docker/docker/api/types/container" - mounttypes "github.com/docker/docker/api/types/mount" - swarmtypes "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/volume" - volumemounts "github.com/docker/docker/volume/mounts" - "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -const ( - // DefaultStopTimeout sets the default time, in seconds, to wait - // for the graceful container stop before forcefully terminating it. - DefaultStopTimeout = 10 - - containerSecretMountPath = "/run/secrets" -) - -// TrySetNetworkMount attempts to set the network mounts given a provided destination and -// the path to use for it; return true if the given destination was a network mount file -func (container *Container) TrySetNetworkMount(destination string, path string) bool { - if destination == "/etc/resolv.conf" { - container.ResolvConfPath = path - return true - } - if destination == "/etc/hostname" { - container.HostnamePath = path - return true - } - if destination == "/etc/hosts" { - container.HostsPath = path - return true - } - - return false -} - -// BuildHostnameFile writes the container's hostname file. -func (container *Container) BuildHostnameFile() error { - hostnamePath, err := container.GetRootResourcePath("hostname") - if err != nil { - return err - } - container.HostnamePath = hostnamePath - return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) -} - -// NetworkMounts returns the list of network mounts. 
-func (container *Container) NetworkMounts() []Mount { - var mounts []Mount - shared := container.HostConfig.NetworkMode.IsContainer() - parser := volumemounts.NewParser(container.OS) - if container.ResolvConfPath != "" { - if _, err := os.Stat(container.ResolvConfPath); err != nil { - logrus.Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err) - } else { - writable := !container.HostConfig.ReadonlyRootfs - if m, exists := container.MountPoints["/etc/resolv.conf"]; exists { - writable = m.RW - } else { - label.Relabel(container.ResolvConfPath, container.MountLabel, shared) - } - mounts = append(mounts, Mount{ - Source: container.ResolvConfPath, - Destination: "/etc/resolv.conf", - Writable: writable, - Propagation: string(parser.DefaultPropagationMode()), - }) - } - } - if container.HostnamePath != "" { - if _, err := os.Stat(container.HostnamePath); err != nil { - logrus.Warnf("HostnamePath set to %q, but can't stat this filename (err = %v); skipping", container.HostnamePath, err) - } else { - writable := !container.HostConfig.ReadonlyRootfs - if m, exists := container.MountPoints["/etc/hostname"]; exists { - writable = m.RW - } else { - label.Relabel(container.HostnamePath, container.MountLabel, shared) - } - mounts = append(mounts, Mount{ - Source: container.HostnamePath, - Destination: "/etc/hostname", - Writable: writable, - Propagation: string(parser.DefaultPropagationMode()), - }) - } - } - if container.HostsPath != "" { - if _, err := os.Stat(container.HostsPath); err != nil { - logrus.Warnf("HostsPath set to %q, but can't stat this filename (err = %v); skipping", container.HostsPath, err) - } else { - writable := !container.HostConfig.ReadonlyRootfs - if m, exists := container.MountPoints["/etc/hosts"]; exists { - writable = m.RW - } else { - label.Relabel(container.HostsPath, container.MountLabel, shared) - } - mounts = append(mounts, Mount{ - Source: container.HostsPath, - Destination: "/etc/hosts", - Writable: writable, - Propagation: string(parser.DefaultPropagationMode()), - }) - } - } - return mounts -} - -// CopyImagePathContent copies files in destination to the volume. 
-func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error { - rootfs, err := container.GetResourcePath(destination) - if err != nil { - return err - } - - if _, err := os.Stat(rootfs); err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - - id := stringid.GenerateNonCryptoID() - path, err := v.Mount(id) - if err != nil { - return err - } - - defer func() { - if err := v.Unmount(id); err != nil { - logrus.Warnf("error while unmounting volume %s: %v", v.Name(), err) - } - }() - if err := label.Relabel(path, container.MountLabel, true); err != nil && err != unix.ENOTSUP { - return err - } - return copyExistingContents(rootfs, path) -} - -// ShmResourcePath returns path to shm -func (container *Container) ShmResourcePath() (string, error) { - return container.MountsResourcePath("shm") -} - -// HasMountFor checks if path is a mountpoint -func (container *Container) HasMountFor(path string) bool { - _, exists := container.MountPoints[path] - if exists { - return true - } - - // Also search among the tmpfs mounts - for dest := range container.HostConfig.Tmpfs { - if dest == path { - return true - } - } - - return false -} - -// UnmountIpcMount uses the provided unmount function to unmount shm if it was mounted -func (container *Container) UnmountIpcMount(unmount func(pth string) error) error { - if container.HasMountFor("/dev/shm") { - return nil - } - - // container.ShmPath should not be used here as it may point - // to the host's or other container's /dev/shm - shmPath, err := container.ShmResourcePath() - if err != nil { - return err - } - if shmPath == "" { - return nil - } - if err = unmount(shmPath); err != nil && !os.IsNotExist(err) { - if mounted, mErr := mount.Mounted(shmPath); mounted || mErr != nil { - return errors.Wrapf(err, "umount %s", shmPath) - } - } - return nil -} - -// IpcMounts returns the list of IPC mounts -func (container *Container) IpcMounts() []Mount { - var mounts []Mount - parser := volumemounts.NewParser(container.OS) - - if container.HasMountFor("/dev/shm") { - return mounts - } - if container.ShmPath == "" { - return mounts - } - - label.SetFileLabel(container.ShmPath, container.MountLabel) - mounts = append(mounts, Mount{ - Source: container.ShmPath, - Destination: "/dev/shm", - Writable: true, - Propagation: string(parser.DefaultPropagationMode()), - }) - - return mounts -} - -// SecretMounts returns the mounts for the secret path. 
-func (container *Container) SecretMounts() ([]Mount, error) { - var mounts []Mount - for _, r := range container.SecretReferences { - if r.File == nil { - continue - } - src, err := container.SecretFilePath(*r) - if err != nil { - return nil, err - } - mounts = append(mounts, Mount{ - Source: src, - Destination: getSecretTargetPath(r), - Writable: false, - }) - } - for _, r := range container.ConfigReferences { - fPath, err := container.ConfigFilePath(*r) - if err != nil { - return nil, err - } - mounts = append(mounts, Mount{ - Source: fPath, - Destination: r.File.Name, - Writable: false, - }) - } - - return mounts, nil -} - -// UnmountSecrets unmounts the local tmpfs for secrets -func (container *Container) UnmountSecrets() error { - p, err := container.SecretMountPath() - if err != nil { - return err - } - if _, err := os.Stat(p); err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - - return mount.RecursiveUnmount(p) -} - -type conflictingUpdateOptions string - -func (e conflictingUpdateOptions) Error() string { - return string(e) -} - -func (e conflictingUpdateOptions) Conflict() {} - -// UpdateContainer updates configuration of a container. Callers must hold a Lock on the Container. -func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error { - // update resources of container - resources := hostConfig.Resources - cResources := &container.HostConfig.Resources - - // validate NanoCPUs, CPUPeriod, and CPUQuota - // Because NanoCPU effectively updates CPUPeriod/CPUQuota, - // once NanoCPU is already set, updating CPUPeriod/CPUQuota will be blocked, and vice versa. - // In the following we make sure the intended update (resources) does not conflict with the existing (cResource). - if resources.NanoCPUs > 0 && cResources.CPUPeriod > 0 { - return conflictingUpdateOptions("Conflicting options: Nano CPUs cannot be updated as CPU Period has already been set") - } - if resources.NanoCPUs > 0 && cResources.CPUQuota > 0 { - return conflictingUpdateOptions("Conflicting options: Nano CPUs cannot be updated as CPU Quota has already been set") - } - if resources.CPUPeriod > 0 && cResources.NanoCPUs > 0 { - return conflictingUpdateOptions("Conflicting options: CPU Period cannot be updated as NanoCPUs has already been set") - } - if resources.CPUQuota > 0 && cResources.NanoCPUs > 0 { - return conflictingUpdateOptions("Conflicting options: CPU Quota cannot be updated as NanoCPUs has already been set") - } - - if resources.BlkioWeight != 0 { - cResources.BlkioWeight = resources.BlkioWeight - } - if resources.CPUShares != 0 { - cResources.CPUShares = resources.CPUShares - } - if resources.NanoCPUs != 0 { - cResources.NanoCPUs = resources.NanoCPUs - } - if resources.CPUPeriod != 0 { - cResources.CPUPeriod = resources.CPUPeriod - } - if resources.CPUQuota != 0 { - cResources.CPUQuota = resources.CPUQuota - } - if resources.CpusetCpus != "" { - cResources.CpusetCpus = resources.CpusetCpus - } - if resources.CpusetMems != "" { - cResources.CpusetMems = resources.CpusetMems - } - if resources.Memory != 0 { - // if memory limit smaller than already set memoryswap limit and doesn't - // update the memoryswap limit, then error out. 
-		if resources.Memory > cResources.MemorySwap && resources.MemorySwap == 0 {
-			return conflictingUpdateOptions("Memory limit should be smaller than already set memoryswap limit, update the memoryswap at the same time")
-		}
-		cResources.Memory = resources.Memory
-	}
-	if resources.MemorySwap != 0 {
-		cResources.MemorySwap = resources.MemorySwap
-	}
-	if resources.MemoryReservation != 0 {
-		cResources.MemoryReservation = resources.MemoryReservation
-	}
-	if resources.KernelMemory != 0 {
-		cResources.KernelMemory = resources.KernelMemory
-	}
-	if resources.CPURealtimePeriod != 0 {
-		cResources.CPURealtimePeriod = resources.CPURealtimePeriod
-	}
-	if resources.CPURealtimeRuntime != 0 {
-		cResources.CPURealtimeRuntime = resources.CPURealtimeRuntime
-	}
-
-	// update HostConfig of container
-	if hostConfig.RestartPolicy.Name != "" {
-		if container.HostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() {
-			return conflictingUpdateOptions("Restart policy cannot be updated because AutoRemove is enabled for the container")
-		}
-		container.HostConfig.RestartPolicy = hostConfig.RestartPolicy
-	}
-
-	return nil
-}
-
-// DetachAndUnmount uses a detached mount on all mount destinations, then
-// unmounts each volume normally.
-// This is used from daemon/archive for `docker cp`
-func (container *Container) DetachAndUnmount(volumeEventLog func(name, action string, attributes map[string]string)) error {
-	networkMounts := container.NetworkMounts()
-	mountPaths := make([]string, 0, len(container.MountPoints)+len(networkMounts))
-
-	for _, mntPoint := range container.MountPoints {
-		dest, err := container.GetResourcePath(mntPoint.Destination)
-		if err != nil {
-			logrus.Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, mntPoint.Destination, err)
-			continue
-		}
-		mountPaths = append(mountPaths, dest)
-	}
-
-	for _, m := range networkMounts {
-		dest, err := container.GetResourcePath(m.Destination)
-		if err != nil {
-			logrus.Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, m.Destination, err)
-			continue
-		}
-		mountPaths = append(mountPaths, dest)
-	}
-
-	for _, mountPath := range mountPaths {
-		if err := mount.Unmount(mountPath); err != nil {
-			logrus.Warnf("%s unmountVolumes: Failed to do lazy umount for volume '%s': %v", container.ID, mountPath, err)
-		}
-	}
-	return container.UnmountVolumes(volumeEventLog)
-}
-
-// copyExistingContents copies from the source to the destination and
-// ensures the ownership is appropriately set.
-func copyExistingContents(source, destination string) error { - dstList, err := ioutil.ReadDir(destination) - if err != nil { - return err - } - if len(dstList) != 0 { - // destination is not empty, do not copy - return nil - } - return fs.CopyDir(destination, source) -} - -// TmpfsMounts returns the list of tmpfs mounts -func (container *Container) TmpfsMounts() ([]Mount, error) { - parser := volumemounts.NewParser(container.OS) - var mounts []Mount - for dest, data := range container.HostConfig.Tmpfs { - mounts = append(mounts, Mount{ - Source: "tmpfs", - Destination: dest, - Data: data, - }) - } - for dest, mnt := range container.MountPoints { - if mnt.Type == mounttypes.TypeTmpfs { - data, err := parser.ConvertTmpfsOptions(mnt.Spec.TmpfsOptions, mnt.Spec.ReadOnly) - if err != nil { - return nil, err - } - mounts = append(mounts, Mount{ - Source: "tmpfs", - Destination: dest, - Data: data, - }) - } - } - return mounts, nil -} - -// EnableServiceDiscoveryOnDefaultNetwork Enable service discovery on default network -func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool { - return false -} - -// GetMountPoints gives a platform specific transformation to types.MountPoint. Callers must hold a Container lock. -func (container *Container) GetMountPoints() []types.MountPoint { - mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) - for _, m := range container.MountPoints { - mountPoints = append(mountPoints, types.MountPoint{ - Type: m.Type, - Name: m.Name, - Source: m.Path(), - Destination: m.Destination, - Driver: m.Driver, - Mode: m.Mode, - RW: m.RW, - Propagation: m.Propagation, - }) - } - return mountPoints -} - -// ConfigFilePath returns the path to the on-disk location of a config. -// On unix, configs are always considered secret -func (container *Container) ConfigFilePath(configRef swarmtypes.ConfigReference) (string, error) { - mounts, err := container.SecretMountPath() - if err != nil { - return "", err - } - return filepath.Join(mounts, configRef.ConfigID), nil -} diff --git a/vendor/github.com/docker/docker/container/container_windows.go b/vendor/github.com/docker/docker/container/container_windows.go deleted file mode 100644 index b5bdb5bc3..000000000 --- a/vendor/github.com/docker/docker/container/container_windows.go +++ /dev/null @@ -1,213 +0,0 @@ -package container // import "github.com/docker/docker/container" - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/docker/docker/api/types" - containertypes "github.com/docker/docker/api/types/container" - swarmtypes "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/pkg/system" -) - -const ( - containerSecretMountPath = `C:\ProgramData\Docker\secrets` - containerInternalSecretMountPath = `C:\ProgramData\Docker\internal\secrets` - containerInternalConfigsDirPath = `C:\ProgramData\Docker\internal\configs` - - // DefaultStopTimeout is the timeout (in seconds) for the shutdown call on a container - DefaultStopTimeout = 30 -) - -// UnmountIpcMount unmounts Ipc related mounts. -// This is a NOOP on windows. -func (container *Container) UnmountIpcMount(unmount func(pth string) error) error { - return nil -} - -// IpcMounts returns the list of Ipc related mounts. -func (container *Container) IpcMounts() []Mount { - return nil -} - -// CreateSecretSymlinks creates symlinks to files in the secret mount. 
-func (container *Container) CreateSecretSymlinks() error { - for _, r := range container.SecretReferences { - if r.File == nil { - continue - } - resolvedPath, _, err := container.ResolvePath(getSecretTargetPath(r)) - if err != nil { - return err - } - if err := system.MkdirAll(filepath.Dir(resolvedPath), 0, ""); err != nil { - return err - } - if err := os.Symlink(filepath.Join(containerInternalSecretMountPath, r.SecretID), resolvedPath); err != nil { - return err - } - } - - return nil -} - -// SecretMounts returns the mount for the secret path. -// All secrets are stored in a single mount on Windows. Target symlinks are -// created for each secret, pointing to the files in this mount. -func (container *Container) SecretMounts() ([]Mount, error) { - var mounts []Mount - if len(container.SecretReferences) > 0 { - src, err := container.SecretMountPath() - if err != nil { - return nil, err - } - mounts = append(mounts, Mount{ - Source: src, - Destination: containerInternalSecretMountPath, - Writable: false, - }) - } - - return mounts, nil -} - -// UnmountSecrets unmounts the fs for secrets -func (container *Container) UnmountSecrets() error { - p, err := container.SecretMountPath() - if err != nil { - return err - } - return os.RemoveAll(p) -} - -// CreateConfigSymlinks creates symlinks to files in the config mount. -func (container *Container) CreateConfigSymlinks() error { - for _, configRef := range container.ConfigReferences { - if configRef.File == nil { - continue - } - resolvedPath, _, err := container.ResolvePath(configRef.File.Name) - if err != nil { - return err - } - if err := system.MkdirAll(filepath.Dir(resolvedPath), 0, ""); err != nil { - return err - } - if err := os.Symlink(filepath.Join(containerInternalConfigsDirPath, configRef.ConfigID), resolvedPath); err != nil { - return err - } - } - - return nil -} - -// ConfigMounts returns the mount for configs. -// TODO: Right now Windows doesn't really have a "secure" storage for secrets, -// however some configs may contain secrets. Once secure storage is worked out, -// configs and secret handling should be merged. -func (container *Container) ConfigMounts() []Mount { - var mounts []Mount - if len(container.ConfigReferences) > 0 { - mounts = append(mounts, Mount{ - Source: container.ConfigsDirPath(), - Destination: containerInternalConfigsDirPath, - Writable: false, - }) - } - - return mounts -} - -// DetachAndUnmount unmounts all volumes. -// On Windows it only delegates to `UnmountVolumes` since there is nothing to -// force unmount. -func (container *Container) DetachAndUnmount(volumeEventLog func(name, action string, attributes map[string]string)) error { - return container.UnmountVolumes(volumeEventLog) -} - -// TmpfsMounts returns the list of tmpfs mounts -func (container *Container) TmpfsMounts() ([]Mount, error) { - var mounts []Mount - return mounts, nil -} - -// UpdateContainer updates configuration of a container. Callers must hold a Lock on the Container. 
-func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error { - resources := hostConfig.Resources - if resources.CPUShares != 0 || - resources.Memory != 0 || - resources.NanoCPUs != 0 || - resources.CgroupParent != "" || - resources.BlkioWeight != 0 || - len(resources.BlkioWeightDevice) != 0 || - len(resources.BlkioDeviceReadBps) != 0 || - len(resources.BlkioDeviceWriteBps) != 0 || - len(resources.BlkioDeviceReadIOps) != 0 || - len(resources.BlkioDeviceWriteIOps) != 0 || - resources.CPUPeriod != 0 || - resources.CPUQuota != 0 || - resources.CPURealtimePeriod != 0 || - resources.CPURealtimeRuntime != 0 || - resources.CpusetCpus != "" || - resources.CpusetMems != "" || - len(resources.Devices) != 0 || - len(resources.DeviceCgroupRules) != 0 || - resources.DiskQuota != 0 || - resources.KernelMemory != 0 || - resources.MemoryReservation != 0 || - resources.MemorySwap != 0 || - resources.MemorySwappiness != nil || - resources.OomKillDisable != nil || - resources.PidsLimit != 0 || - len(resources.Ulimits) != 0 || - resources.CPUCount != 0 || - resources.CPUPercent != 0 || - resources.IOMaximumIOps != 0 || - resources.IOMaximumBandwidth != 0 { - return fmt.Errorf("resource updating isn't supported on Windows") - } - // update HostConfig of container - if hostConfig.RestartPolicy.Name != "" { - if container.HostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() { - return fmt.Errorf("Restart policy cannot be updated because AutoRemove is enabled for the container") - } - container.HostConfig.RestartPolicy = hostConfig.RestartPolicy - } - return nil -} - -// BuildHostnameFile writes the container's hostname file. -func (container *Container) BuildHostnameFile() error { - return nil -} - -// EnableServiceDiscoveryOnDefaultNetwork Enable service discovery on default network -func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool { - return true -} - -// GetMountPoints gives a platform specific transformation to types.MountPoint. Callers must hold a Container lock. -func (container *Container) GetMountPoints() []types.MountPoint { - mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) - for _, m := range container.MountPoints { - mountPoints = append(mountPoints, types.MountPoint{ - Type: m.Type, - Name: m.Name, - Source: m.Path(), - Destination: m.Destination, - Driver: m.Driver, - RW: m.RW, - }) - } - return mountPoints -} - -func (container *Container) ConfigsDirPath() string { - return filepath.Join(container.Root, "configs") -} - -// ConfigFilePath returns the path to the on-disk location of a config. 
-func (container *Container) ConfigFilePath(configRef swarmtypes.ConfigReference) string { - return filepath.Join(container.ConfigsDirPath(), configRef.ConfigID) -} diff --git a/vendor/github.com/docker/docker/container/env.go b/vendor/github.com/docker/docker/container/env.go deleted file mode 100644 index d225fd147..000000000 --- a/vendor/github.com/docker/docker/container/env.go +++ /dev/null @@ -1,43 +0,0 @@ -package container // import "github.com/docker/docker/container" - -import ( - "strings" -) - -// ReplaceOrAppendEnvValues returns the defaults with the overrides either -// replaced by env key or appended to the list -func ReplaceOrAppendEnvValues(defaults, overrides []string) []string { - cache := make(map[string]int, len(defaults)) - for i, e := range defaults { - parts := strings.SplitN(e, "=", 2) - cache[parts[0]] = i - } - - for _, value := range overrides { - // Values w/o = means they want this env to be removed/unset. - if !strings.Contains(value, "=") { - if i, exists := cache[value]; exists { - defaults[i] = "" // Used to indicate it should be removed - } - continue - } - - // Just do a normal set/update - parts := strings.SplitN(value, "=", 2) - if i, exists := cache[parts[0]]; exists { - defaults[i] = value - } else { - defaults = append(defaults, value) - } - } - - // Now remove all entries that we want to "unset" - for i := 0; i < len(defaults); i++ { - if defaults[i] == "" { - defaults = append(defaults[:i], defaults[i+1:]...) - i-- - } - } - - return defaults -} diff --git a/vendor/github.com/docker/docker/container/health.go b/vendor/github.com/docker/docker/container/health.go deleted file mode 100644 index 167ee9b47..000000000 --- a/vendor/github.com/docker/docker/container/health.go +++ /dev/null @@ -1,82 +0,0 @@ -package container // import "github.com/docker/docker/container" - -import ( - "sync" - - "github.com/docker/docker/api/types" - "github.com/sirupsen/logrus" -) - -// Health holds the current container health-check state -type Health struct { - types.Health - stop chan struct{} // Write struct{} to stop the monitor - mu sync.Mutex -} - -// String returns a human-readable description of the health-check state -func (s *Health) String() string { - status := s.Status() - - switch status { - case types.Starting: - return "health: starting" - default: // Healthy and Unhealthy are clear on their own - return s.Health.Status - } -} - -// Status returns the current health status. -// -// Note that this takes a lock and the value may change after being read. -func (s *Health) Status() string { - s.mu.Lock() - defer s.mu.Unlock() - - // This happens when the monitor has yet to be setup. - if s.Health.Status == "" { - return types.Unhealthy - } - - return s.Health.Status -} - -// SetStatus writes the current status to the underlying health structure, -// obeying the locking semantics. -// -// Status may be set directly if another lock is used. -func (s *Health) SetStatus(new string) { - s.mu.Lock() - defer s.mu.Unlock() - - s.Health.Status = new -} - -// OpenMonitorChannel creates and returns a new monitor channel. If there -// already is one, it returns nil. -func (s *Health) OpenMonitorChannel() chan struct{} { - s.mu.Lock() - defer s.mu.Unlock() - - if s.stop == nil { - logrus.Debug("OpenMonitorChannel") - s.stop = make(chan struct{}) - return s.stop - } - return nil -} - -// CloseMonitorChannel closes any existing monitor channel. 
-func (s *Health) CloseMonitorChannel() {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	if s.stop != nil {
-		logrus.Debug("CloseMonitorChannel: waiting for probe to stop")
-		close(s.stop)
-		s.stop = nil
-		// unhealthy when the monitor has stopped for compatibility reasons
-		s.Health.Status = types.Unhealthy
-		logrus.Debug("CloseMonitorChannel done")
-	}
-}
diff --git a/vendor/github.com/docker/docker/container/history.go b/vendor/github.com/docker/docker/container/history.go
deleted file mode 100644
index 7117d9a43..000000000
--- a/vendor/github.com/docker/docker/container/history.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package container // import "github.com/docker/docker/container"
-
-import "sort"
-
-// History is a convenience type for storing a list of containers,
-// sorted by creation date in descending order.
-type History []*Container
-
-// Len returns the number of containers in the history.
-func (history *History) Len() int {
-	return len(*history)
-}
-
-// Less compares two containers and returns true if the second one
-// was created before the first one.
-func (history *History) Less(i, j int) bool {
-	containers := *history
-	return containers[j].Created.Before(containers[i].Created)
-}
-
-// Swap switches containers i and j positions in the history.
-func (history *History) Swap(i, j int) {
-	containers := *history
-	containers[i], containers[j] = containers[j], containers[i]
-}
-
-// sort orders the history by creation date in descending order.
-func (history *History) sort() {
-	sort.Sort(history)
-}
diff --git a/vendor/github.com/docker/docker/container/memory_store.go b/vendor/github.com/docker/docker/container/memory_store.go
deleted file mode 100644
index ad4c9e20f..000000000
--- a/vendor/github.com/docker/docker/container/memory_store.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package container // import "github.com/docker/docker/container"
-
-import (
-	"sync"
-)
-
-// memoryStore implements a Store in memory.
-type memoryStore struct {
-	s map[string]*Container
-	sync.RWMutex
-}
-
-// NewMemoryStore initializes a new memory store.
-func NewMemoryStore() Store {
-	return &memoryStore{
-		s: make(map[string]*Container),
-	}
-}
-
-// Add appends a new container to the memory store.
-// It overwrites the existing entry if the id existed before.
-func (c *memoryStore) Add(id string, cont *Container) {
-	c.Lock()
-	c.s[id] = cont
-	c.Unlock()
-}
-
-// Get returns a container from the store by id.
-func (c *memoryStore) Get(id string) *Container {
-	var res *Container
-	c.RLock()
-	res = c.s[id]
-	c.RUnlock()
-	return res
-}
-
-// Delete removes a container from the store by id.
-func (c *memoryStore) Delete(id string) {
-	c.Lock()
-	delete(c.s, id)
-	c.Unlock()
-}
-
-// List returns a sorted list of containers from the store.
-// The containers are ordered by creation date.
-func (c *memoryStore) List() []*Container {
-	containers := History(c.all())
-	containers.sort()
-	return containers
-}
-
-// Size returns the number of containers in the store.
-func (c *memoryStore) Size() int {
-	c.RLock()
-	defer c.RUnlock()
-	return len(c.s)
-}
-
-// First returns the first container found in the store by a given filter.
-func (c *memoryStore) First(filter StoreFilter) *Container {
-	for _, cont := range c.all() {
-		if filter(cont) {
-			return cont
-		}
-	}
-	return nil
-}
-
-// ApplyAll calls the reducer function with every container in the store.
-// This operation runs concurrently in the memory store.
-// NOTE: Modifications to the store MUST NOT be done by the StoreReducer.
-func (c *memoryStore) ApplyAll(apply StoreReducer) { - wg := new(sync.WaitGroup) - for _, cont := range c.all() { - wg.Add(1) - go func(container *Container) { - apply(container) - wg.Done() - }(cont) - } - - wg.Wait() -} - -func (c *memoryStore) all() []*Container { - c.RLock() - containers := make([]*Container, 0, len(c.s)) - for _, cont := range c.s { - containers = append(containers, cont) - } - c.RUnlock() - return containers -} - -var _ Store = &memoryStore{} diff --git a/vendor/github.com/docker/docker/container/monitor.go b/vendor/github.com/docker/docker/container/monitor.go deleted file mode 100644 index 1735e3487..000000000 --- a/vendor/github.com/docker/docker/container/monitor.go +++ /dev/null @@ -1,46 +0,0 @@ -package container // import "github.com/docker/docker/container" - -import ( - "time" - - "github.com/sirupsen/logrus" -) - -const ( - loggerCloseTimeout = 10 * time.Second -) - -// Reset puts a container into a state where it can be restarted again. -func (container *Container) Reset(lock bool) { - if lock { - container.Lock() - defer container.Unlock() - } - - if err := container.CloseStreams(); err != nil { - logrus.Errorf("%s: %s", container.ID, err) - } - - // Re-create a brand new stdin pipe once the container exited - if container.Config.OpenStdin { - container.StreamConfig.NewInputPipes() - } - - if container.LogDriver != nil { - if container.LogCopier != nil { - exit := make(chan struct{}) - go func() { - container.LogCopier.Wait() - close(exit) - }() - select { - case <-time.After(loggerCloseTimeout): - logrus.Warn("Logger didn't exit in time: logs may be truncated") - case <-exit: - } - } - container.LogDriver.Close() - container.LogCopier = nil - container.LogDriver = nil - } -} diff --git a/vendor/github.com/docker/docker/container/mounts_unix.go b/vendor/github.com/docker/docker/container/mounts_unix.go deleted file mode 100644 index 62f4441dc..000000000 --- a/vendor/github.com/docker/docker/container/mounts_unix.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !windows - -package container // import "github.com/docker/docker/container" - -// Mount contains information for a mount operation. -type Mount struct { - Source string `json:"source"` - Destination string `json:"destination"` - Writable bool `json:"writable"` - Data string `json:"data"` - Propagation string `json:"mountpropagation"` -} diff --git a/vendor/github.com/docker/docker/container/mounts_windows.go b/vendor/github.com/docker/docker/container/mounts_windows.go deleted file mode 100644 index 8f27e8806..000000000 --- a/vendor/github.com/docker/docker/container/mounts_windows.go +++ /dev/null @@ -1,8 +0,0 @@ -package container // import "github.com/docker/docker/container" - -// Mount contains information for a mount operation. -type Mount struct { - Source string `json:"source"` - Destination string `json:"destination"` - Writable bool `json:"writable"` -} diff --git a/vendor/github.com/docker/docker/container/state.go b/vendor/github.com/docker/docker/container/state.go deleted file mode 100644 index 7c2a1ec81..000000000 --- a/vendor/github.com/docker/docker/container/state.go +++ /dev/null @@ -1,409 +0,0 @@ -package container // import "github.com/docker/docker/container" - -import ( - "context" - "errors" - "fmt" - "sync" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/go-units" -) - -// State holds the current container state, and has methods to get and -// set the state. 
Container embeds State, which allows all of the
-// methods defined on State to be called on Container.
-type State struct {
-	sync.Mutex
-	// Note that `Running` and `Paused` are not mutually exclusive:
-	// When pausing a container (on Linux), the cgroups freezer is used to suspend
-	// all processes in the container. Freezing the process requires the process to
-	// be running. As a result, paused containers are both `Running` _and_ `Paused`.
-	Running           bool
-	Paused            bool
-	Restarting        bool
-	OOMKilled         bool
-	RemovalInProgress bool // No need for this to be persisted to disk.
-	Dead              bool
-	Pid               int
-	ExitCodeValue     int    `json:"ExitCode"`
-	ErrorMsg          string `json:"Error"` // contains last known error during container start, stop, or remove
-	StartedAt         time.Time
-	FinishedAt        time.Time
-	Health            *Health
-
-	waitStop   chan struct{}
-	waitRemove chan struct{}
-}
-
-// StateStatus is used to return container wait results.
-// Implements the exec.ExitCode interface.
-// This type is needed as State includes a sync.Mutex field, which makes
-// copying it unsafe.
-type StateStatus struct {
-	exitCode int
-	err      error
-}
-
-// ExitCode returns current exitcode for the state.
-func (s StateStatus) ExitCode() int {
-	return s.exitCode
-}
-
-// Err returns current error for the state. Returns nil if the container had
-// exited on its own.
-func (s StateStatus) Err() error {
-	return s.err
-}
-
-// NewState creates a default state object with a fresh channel for state changes.
-func NewState() *State {
-	return &State{
-		waitStop:   make(chan struct{}),
-		waitRemove: make(chan struct{}),
-	}
-}
-
-// String returns a human-readable description of the state
-func (s *State) String() string {
-	if s.Running {
-		if s.Paused {
-			return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
-		}
-		if s.Restarting {
-			return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCodeValue, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
-		}
-
-		if h := s.Health; h != nil {
-			return fmt.Sprintf("Up %s (%s)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)), h.String())
-		}
-
-		return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
-	}
-
-	if s.RemovalInProgress {
-		return "Removal In Progress"
-	}
-
-	if s.Dead {
-		return "Dead"
-	}
-
-	if s.StartedAt.IsZero() {
-		return "Created"
-	}
-
-	if s.FinishedAt.IsZero() {
-		return ""
-	}
-
-	return fmt.Sprintf("Exited (%d) %s ago", s.ExitCodeValue, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
-}
-
-// IsValidHealthString checks if the provided string is a valid container health status or not.
-func IsValidHealthString(s string) bool {
-	return s == types.Starting ||
-		s == types.Healthy ||
-		s == types.Unhealthy ||
-		s == types.NoHealthcheck
-}
-
-// StateString returns a single string to describe state
-func (s *State) StateString() string {
-	if s.Running {
-		if s.Paused {
-			return "paused"
-		}
-		if s.Restarting {
-			return "restarting"
-		}
-		return "running"
-	}
-
-	if s.RemovalInProgress {
-		return "removing"
-	}
-
-	if s.Dead {
-		return "dead"
-	}
-
-	if s.StartedAt.IsZero() {
-		return "created"
-	}
-
-	return "exited"
-}
-
-// IsValidStateString checks if the provided string is a valid container state or not.
-func IsValidStateString(s string) bool {
-	if s != "paused" &&
-		s != "restarting" &&
-		s != "removing" &&
-		s != "running" &&
-		s != "dead" &&
-		s != "created" &&
-		s != "exited" {
-		return false
-	}
-	return true
-}
-
-// WaitCondition is an enum type for different states to wait for.
-type WaitCondition int
-
-// Possible WaitCondition Values.
-//
-// WaitConditionNotRunning (default) is used to wait for any of the non-running
-// states: "created", "exited", "dead", "removing", or "removed".
-//
-// WaitConditionNextExit is used to wait for the next time the state changes
-// to a non-running state. If the state is currently "created" or "exited",
-// this would cause Wait() to block until either the container runs and exits
-// or is removed.
-//
-// WaitConditionRemoved is used to wait for the container to be removed.
-const (
-	WaitConditionNotRunning WaitCondition = iota
-	WaitConditionNextExit
-	WaitConditionRemoved
-)
-
-// Wait waits until the container is in a certain state indicated by the given
-// condition. A context must be used for cancelling the request, controlling
-// timeouts, and avoiding goroutine leaks. Wait must be called without holding
-// the state lock. Returns a channel from which the caller will receive the
-// result. If the container exited on its own, the result's Err() method will
-// be nil and its ExitCode() method will return the container's exit code;
-// otherwise, the result's Err() method will return an error indicating why the
-// wait operation failed.
-func (s *State) Wait(ctx context.Context, condition WaitCondition) <-chan StateStatus {
-	s.Lock()
-	defer s.Unlock()
-
-	if condition == WaitConditionNotRunning && !s.Running {
-		// Buffer so we can put it in the channel now.
-		resultC := make(chan StateStatus, 1)
-
-		// Send the current status.
-		resultC <- StateStatus{
-			exitCode: s.ExitCode(),
-			err:      s.Err(),
-		}
-
-		return resultC
-	}
-
-	// If we are waiting only for removal, the waitStop channel should
-	// remain nil and block forever.
-	var waitStop chan struct{}
-	if condition < WaitConditionRemoved {
-		waitStop = s.waitStop
-	}
-
-	// Always wait for removal, just in case the container gets removed
-	// while it is still in a "created" state, in which case it is never
-	// actually stopped.
-	waitRemove := s.waitRemove
-
-	resultC := make(chan StateStatus)
-
-	go func() {
-		select {
-		case <-ctx.Done():
-			// Context timeout or cancellation.
-			resultC <- StateStatus{
-				exitCode: -1,
-				err:      ctx.Err(),
-			}
-			return
-		case <-waitStop:
-		case <-waitRemove:
-		}
-
-		s.Lock()
-		result := StateStatus{
-			exitCode: s.ExitCode(),
-			err:      s.Err(),
-		}
-		s.Unlock()
-
-		resultC <- result
-	}()
-
-	return resultC
-}
-
-// IsRunning returns whether the running flag is set. Used by Container to check whether a container is running.
-func (s *State) IsRunning() bool {
-	s.Lock()
-	res := s.Running
-	s.Unlock()
-	return res
-}
-
-// GetPID returns the process id of a container.
-func (s *State) GetPID() int {
-	s.Lock()
-	res := s.Pid
-	s.Unlock()
-	return res
-}
-
-// ExitCode returns current exitcode for the state. Take lock before if state
-// may be shared.
-func (s *State) ExitCode() int {
-	return s.ExitCodeValue
-}
-
-// SetExitCode sets current exitcode for the state. Take lock before if state
-// may be shared.
-func (s *State) SetExitCode(ec int) {
-	s.ExitCodeValue = ec
-}
-
-// SetRunning sets the state of the container to "running".
-func (s *State) SetRunning(pid int, initial bool) {
-	s.ErrorMsg = ""
-	s.Paused = false
-	s.Running = true
-	s.Restarting = false
-	if initial {
-		s.Paused = false
-	}
-	s.ExitCodeValue = 0
-	s.Pid = pid
-	if initial {
-		s.StartedAt = time.Now().UTC()
-	}
-}
-
-// SetStopped sets the container state to "stopped" without locking.
-func (s *State) SetStopped(exitStatus *ExitStatus) {
-	s.Running = false
-	s.Paused = false
-	s.Restarting = false
-	s.Pid = 0
-	if exitStatus.ExitedAt.IsZero() {
-		s.FinishedAt = time.Now().UTC()
-	} else {
-		s.FinishedAt = exitStatus.ExitedAt
-	}
-	s.ExitCodeValue = exitStatus.ExitCode
-	s.OOMKilled = exitStatus.OOMKilled
-	close(s.waitStop) // fire waiters for stop
-	s.waitStop = make(chan struct{})
-}
-
-// SetRestarting sets the container state to "restarting" without locking.
-// It also sets the container PID to 0.
-func (s *State) SetRestarting(exitStatus *ExitStatus) {
-	// we should consider the container running when it is restarting because of
-	// all the checks in docker around rm/stop/etc
-	s.Running = true
-	s.Restarting = true
-	s.Paused = false
-	s.Pid = 0
-	s.FinishedAt = time.Now().UTC()
-	s.ExitCodeValue = exitStatus.ExitCode
-	s.OOMKilled = exitStatus.OOMKilled
-	close(s.waitStop) // fire waiters for stop
-	s.waitStop = make(chan struct{})
-}
-
-// SetError sets the container's error state. This is useful when we want to
-// know, when inspecting the container, the error that occurred as it
-// transitioned to another state.
-func (s *State) SetError(err error) {
-	s.ErrorMsg = ""
-	if err != nil {
-		s.ErrorMsg = err.Error()
-	}
-}
-
-// IsPaused returns whether the container is paused or not.
-func (s *State) IsPaused() bool {
-	s.Lock()
-	res := s.Paused
-	s.Unlock()
-	return res
-}
-
-// IsRestarting returns whether the container is restarting or not.
-func (s *State) IsRestarting() bool {
-	s.Lock()
-	res := s.Restarting
-	s.Unlock()
-	return res
-}
-
-// SetRemovalInProgress sets the container state as being removed.
-// It returns true if the container was already in that state.
-func (s *State) SetRemovalInProgress() bool {
-	s.Lock()
-	defer s.Unlock()
-	if s.RemovalInProgress {
-		return true
-	}
-	s.RemovalInProgress = true
-	return false
-}
-
-// ResetRemovalInProgress sets the RemovalInProgress flag back to false.
-func (s *State) ResetRemovalInProgress() {
-	s.Lock()
-	s.RemovalInProgress = false
-	s.Unlock()
-}
-
-// IsRemovalInProgress returns whether the RemovalInProgress flag is set.
-// Used by Container to check whether a container is being removed.
-func (s *State) IsRemovalInProgress() bool {
-	s.Lock()
-	res := s.RemovalInProgress
-	s.Unlock()
-	return res
-}
-
-// SetDead sets the container state to "dead"
-func (s *State) SetDead() {
-	s.Lock()
-	s.Dead = true
-	s.Unlock()
-}
-
-// IsDead returns whether the Dead flag is set. Used by Container to check whether a container is dead.
-func (s *State) IsDead() bool {
-	s.Lock()
-	res := s.Dead
-	s.Unlock()
-	return res
-}
-
-// SetRemoved assumes this container is already in the "dead" state and
-// closes the internal waitRemove channel to unblock callers waiting for a
-// container to be removed.
-func (s *State) SetRemoved() {
-	s.SetRemovalError(nil)
-}
-
-// SetRemovalError is to be called in case a container remove failed.
-// It sets an error and closes the internal waitRemove channel to unblock
-// callers waiting for the container to be removed.
-func (s *State) SetRemovalError(err error) {
-	s.SetError(err)
-	s.Lock()
-	close(s.waitRemove) // Unblock those waiting on remove.
-	// Recreate the channel so next ContainerWait will work
-	s.waitRemove = make(chan struct{})
-	s.Unlock()
-}
-
-// Err returns an error if there is one.
-func (s *State) Err() error { - if s.ErrorMsg != "" { - return errors.New(s.ErrorMsg) - } - return nil -} diff --git a/vendor/github.com/docker/docker/container/store.go b/vendor/github.com/docker/docker/container/store.go deleted file mode 100644 index 3af038985..000000000 --- a/vendor/github.com/docker/docker/container/store.go +++ /dev/null @@ -1,28 +0,0 @@ -package container // import "github.com/docker/docker/container" - -// StoreFilter defines a function to filter -// container in the store. -type StoreFilter func(*Container) bool - -// StoreReducer defines a function to -// manipulate containers in the store -type StoreReducer func(*Container) - -// Store defines an interface that -// any container store must implement. -type Store interface { - // Add appends a new container to the store. - Add(string, *Container) - // Get returns a container from the store by the identifier it was stored with. - Get(string) *Container - // Delete removes a container from the store by the identifier it was stored with. - Delete(string) - // List returns a list of containers from the store. - List() []*Container - // Size returns the number of containers in the store. - Size() int - // First returns the first container found in the store by a given filter. - First(StoreFilter) *Container - // ApplyAll calls the reducer function with every container in the store. - ApplyAll(StoreReducer) -} diff --git a/vendor/github.com/docker/docker/container/stream/attach.go b/vendor/github.com/docker/docker/container/stream/attach.go deleted file mode 100644 index 1366dcb49..000000000 --- a/vendor/github.com/docker/docker/container/stream/attach.go +++ /dev/null @@ -1,175 +0,0 @@ -package stream // import "github.com/docker/docker/container/stream" - -import ( - "context" - "io" - - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/term" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sync/errgroup" -) - -var defaultEscapeSequence = []byte{16, 17} // ctrl-p, ctrl-q - -// AttachConfig is the config struct used to attach a client to a stream's stdio -type AttachConfig struct { - // Tells the attach copier that the stream's stdin is a TTY and to look for - // escape sequences in stdin to detach from the stream. - // When true the escape sequence is not passed to the underlying stream - TTY bool - // Specifies the detach keys the client will be using - // Only useful when `TTY` is true - DetachKeys []byte - - // CloseStdin signals that once done, stdin for the attached stream should be closed - // For example, this would close the attached container's stdin. - CloseStdin bool - - // UseStd* indicate whether the client has requested to be connected to the - // given stream or not. These flags are used instead of checking Std* != nil - // at points before the client streams Std* are wired up. 
- UseStdin, UseStdout, UseStderr bool - - // CStd* are the streams directly connected to the container - CStdin io.WriteCloser - CStdout, CStderr io.ReadCloser - - // Provide client streams to wire up to - Stdin io.ReadCloser - Stdout, Stderr io.Writer -} - -// AttachStreams attaches the container's streams to the AttachConfig -func (c *Config) AttachStreams(cfg *AttachConfig) { - if cfg.UseStdin { - cfg.CStdin = c.StdinPipe() - } - - if cfg.UseStdout { - cfg.CStdout = c.StdoutPipe() - } - - if cfg.UseStderr { - cfg.CStderr = c.StderrPipe() - } -} - -// CopyStreams starts goroutines to copy data in and out to/from the container -func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) <-chan error { - var group errgroup.Group - - // Connect stdin of container to the attach stdin stream. - if cfg.Stdin != nil { - group.Go(func() error { - logrus.Debug("attach: stdin: begin") - defer logrus.Debug("attach: stdin: end") - - defer func() { - if cfg.CloseStdin && !cfg.TTY { - cfg.CStdin.Close() - } else { - // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr - if cfg.CStdout != nil { - cfg.CStdout.Close() - } - if cfg.CStderr != nil { - cfg.CStderr.Close() - } - } - }() - - var err error - if cfg.TTY { - _, err = copyEscapable(cfg.CStdin, cfg.Stdin, cfg.DetachKeys) - } else { - _, err = pools.Copy(cfg.CStdin, cfg.Stdin) - } - if err == io.ErrClosedPipe { - err = nil - } - if err != nil { - logrus.WithError(err).Debug("error on attach stdin") - return errors.Wrap(err, "error on attach stdin") - } - return nil - }) - } - - attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) error { - logrus.Debugf("attach: %s: begin", name) - defer logrus.Debugf("attach: %s: end", name) - defer func() { - // Make sure stdin gets closed - if cfg.Stdin != nil { - cfg.Stdin.Close() - } - streamPipe.Close() - }() - - _, err := pools.Copy(stream, streamPipe) - if err == io.ErrClosedPipe { - err = nil - } - if err != nil { - logrus.WithError(err).Debugf("attach: %s", name) - return errors.Wrapf(err, "error attaching %s stream", name) - } - return nil - } - - if cfg.Stdout != nil { - group.Go(func() error { - return attachStream("stdout", cfg.Stdout, cfg.CStdout) - }) - } - if cfg.Stderr != nil { - group.Go(func() error { - return attachStream("stderr", cfg.Stderr, cfg.CStderr) - }) - } - - errs := make(chan error, 1) - go func() { - defer logrus.Debug("attach done") - groupErr := make(chan error, 1) - go func() { - groupErr <- group.Wait() - }() - select { - case <-ctx.Done(): - // close all pipes - if cfg.CStdin != nil { - cfg.CStdin.Close() - } - if cfg.CStdout != nil { - cfg.CStdout.Close() - } - if cfg.CStderr != nil { - cfg.CStderr.Close() - } - - // Now with these closed, wait should return. 
- if err := group.Wait(); err != nil { - errs <- err - return - } - errs <- ctx.Err() - case err := <-groupErr: - errs <- err - } - }() - - return errs -} - -func copyEscapable(dst io.Writer, src io.ReadCloser, keys []byte) (written int64, err error) { - if len(keys) == 0 { - keys = defaultEscapeSequence - } - pr := term.NewEscapeProxy(src, keys) - defer src.Close() - - return pools.Copy(dst, pr) -} diff --git a/vendor/github.com/docker/docker/container/stream/streams.go b/vendor/github.com/docker/docker/container/stream/streams.go deleted file mode 100644 index d81867c1d..000000000 --- a/vendor/github.com/docker/docker/container/stream/streams.go +++ /dev/null @@ -1,146 +0,0 @@ -package stream // import "github.com/docker/docker/container/stream" - -import ( - "fmt" - "io" - "io/ioutil" - "strings" - "sync" - - "github.com/containerd/containerd/cio" - "github.com/docker/docker/pkg/broadcaster" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/pools" - "github.com/sirupsen/logrus" -) - -// Config holds information about I/O streams managed together. -// -// config.StdinPipe returns a WriteCloser which can be used to feed data -// to the standard input of the streamConfig's active process. -// config.StdoutPipe and streamConfig.StderrPipe each return a ReadCloser -// which can be used to retrieve the standard output (and error) generated -// by the container's active process. The output (and error) are actually -// copied and delivered to all StdoutPipe and StderrPipe consumers, using -// a kind of "broadcaster". -type Config struct { - sync.WaitGroup - stdout *broadcaster.Unbuffered - stderr *broadcaster.Unbuffered - stdin io.ReadCloser - stdinPipe io.WriteCloser -} - -// NewConfig creates a stream config and initializes -// the standard err and standard out to new unbuffered broadcasters. -func NewConfig() *Config { - return &Config{ - stderr: new(broadcaster.Unbuffered), - stdout: new(broadcaster.Unbuffered), - } -} - -// Stdout returns the standard output in the configuration. -func (c *Config) Stdout() *broadcaster.Unbuffered { - return c.stdout -} - -// Stderr returns the standard error in the configuration. -func (c *Config) Stderr() *broadcaster.Unbuffered { - return c.stderr -} - -// Stdin returns the standard input in the configuration. -func (c *Config) Stdin() io.ReadCloser { - return c.stdin -} - -// StdinPipe returns an input writer pipe as an io.WriteCloser. -func (c *Config) StdinPipe() io.WriteCloser { - return c.stdinPipe -} - -// StdoutPipe creates a new io.ReadCloser with an empty bytes pipe. -// It adds this new out pipe to the Stdout broadcaster. -// This will block stdout if unconsumed. -func (c *Config) StdoutPipe() io.ReadCloser { - bytesPipe := ioutils.NewBytesPipe() - c.stdout.Add(bytesPipe) - return bytesPipe -} - -// StderrPipe creates a new io.ReadCloser with an empty bytes pipe. -// It adds this new err pipe to the Stderr broadcaster. -// This will block stderr if unconsumed. -func (c *Config) StderrPipe() io.ReadCloser { - bytesPipe := ioutils.NewBytesPipe() - c.stderr.Add(bytesPipe) - return bytesPipe -} - -// NewInputPipes creates new pipes for both standard inputs, Stdin and StdinPipe. -func (c *Config) NewInputPipes() { - c.stdin, c.stdinPipe = io.Pipe() -} - -// NewNopInputPipe creates a new input pipe that will silently drop all messages in the input. -func (c *Config) NewNopInputPipe() { - c.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) -} - -// CloseStreams ensures that the configured streams are properly closed. 
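The Config type above hands each StdoutPipe/StderrPipe caller its own pipe and fans container output out to all of them. A toy stand-in for pkg/broadcaster.Unbuffered (fanout and everything else here is invented for the sketch):

package main

import (
	"bytes"
	"fmt"
	"io"
	"sync"
)

// fanout is a toy stand-in for pkg/broadcaster.Unbuffered: every Write is
// copied to all registered destinations, so each StdoutPipe-style consumer
// sees the full stream.
type fanout struct {
	mu   sync.Mutex
	dsts []io.Writer
}

func (f *fanout) Add(w io.Writer) {
	f.mu.Lock()
	defer f.mu.Unlock()
	f.dsts = append(f.dsts, w)
}

func (f *fanout) Write(p []byte) (int, error) {
	f.mu.Lock()
	defer f.mu.Unlock()
	for _, w := range f.dsts {
		w.Write(p) // the real broadcaster tracks and evicts failed writers
	}
	return len(p), nil
}

func main() {
	var stdout fanout
	a, b := &bytes.Buffer{}, &bytes.Buffer{}
	stdout.Add(a)
	stdout.Add(b)
	fmt.Fprint(&stdout, "container output")
	fmt.Println(a.String() == b.String()) // true: both consumers got a copy
}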
-func (c *Config) CloseStreams() error { - var errors []string - - if c.stdin != nil { - if err := c.stdin.Close(); err != nil { - errors = append(errors, fmt.Sprintf("error close stdin: %s", err)) - } - } - - if err := c.stdout.Clean(); err != nil { - errors = append(errors, fmt.Sprintf("error close stdout: %s", err)) - } - - if err := c.stderr.Clean(); err != nil { - errors = append(errors, fmt.Sprintf("error close stderr: %s", err)) - } - - if len(errors) > 0 { - return fmt.Errorf(strings.Join(errors, "\n")) - } - - return nil -} - -// CopyToPipe connects streamconfig with a libcontainerd.IOPipe -func (c *Config) CopyToPipe(iop *cio.DirectIO) { - copyFunc := func(w io.Writer, r io.ReadCloser) { - c.Add(1) - go func() { - if _, err := pools.Copy(w, r); err != nil { - logrus.Errorf("stream copy error: %v", err) - } - r.Close() - c.Done() - }() - } - - if iop.Stdout != nil { - copyFunc(c.Stdout(), iop.Stdout) - } - if iop.Stderr != nil { - copyFunc(c.Stderr(), iop.Stderr) - } - - if stdin := c.Stdin(); stdin != nil { - if iop.Stdin != nil { - go func() { - pools.Copy(iop.Stdin, stdin) - if err := iop.Stdin.Close(); err != nil { - logrus.Warnf("failed to close stdin: %v", err) - } - }() - } - } -} diff --git a/vendor/github.com/docker/docker/container/view.go b/vendor/github.com/docker/docker/container/view.go deleted file mode 100644 index b63149941..000000000 --- a/vendor/github.com/docker/docker/container/view.go +++ /dev/null @@ -1,494 +0,0 @@ -package container // import "github.com/docker/docker/container" - -import ( - "errors" - "fmt" - "strings" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/network" - "github.com/docker/go-connections/nat" - "github.com/hashicorp/go-memdb" - "github.com/sirupsen/logrus" -) - -const ( - memdbContainersTable = "containers" - memdbNamesTable = "names" - memdbIDIndex = "id" - memdbContainerIDIndex = "containerid" -) - -var ( - // ErrNameReserved is an error which is returned when a name is requested to be reserved that already is reserved - ErrNameReserved = errors.New("name is reserved") - // ErrNameNotReserved is an error which is returned when trying to find a name that is not reserved - ErrNameNotReserved = errors.New("name is not reserved") -) - -// Snapshot is a read only view for Containers. It holds all information necessary to serve container queries in a -// versioned ACID in-memory store. -type Snapshot struct { - types.Container - - // additional info queries need to filter on - // preserve nanosec resolution for queries - CreatedAt time.Time - StartedAt time.Time - Name string - Pid int - ExitCode int - Running bool - Paused bool - Managed bool - ExposedPorts nat.PortSet - PortBindings nat.PortSet - Health string - HostConfig struct { - Isolation string - } -} - -// nameAssociation associates a container id with a name. -type nameAssociation struct { - // name is the name to associate. Note that name is the primary key - // ("id" in memdb). 
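CopyToPipe above tracks each copier goroutine on the Config's embedded WaitGroup so that shutdown can wait for the streams to drain. A minimal sketch of that bookkeeping, with invented names and inputs:

package main

import (
	"fmt"
	"io"
	"strings"
	"sync"
)

// streams sketches the CopyToPipe pattern: each stdio copy runs in its own
// goroutine, and the embedded WaitGroup lets callers wait for all copies to
// finish before tearing the streams down.
type streams struct {
	sync.WaitGroup
}

func (s *streams) copyAsync(w io.Writer, r io.ReadCloser) {
	s.Add(1)
	go func() {
		defer s.Done()
		defer r.Close()
		if _, err := io.Copy(w, r); err != nil {
			fmt.Println("stream copy error:", err)
		}
	}()
}

func main() {
	var s streams
	var out, errOut strings.Builder
	s.copyAsync(&out, io.NopCloser(strings.NewReader("stdout data")))
	s.copyAsync(&errOut, io.NopCloser(strings.NewReader("stderr data")))
	s.Wait() // all copies finished
	fmt.Println(out.String(), "/", errOut.String())
}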
- name string - containerID string -} - -// ViewDB provides an in-memory transactional (ACID) container Store -type ViewDB interface { - Snapshot() View - Save(*Container) error - Delete(*Container) error - - ReserveName(name, containerID string) error - ReleaseName(name string) error -} - -// View can be used by readers to avoid locking -type View interface { - All() ([]Snapshot, error) - Get(id string) (*Snapshot, error) - - GetID(name string) (string, error) - GetAllNames() map[string][]string -} - -var schema = &memdb.DBSchema{ - Tables: map[string]*memdb.TableSchema{ - memdbContainersTable: { - Name: memdbContainersTable, - Indexes: map[string]*memdb.IndexSchema{ - memdbIDIndex: { - Name: memdbIDIndex, - Unique: true, - Indexer: &containerByIDIndexer{}, - }, - }, - }, - memdbNamesTable: { - Name: memdbNamesTable, - Indexes: map[string]*memdb.IndexSchema{ - // Used for names, because "id" is the primary key in memdb. - memdbIDIndex: { - Name: memdbIDIndex, - Unique: true, - Indexer: &namesByNameIndexer{}, - }, - memdbContainerIDIndex: { - Name: memdbContainerIDIndex, - Indexer: &namesByContainerIDIndexer{}, - }, - }, - }, - }, -} - -type memDB struct { - store *memdb.MemDB -} - -// NoSuchContainerError indicates that the container wasn't found in the -// database. -type NoSuchContainerError struct { - id string -} - -// Error satisfies the error interface. -func (e NoSuchContainerError) Error() string { - return "no such container " + e.id -} - -// NewViewDB provides the default implementation, with the default schema -func NewViewDB() (ViewDB, error) { - store, err := memdb.NewMemDB(schema) - if err != nil { - return nil, err - } - return &memDB{store: store}, nil -} - -// Snapshot provides a consistent read-only View of the database -func (db *memDB) Snapshot() View { - return &memdbView{ - txn: db.store.Txn(false), - } -} - -func (db *memDB) withTxn(cb func(*memdb.Txn) error) error { - txn := db.store.Txn(true) - err := cb(txn) - if err != nil { - txn.Abort() - return err - } - txn.Commit() - return nil -} - -// Save atomically updates the in-memory store state for a Container. -// Only read only (deep) copies of containers may be passed in. -func (db *memDB) Save(c *Container) error { - return db.withTxn(func(txn *memdb.Txn) error { - return txn.Insert(memdbContainersTable, c) - }) -} - -// Delete removes an item by ID -func (db *memDB) Delete(c *Container) error { - return db.withTxn(func(txn *memdb.Txn) error { - view := &memdbView{txn: txn} - names := view.getNames(c.ID) - - for _, name := range names { - txn.Delete(memdbNamesTable, nameAssociation{name: name}) - } - - // Ignore error - the container may not actually exist in the - // db, but we still need to clean up associated names. 
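The memdb calls in this file follow go-memdb's standard transaction shape, which the withTxn helper above wraps. A self-contained round trip against that library; the table and row names are invented:

package main

import (
	"fmt"

	"github.com/hashicorp/go-memdb"
)

type row struct {
	ID string
}

func main() {
	// Illustrative single-table schema; every memdb table needs an "id" index.
	schema := &memdb.DBSchema{
		Tables: map[string]*memdb.TableSchema{
			"containers": {
				Name: "containers",
				Indexes: map[string]*memdb.IndexSchema{
					"id": {
						Name:    "id",
						Unique:  true,
						Indexer: &memdb.StringFieldIndex{Field: "ID"},
					},
				},
			},
		},
	}
	db, err := memdb.NewMemDB(schema)
	if err != nil {
		panic(err)
	}

	// Write transaction: abort on error, commit on success — exactly the
	// sequence withTxn wraps.
	txn := db.Txn(true)
	if err := txn.Insert("containers", &row{ID: "abc123"}); err != nil {
		txn.Abort()
		panic(err)
	}
	txn.Commit()

	// Read transactions never block writers; this is what Snapshot() hands out.
	read := db.Txn(false)
	obj, err := read.First("containers", "id", "abc123")
	fmt.Println(obj.(*row).ID, err)
}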
- txn.Delete(memdbContainersTable, NewBaseContainer(c.ID, c.Root)) - return nil - }) -} - -// ReserveName registers a container ID to a name -// ReserveName is idempotent -// Attempting to reserve a container ID to a name that already exists results in an `ErrNameReserved` -// A name reservation is globally unique -func (db *memDB) ReserveName(name, containerID string) error { - return db.withTxn(func(txn *memdb.Txn) error { - s, err := txn.First(memdbNamesTable, memdbIDIndex, name) - if err != nil { - return err - } - if s != nil { - if s.(nameAssociation).containerID != containerID { - return ErrNameReserved - } - return nil - } - return txn.Insert(memdbNamesTable, nameAssociation{name: name, containerID: containerID}) - }) -} - -// ReleaseName releases the reserved name -// Once released, a name can be reserved again -func (db *memDB) ReleaseName(name string) error { - return db.withTxn(func(txn *memdb.Txn) error { - return txn.Delete(memdbNamesTable, nameAssociation{name: name}) - }) -} - -type memdbView struct { - txn *memdb.Txn -} - -// All returns a all items in this snapshot. Returned objects must never be modified. -func (v *memdbView) All() ([]Snapshot, error) { - var all []Snapshot - iter, err := v.txn.Get(memdbContainersTable, memdbIDIndex) - if err != nil { - return nil, err - } - for { - item := iter.Next() - if item == nil { - break - } - snapshot := v.transform(item.(*Container)) - all = append(all, *snapshot) - } - return all, nil -} - -// Get returns an item by id. Returned objects must never be modified. -func (v *memdbView) Get(id string) (*Snapshot, error) { - s, err := v.txn.First(memdbContainersTable, memdbIDIndex, id) - if err != nil { - return nil, err - } - if s == nil { - return nil, NoSuchContainerError{id: id} - } - return v.transform(s.(*Container)), nil -} - -// getNames lists all the reserved names for the given container ID. -func (v *memdbView) getNames(containerID string) []string { - iter, err := v.txn.Get(memdbNamesTable, memdbContainerIDIndex, containerID) - if err != nil { - return nil - } - - var names []string - for { - item := iter.Next() - if item == nil { - break - } - names = append(names, item.(nameAssociation).name) - } - - return names -} - -// GetID returns the container ID that the passed in name is reserved to. -func (v *memdbView) GetID(name string) (string, error) { - s, err := v.txn.First(memdbNamesTable, memdbIDIndex, name) - if err != nil { - return "", err - } - if s == nil { - return "", ErrNameNotReserved - } - return s.(nameAssociation).containerID, nil -} - -// GetAllNames returns all registered names. -func (v *memdbView) GetAllNames() map[string][]string { - iter, err := v.txn.Get(memdbNamesTable, memdbContainerIDIndex) - if err != nil { - return nil - } - - out := make(map[string][]string) - for { - item := iter.Next() - if item == nil { - break - } - assoc := item.(nameAssociation) - out[assoc.containerID] = append(out[assoc.containerID], assoc.name) - } - - return out -} - -// transform maps a (deep) copied Container object to what queries need. -// A lock on the Container is not held because these are immutable deep copies. 
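ReserveName above is idempotent per owner, and its contract is easy to see in isolation. reserver is an invented, lock-free stand-in for the memdb-backed version:

package main

import (
	"errors"
	"fmt"
)

var ErrNameReserved = errors.New("name is reserved")

// reserver is a hypothetical map-backed illustration of the ReserveName
// contract: idempotent for the same owner, an error for a different one.
type reserver map[string]string // name -> containerID

func (r reserver) Reserve(name, containerID string) error {
	if owner, ok := r[name]; ok {
		if owner != containerID {
			return ErrNameReserved
		}
		return nil // same owner: idempotent success
	}
	r[name] = containerID
	return nil
}

func main() {
	r := reserver{}
	fmt.Println(r.Reserve("web", "abc")) // <nil>
	fmt.Println(r.Reserve("web", "abc")) // <nil> (idempotent)
	fmt.Println(r.Reserve("web", "def")) // name is reserved
}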
-func (v *memdbView) transform(container *Container) *Snapshot { - health := types.NoHealthcheck - if container.Health != nil { - health = container.Health.Status() - } - snapshot := &Snapshot{ - Container: types.Container{ - ID: container.ID, - Names: v.getNames(container.ID), - ImageID: container.ImageID.String(), - Ports: []types.Port{}, - Mounts: container.GetMountPoints(), - State: container.State.StateString(), - Status: container.State.String(), - Created: container.Created.Unix(), - }, - CreatedAt: container.Created, - StartedAt: container.StartedAt, - Name: container.Name, - Pid: container.Pid, - Managed: container.Managed, - ExposedPorts: make(nat.PortSet), - PortBindings: make(nat.PortSet), - Health: health, - Running: container.Running, - Paused: container.Paused, - ExitCode: container.ExitCode(), - } - - if snapshot.Names == nil { - // Dead containers will often have no name, so make sure the response isn't null - snapshot.Names = []string{} - } - - if container.HostConfig != nil { - snapshot.Container.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode) - snapshot.HostConfig.Isolation = string(container.HostConfig.Isolation) - for binding := range container.HostConfig.PortBindings { - snapshot.PortBindings[binding] = struct{}{} - } - } - - if container.Config != nil { - snapshot.Image = container.Config.Image - snapshot.Labels = container.Config.Labels - for exposed := range container.Config.ExposedPorts { - snapshot.ExposedPorts[exposed] = struct{}{} - } - } - - if len(container.Args) > 0 { - var args []string - for _, arg := range container.Args { - if strings.Contains(arg, " ") { - args = append(args, fmt.Sprintf("'%s'", arg)) - } else { - args = append(args, arg) - } - } - argsAsString := strings.Join(args, " ") - snapshot.Command = fmt.Sprintf("%s %s", container.Path, argsAsString) - } else { - snapshot.Command = container.Path - } - - snapshot.Ports = []types.Port{} - networks := make(map[string]*network.EndpointSettings) - if container.NetworkSettings != nil { - for name, netw := range container.NetworkSettings.Networks { - if netw == nil || netw.EndpointSettings == nil { - continue - } - networks[name] = &network.EndpointSettings{ - EndpointID: netw.EndpointID, - Gateway: netw.Gateway, - IPAddress: netw.IPAddress, - IPPrefixLen: netw.IPPrefixLen, - IPv6Gateway: netw.IPv6Gateway, - GlobalIPv6Address: netw.GlobalIPv6Address, - GlobalIPv6PrefixLen: netw.GlobalIPv6PrefixLen, - MacAddress: netw.MacAddress, - NetworkID: netw.NetworkID, - } - if netw.IPAMConfig != nil { - networks[name].IPAMConfig = &network.EndpointIPAMConfig{ - IPv4Address: netw.IPAMConfig.IPv4Address, - IPv6Address: netw.IPAMConfig.IPv6Address, - } - } - } - for port, bindings := range container.NetworkSettings.Ports { - p, err := nat.ParsePort(port.Port()) - if err != nil { - logrus.Warnf("invalid port map %+v", err) - continue - } - if len(bindings) == 0 { - snapshot.Ports = append(snapshot.Ports, types.Port{ - PrivatePort: uint16(p), - Type: port.Proto(), - }) - continue - } - for _, binding := range bindings { - h, err := nat.ParsePort(binding.HostPort) - if err != nil { - logrus.Warnf("invalid host port map %+v", err) - continue - } - snapshot.Ports = append(snapshot.Ports, types.Port{ - PrivatePort: uint16(p), - PublicPort: uint16(h), - Type: port.Proto(), - IP: binding.HostIP, - }) - } - } - } - snapshot.NetworkSettings = &types.SummaryNetworkSettings{Networks: networks} - - return snapshot -} - -// containerByIDIndexer is used to extract the ID field from Container types. 
-// memdb.StringFieldIndex can not be used since ID is a field from an embedded struct. -type containerByIDIndexer struct{} - -// FromObject implements the memdb.SingleIndexer interface for Container objects -func (e *containerByIDIndexer) FromObject(obj interface{}) (bool, []byte, error) { - c, ok := obj.(*Container) - if !ok { - return false, nil, fmt.Errorf("%T is not a Container", obj) - } - // Add the null character as a terminator - v := c.ID + "\x00" - return true, []byte(v), nil -} - -// FromArgs implements the memdb.Indexer interface -func (e *containerByIDIndexer) FromArgs(args ...interface{}) ([]byte, error) { - if len(args) != 1 { - return nil, fmt.Errorf("must provide only a single argument") - } - arg, ok := args[0].(string) - if !ok { - return nil, fmt.Errorf("argument must be a string: %#v", args[0]) - } - // Add the null character as a terminator - arg += "\x00" - return []byte(arg), nil -} - -// namesByNameIndexer is used to index container name associations by name. -type namesByNameIndexer struct{} - -func (e *namesByNameIndexer) FromObject(obj interface{}) (bool, []byte, error) { - n, ok := obj.(nameAssociation) - if !ok { - return false, nil, fmt.Errorf(`%T does not have type "nameAssociation"`, obj) - } - - // Add the null character as a terminator - return true, []byte(n.name + "\x00"), nil -} - -func (e *namesByNameIndexer) FromArgs(args ...interface{}) ([]byte, error) { - if len(args) != 1 { - return nil, fmt.Errorf("must provide only a single argument") - } - arg, ok := args[0].(string) - if !ok { - return nil, fmt.Errorf("argument must be a string: %#v", args[0]) - } - // Add the null character as a terminator - arg += "\x00" - return []byte(arg), nil -} - -// namesByContainerIDIndexer is used to index container names by container ID. 
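All of the indexers in this file append a null byte to their keys. The reason, sketched standalone: go-memdb compares index keys as raw byte prefixes, so without a terminator an exact lookup for one string would also match any longer string it prefixes.

package main

import (
	"bytes"
	"fmt"
)

// indexKey mimics the terminator convention used by the indexers here
// (standalone illustration, no memdb dependency).
func indexKey(s string) []byte {
	return []byte(s + "\x00")
}

func main() {
	fmt.Println(bytes.HasPrefix(indexKey("abcd"), indexKey("abc"))) // false
	fmt.Println(bytes.HasPrefix([]byte("abcd"), []byte("abc")))     // true: the false match the terminator prevents
	fmt.Println(bytes.HasPrefix(indexKey("abc"), indexKey("abc")))  // true: exact matches still work
}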
-type namesByContainerIDIndexer struct{} - -func (e *namesByContainerIDIndexer) FromObject(obj interface{}) (bool, []byte, error) { - n, ok := obj.(nameAssociation) - if !ok { - return false, nil, fmt.Errorf(`%T does not have type "nameAssocation"`, obj) - } - - // Add the null character as a terminator - return true, []byte(n.containerID + "\x00"), nil -} - -func (e *namesByContainerIDIndexer) FromArgs(args ...interface{}) ([]byte, error) { - if len(args) != 1 { - return nil, fmt.Errorf("must provide only a single argument") - } - arg, ok := args[0].(string) - if !ok { - return nil, fmt.Errorf("argument must be a string: %#v", args[0]) - } - // Add the null character as a terminator - arg += "\x00" - return []byte(arg), nil -} diff --git a/vendor/github.com/docker/docker/contrib/apparmor/main.go b/vendor/github.com/docker/docker/contrib/apparmor/main.go deleted file mode 100644 index f4a2978b8..000000000 --- a/vendor/github.com/docker/docker/contrib/apparmor/main.go +++ /dev/null @@ -1,56 +0,0 @@ -package main - -import ( - "fmt" - "log" - "os" - "path" - "text/template" - - "github.com/docker/docker/pkg/aaparser" -) - -type profileData struct { - Version int -} - -func main() { - if len(os.Args) < 2 { - log.Fatal("pass a filename to save the profile in.") - } - - // parse the arg - apparmorProfilePath := os.Args[1] - - version, err := aaparser.GetVersion() - if err != nil { - log.Fatal(err) - } - data := profileData{ - Version: version, - } - fmt.Printf("apparmor_parser is of version %+v\n", data) - - // parse the template - compiled, err := template.New("apparmor_profile").Parse(dockerProfileTemplate) - if err != nil { - log.Fatalf("parsing template failed: %v", err) - } - - // make sure /etc/apparmor.d exists - if err := os.MkdirAll(path.Dir(apparmorProfilePath), 0755); err != nil { - log.Fatal(err) - } - - f, err := os.OpenFile(apparmorProfilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - log.Fatal(err) - } - defer f.Close() - - if err := compiled.Execute(f, data); err != nil { - log.Fatalf("executing template failed: %v", err) - } - - fmt.Printf("created apparmor profile for version %+v at %q\n", data, apparmorProfilePath) -} diff --git a/vendor/github.com/docker/docker/contrib/apparmor/template.go b/vendor/github.com/docker/docker/contrib/apparmor/template.go deleted file mode 100644 index e5e1c8bed..000000000 --- a/vendor/github.com/docker/docker/contrib/apparmor/template.go +++ /dev/null @@ -1,268 +0,0 @@ -package main - -const dockerProfileTemplate = `@{DOCKER_GRAPH_PATH}=/var/lib/docker - -profile /usr/bin/docker (attach_disconnected, complain) { - # Prevent following links to these files during container setup. 
- deny /etc/** mkl, - deny /dev/** kl, - deny /sys/** mkl, - deny /proc/** mkl, - - mount -> @{DOCKER_GRAPH_PATH}/**, - mount -> /, - mount -> /proc/**, - mount -> /sys/**, - mount -> /run/docker/netns/**, - mount -> /.pivot_root[0-9]*/, - - / r, - - umount, - pivot_root, -{{if ge .Version 209000}} - signal (receive) peer=@{profile_name}, - signal (receive) peer=unconfined, - signal (send), -{{end}} - network, - capability, - owner /** rw, - @{DOCKER_GRAPH_PATH}/** rwl, - @{DOCKER_GRAPH_PATH}/linkgraph.db k, - @{DOCKER_GRAPH_PATH}/network/files/boltdb.db k, - @{DOCKER_GRAPH_PATH}/network/files/local-kv.db k, - @{DOCKER_GRAPH_PATH}/[0-9]*.[0-9]*/linkgraph.db k, - - # For non-root client use: - /dev/urandom r, - /dev/null rw, - /dev/pts/[0-9]* rw, - /run/docker.sock rw, - /proc/** r, - /proc/[0-9]*/attr/exec w, - /sys/kernel/mm/hugepages/ r, - /etc/localtime r, - /etc/ld.so.cache r, - /etc/passwd r, - -{{if ge .Version 209000}} - ptrace peer=@{profile_name}, - ptrace (read) peer=docker-default, - deny ptrace (trace) peer=docker-default, - deny ptrace peer=/usr/bin/docker///bin/ps, -{{end}} - - /usr/lib/** rm, - /lib/** rm, - - /usr/bin/docker pix, - /sbin/xtables-multi rCx, - /sbin/iptables rCx, - /sbin/modprobe rCx, - /sbin/auplink rCx, - /sbin/mke2fs rCx, - /sbin/tune2fs rCx, - /sbin/blkid rCx, - /bin/kmod rCx, - /usr/bin/xz rCx, - /bin/ps rCx, - /bin/tar rCx, - /bin/cat rCx, - /sbin/zfs rCx, - /sbin/apparmor_parser rCx, - -{{if ge .Version 209000}} - # Transitions - change_profile -> docker-*, - change_profile -> unconfined, -{{end}} - - profile /bin/cat (complain) { - /etc/ld.so.cache r, - /lib/** rm, - /dev/null rw, - /proc r, - /bin/cat mr, - - # For reading in 'docker stats': - /proc/[0-9]*/net/dev r, - } - profile /bin/ps (complain) { - /etc/ld.so.cache r, - /etc/localtime r, - /etc/passwd r, - /etc/nsswitch.conf r, - /lib/** rm, - /proc/[0-9]*/** r, - /dev/null rw, - /bin/ps mr, - -{{if ge .Version 209000}} - # We don't need ptrace so we'll deny and ignore the error. - deny ptrace (read, trace), -{{end}} - - # Quiet dac_override denials - deny capability dac_override, - deny capability dac_read_search, - deny capability sys_ptrace, - - /dev/tty r, - /proc/stat r, - /proc/cpuinfo r, - /proc/meminfo r, - /proc/uptime r, - /sys/devices/system/cpu/online r, - /proc/sys/kernel/pid_max r, - /proc/ r, - /proc/tty/drivers r, - } - profile /sbin/iptables (complain) { -{{if ge .Version 209000}} - signal (receive) peer=/usr/bin/docker, -{{end}} - capability net_admin, - } - profile /sbin/auplink flags=(attach_disconnected, complain) { -{{if ge .Version 209000}} - signal (receive) peer=/usr/bin/docker, -{{end}} - capability sys_admin, - capability dac_override, - - @{DOCKER_GRAPH_PATH}/aufs/** rw, - @{DOCKER_GRAPH_PATH}/tmp/** rw, - # For user namespaces: - @{DOCKER_GRAPH_PATH}/[0-9]*.[0-9]*/** rw, - - /sys/fs/aufs/** r, - /lib/** rm, - /apparmor/.null r, - /dev/null rw, - /etc/ld.so.cache r, - /sbin/auplink rm, - /proc/fs/aufs/** rw, - /proc/[0-9]*/mounts rw, - } - profile /sbin/modprobe /bin/kmod (complain) { -{{if ge .Version 209000}} - signal (receive) peer=/usr/bin/docker, -{{end}} - capability sys_module, - /etc/ld.so.cache r, - /lib/** rm, - /dev/null rw, - /apparmor/.null rw, - /sbin/modprobe rm, - /bin/kmod rm, - /proc/cmdline r, - /sys/module/** r, - /etc/modprobe.d{/,/**} r, - } - # xz works via pipes, so we do not need access to the filesystem. 
- profile /usr/bin/xz (complain) { -{{if ge .Version 209000}} - signal (receive) peer=/usr/bin/docker, -{{end}} - /etc/ld.so.cache r, - /lib/** rm, - /usr/bin/xz rm, - deny /proc/** rw, - deny /sys/** rw, - } - profile /sbin/xtables-multi (attach_disconnected, complain) { - /etc/ld.so.cache r, - /lib/** rm, - /sbin/xtables-multi rm, - /apparmor/.null w, - /dev/null rw, - - /proc r, - - capability net_raw, - capability net_admin, - network raw, - } - profile /sbin/zfs (attach_disconnected, complain) { - file, - capability, - } - profile /sbin/mke2fs (complain) { - /sbin/mke2fs rm, - - /lib/** rm, - - /apparmor/.null w, - - /etc/ld.so.cache r, - /etc/mke2fs.conf r, - /etc/mtab r, - - /dev/dm-* rw, - /dev/urandom r, - /dev/null rw, - - /proc/swaps r, - /proc/[0-9]*/mounts r, - } - profile /sbin/tune2fs (complain) { - /sbin/tune2fs rm, - - /lib/** rm, - - /apparmor/.null w, - - /etc/blkid.conf r, - /etc/mtab r, - /etc/ld.so.cache r, - - /dev/null rw, - /dev/.blkid.tab r, - /dev/dm-* rw, - - /proc/swaps r, - /proc/[0-9]*/mounts r, - } - profile /sbin/blkid (complain) { - /sbin/blkid rm, - - /lib/** rm, - /apparmor/.null w, - - /etc/ld.so.cache r, - /etc/blkid.conf r, - - /dev/null rw, - /dev/.blkid.tab rl, - /dev/.blkid.tab* rwl, - /dev/dm-* r, - - /sys/devices/virtual/block/** r, - - capability mknod, - - mount -> @{DOCKER_GRAPH_PATH}/**, - } - profile /sbin/apparmor_parser (complain) { - /sbin/apparmor_parser rm, - - /lib/** rm, - - /etc/ld.so.cache r, - /etc/apparmor/** r, - /etc/apparmor.d/** r, - /etc/apparmor.d/cache/** w, - - /dev/null rw, - - /sys/kernel/security/apparmor/** r, - /sys/kernel/security/apparmor/.replace w, - - /proc/[0-9]*/mounts r, - /proc/sys/kernel/osrelease r, - /proc r, - - capability mac_admin, - } -}` diff --git a/vendor/github.com/docker/docker/contrib/docker-device-tool/device_tool.go b/vendor/github.com/docker/docker/contrib/docker-device-tool/device_tool.go deleted file mode 100644 index d3ec46a8b..000000000 --- a/vendor/github.com/docker/docker/contrib/docker-device-tool/device_tool.go +++ /dev/null @@ -1,167 +0,0 @@ -// +build !windows - -package main - -import ( - "flag" - "fmt" - "os" - "path" - "sort" - "strconv" - "strings" - - "github.com/docker/docker/daemon/graphdriver/devmapper" - "github.com/docker/docker/pkg/devicemapper" - "github.com/sirupsen/logrus" -) - -func usage() { - fmt.Fprintf(os.Stderr, "Usage: %s [status] | [list] | [device id] | [resize new-pool-size] | [snap new-id base-id] | [remove id] | [mount id mountpoint]\n", os.Args[0]) - flag.PrintDefaults() - os.Exit(1) -} - -func byteSizeFromString(arg string) (int64, error) { - digits := "" - rest := "" - last := strings.LastIndexAny(arg, "0123456789") - if last >= 0 { - digits = arg[:last+1] - rest = arg[last+1:] - } - - val, err := strconv.ParseInt(digits, 10, 64) - if err != nil { - return val, err - } - - rest = strings.ToLower(strings.TrimSpace(rest)) - - var multiplier int64 = 1 - switch rest { - case "": - multiplier = 1 - case "k", "kb": - multiplier = 1024 - case "m", "mb": - multiplier = 1024 * 1024 - case "g", "gb": - multiplier = 1024 * 1024 * 1024 - case "t", "tb": - multiplier = 1024 * 1024 * 1024 * 1024 - default: - return 0, fmt.Errorf("Unknown size unit: %s", rest) - } - - return val * multiplier, nil -} - -func main() { - root := flag.String("r", "/var/lib/docker", "Docker root dir") - flDebug := flag.Bool("D", false, "Debug mode") - - flag.Parse() - - if *flDebug { - os.Setenv("DEBUG", "1") - logrus.SetLevel(logrus.DebugLevel) - } - - if flag.NArg() < 1 { - usage() - } - - 
args := flag.Args()
-
-    home := path.Join(*root, "devicemapper")
-    devices, err := devmapper.NewDeviceSet(home, false, nil, nil, nil)
-    if err != nil {
-        fmt.Println("Can't initialize device mapper: ", err)
-        os.Exit(1)
-    }
-
-    switch args[0] {
-    case "status":
-        status := devices.Status()
-        fmt.Printf("Pool name: %s\n", status.PoolName)
-        fmt.Printf("Data Loopback file: %s\n", status.DataLoopback)
-        fmt.Printf("Metadata Loopback file: %s\n", status.MetadataLoopback)
-        fmt.Printf("Sector size: %d\n", status.SectorSize)
-        fmt.Printf("Data use: %d of %d (%.1f %%)\n", status.Data.Used, status.Data.Total, 100.0*float64(status.Data.Used)/float64(status.Data.Total))
-        fmt.Printf("Metadata use: %d of %d (%.1f %%)\n", status.Metadata.Used, status.Metadata.Total, 100.0*float64(status.Metadata.Used)/float64(status.Metadata.Total))
-    case "list":
-        ids := devices.List()
-        sort.Strings(ids)
-        for _, id := range ids {
-            fmt.Println(id)
-        }
-    case "device":
-        if flag.NArg() < 2 {
-            usage()
-        }
-        status, err := devices.GetDeviceStatus(args[1])
-        if err != nil {
-            fmt.Println("Can't get device info: ", err)
-            os.Exit(1)
-        }
-        fmt.Printf("Id: %d\n", status.DeviceID)
-        fmt.Printf("Size: %d\n", status.Size)
-        fmt.Printf("Transaction Id: %d\n", status.TransactionID)
-        fmt.Printf("Size in Sectors: %d\n", status.SizeInSectors)
-        fmt.Printf("Mapped Sectors: %d\n", status.MappedSectors)
-        fmt.Printf("Highest Mapped Sector: %d\n", status.HighestMappedSector)
-    case "resize":
-        if flag.NArg() < 2 {
-            usage()
-        }
-
-        size, err := byteSizeFromString(args[1])
-        if err != nil {
-            fmt.Println("Invalid size: ", err)
-            os.Exit(1)
-        }
-
-        err = devices.ResizePool(size)
-        if err != nil {
-            fmt.Println("Error resizing pool: ", err)
-            os.Exit(1)
-        }
-
-    case "snap":
-        if flag.NArg() < 3 {
-            usage()
-        }
-
-        err := devices.AddDevice(args[1], args[2], nil)
-        if err != nil {
-            fmt.Println("Can't create snap device: ", err)
-            os.Exit(1)
-        }
-    case "remove":
-        if flag.NArg() < 2 {
-            usage()
-        }
-
-        err := devicemapper.RemoveDevice(args[1])
-        if err != nil {
-            fmt.Println("Can't remove device: ", err)
-            os.Exit(1)
-        }
-    case "mount":
-        if flag.NArg() < 3 {
-            usage()
-        }
-
-        err := devices.MountDevice(args[1], args[2], "")
-        if err != nil {
-            fmt.Println("Can't mount device: ", err)
-            os.Exit(1)
-        }
-    default:
-        fmt.Printf("Unknown command %s\n", args[0])
-        usage()
-
-        os.Exit(1)
-    }
-}
diff --git a/vendor/github.com/docker/docker/contrib/docker-device-tool/device_tool_windows.go b/vendor/github.com/docker/docker/contrib/docker-device-tool/device_tool_windows.go
deleted file mode 100644
index da29a2cad..000000000
--- a/vendor/github.com/docker/docker/contrib/docker-device-tool/device_tool_windows.go
+++ /dev/null
@@ -1,4 +0,0 @@
-package main
-
-func main() {
-}
diff --git a/vendor/github.com/docker/docker/contrib/httpserver/server.go b/vendor/github.com/docker/docker/contrib/httpserver/server.go
deleted file mode 100644
index a75d5abb3..000000000
--- a/vendor/github.com/docker/docker/contrib/httpserver/server.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package main
-
-import (
-    "log"
-    "net/http"
-)
-
-func main() {
-    fs := http.FileServer(http.Dir("/static"))
-    http.Handle("/", fs)
-    log.Panic(http.ListenAndServe(":80", nil))
-}
diff --git a/vendor/github.com/docker/docker/contrib/nnp-test/nnp-test.c b/vendor/github.com/docker/docker/contrib/nnp-test/nnp-test.c
deleted file mode 100644
index b767da7e1..000000000
--- a/vendor/github.com/docker/docker/contrib/nnp-test/nnp-test.c
+++ /dev/null
@@ -1,10 +0,0 @@
-#include <stdio.h>
-#include <unistd.h>
-#include <sys/types.h>
-
-int main(int argc, char *argv[])
-{
-    printf("EUID=%d\n", geteuid());
-    return 0;
-}
-
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/acct.c b/vendor/github.com/docker/docker/contrib/syscall-test/acct.c
deleted file mode 100644
index 88ac28796..000000000
--- a/vendor/github.com/docker/docker/contrib/syscall-test/acct.c
+++ /dev/null
@@ -1,16 +0,0 @@
-#define _GNU_SOURCE
-#include <unistd.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <errno.h>
-
-int main(int argc, char **argv)
-{
-    int err = acct("/tmp/t");
-    if (err == -1) {
-        fprintf(stderr, "acct failed: %s\n", strerror(errno));
-        exit(EXIT_FAILURE);
-    }
-    exit(EXIT_SUCCESS);
-}
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/exit32.s b/vendor/github.com/docker/docker/contrib/syscall-test/exit32.s
deleted file mode 100644
index 8bbb5c58b..000000000
--- a/vendor/github.com/docker/docker/contrib/syscall-test/exit32.s
+++ /dev/null
@@ -1,7 +0,0 @@
-.globl _start
-.text
-_start:
-    xorl %eax, %eax
-    incl %eax
-    movb $0, %bl
-    int $0x80
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/ns.c b/vendor/github.com/docker/docker/contrib/syscall-test/ns.c
deleted file mode 100644
index 624388630..000000000
--- a/vendor/github.com/docker/docker/contrib/syscall-test/ns.c
+++ /dev/null
@@ -1,63 +0,0 @@
-#define _GNU_SOURCE
-#include <errno.h>
-#include <sched.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/mman.h>
-#include <sys/wait.h>
-#include <unistd.h>
-
-#define STACK_SIZE (1024 * 1024) /* Stack size for cloned child */
-
-struct clone_args {
-    char **argv;
-};
-
-// child_exec is the func that will be executed as the result of clone
-static int child_exec(void *stuff)
-{
-    struct clone_args *args = (struct clone_args *)stuff;
-    if (execvp(args->argv[0], args->argv) != 0) {
-        fprintf(stderr, "failed to execvp arguments %s\n",
-            strerror(errno));
-        exit(-1);
-    }
-    // we should never reach here!
-    exit(EXIT_FAILURE);
-}
-
-int main(int argc, char **argv)
-{
-    struct clone_args args;
-    args.argv = &argv[1];
-
-    int clone_flags = CLONE_NEWNS | CLONE_NEWPID | SIGCHLD;
-
-    // allocate stack for child
-    char *stack;        /* Start of stack buffer */
-    char *child_stack;  /* End of stack buffer */
-    stack =
-        mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE,
-             MAP_SHARED | MAP_ANON | MAP_STACK, -1, 0);
-    if (stack == MAP_FAILED) {
-        fprintf(stderr, "mmap failed: %s\n", strerror(errno));
-        exit(EXIT_FAILURE);
-    }
-    child_stack = stack + STACK_SIZE;   /* Assume stack grows downward */
-
-    // the result of this call is that our child_exec will be run in another
-    // process returning its pid
-    pid_t pid = clone(child_exec, child_stack, clone_flags, &args);
-    if (pid < 0) {
-        fprintf(stderr, "clone failed: %s\n", strerror(errno));
-        exit(EXIT_FAILURE);
-    }
-    // lets wait on our child process here before we, the parent, exits
-    if (waitpid(pid, NULL, 0) == -1) {
-        fprintf(stderr, "failed to wait pid %d\n", pid);
-        exit(EXIT_FAILURE);
-    }
-    exit(EXIT_SUCCESS);
-}
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/raw.c b/vendor/github.com/docker/docker/contrib/syscall-test/raw.c
deleted file mode 100644
index 7995a0d3a..000000000
--- a/vendor/github.com/docker/docker/contrib/syscall-test/raw.c
+++ /dev/null
@@ -1,14 +0,0 @@
-#include <stdio.h>
-#include <unistd.h>
-#include <sys/socket.h>
-#include <netinet/in.h>
-#include <netinet/udp.h>
-
-int main() {
-    if (socket(PF_INET, SOCK_RAW, IPPROTO_UDP) == -1) {
-        perror("socket");
-        return 1;
-    }
-
-    return 0;
-}
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/setgid.c b/vendor/github.com/docker/docker/contrib/syscall-test/setgid.c
deleted file mode 100644
index df9680c86..000000000
--- a/vendor/github.com/docker/docker/contrib/syscall-test/setgid.c
+++ /dev/null
@@ -1,11 +0,0 @@
-#include <sys/types.h>
-#include <unistd.h>
-#include <stdio.h>
-
-int main() {
-    if (setgid(1) == -1) {
-        perror("setgid");
-        return 1;
-    }
-    return 0;
-}
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/setuid.c b/vendor/github.com/docker/docker/contrib/syscall-test/setuid.c
deleted file mode 100644
index 5b939677e..000000000
--- a/vendor/github.com/docker/docker/contrib/syscall-test/setuid.c
+++ /dev/null
@@ -1,11 +0,0 @@
-#include <sys/types.h>
-#include <unistd.h>
-#include <stdio.h>
-
-int main() {
-    if (setuid(1) == -1) {
-        perror("setuid");
-        return 1;
-    }
-    return 0;
-}
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/socket.c b/vendor/github.com/docker/docker/contrib/syscall-test/socket.c
deleted file mode 100644
index d26c82f00..000000000
--- a/vendor/github.com/docker/docker/contrib/syscall-test/socket.c
+++ /dev/null
@@ -1,30 +0,0 @@
-#include <stdio.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <netinet/in.h>
-#include <arpa/inet.h>
-
-int main() {
-    int s;
-    struct sockaddr_in sin;
-
-    s = socket(AF_INET, SOCK_STREAM, 0);
-    if (s == -1) {
-        perror("socket");
-        return 1;
-    }
-
-    sin.sin_family = AF_INET;
-    sin.sin_addr.s_addr = INADDR_ANY;
-    sin.sin_port = htons(80);
-
-    if (bind(s, (struct sockaddr *)&sin, sizeof(sin)) == -1) {
-        perror("bind");
-        return 1;
-    }
-
-    close(s);
-
-    return 0;
-}
diff --git a/vendor/github.com/docker/docker/contrib/syscall-test/userns.c b/vendor/github.com/docker/docker/contrib/syscall-test/userns.c
deleted file mode 100644
index 4c5c8d304..000000000
--- a/vendor/github.com/docker/docker/contrib/syscall-test/userns.c
+++ /dev/null
@@ -1,63 +0,0 @@
-#define _GNU_SOURCE
-#include <errno.h>
-#include <sched.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/mman.h>
-#include <sys/wait.h>
-#include <unistd.h>
-
-#define STACK_SIZE (1024 * 1024) /* Stack size for cloned child */
-
-struct clone_args {
- char **argv; -}; - -// child_exec is the func that will be executed as the result of clone -static int child_exec(void *stuff) -{ - struct clone_args *args = (struct clone_args *)stuff; - if (execvp(args->argv[0], args->argv) != 0) { - fprintf(stderr, "failed to execvp arguments %s\n", - strerror(errno)); - exit(-1); - } - // we should never reach here! - exit(EXIT_FAILURE); -} - -int main(int argc, char **argv) -{ - struct clone_args args; - args.argv = &argv[1]; - - int clone_flags = CLONE_NEWUSER | SIGCHLD; - - // allocate stack for child - char *stack; /* Start of stack buffer */ - char *child_stack; /* End of stack buffer */ - stack = - mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE, - MAP_SHARED | MAP_ANON | MAP_STACK, -1, 0); - if (stack == MAP_FAILED) { - fprintf(stderr, "mmap failed: %s\n", strerror(errno)); - exit(EXIT_FAILURE); - } - child_stack = stack + STACK_SIZE; /* Assume stack grows downward */ - - // the result of this call is that our child_exec will be run in another - // process returning its pid - pid_t pid = clone(child_exec, child_stack, clone_flags, &args); - if (pid < 0) { - fprintf(stderr, "clone failed: %s\n", strerror(errno)); - exit(EXIT_FAILURE); - } - // lets wait on our child process here before we, the parent, exits - if (waitpid(pid, NULL, 0) == -1) { - fprintf(stderr, "failed to wait pid %d\n", pid); - exit(EXIT_FAILURE); - } - exit(EXIT_SUCCESS); -} diff --git a/vendor/github.com/docker/docker/daemon/apparmor_default.go b/vendor/github.com/docker/docker/daemon/apparmor_default.go deleted file mode 100644 index 461f5c7f9..000000000 --- a/vendor/github.com/docker/docker/daemon/apparmor_default.go +++ /dev/null @@ -1,36 +0,0 @@ -// +build linux - -package daemon // import "github.com/docker/docker/daemon" - -import ( - "fmt" - - aaprofile "github.com/docker/docker/profiles/apparmor" - "github.com/opencontainers/runc/libcontainer/apparmor" -) - -// Define constants for native driver -const ( - defaultApparmorProfile = "docker-default" -) - -func ensureDefaultAppArmorProfile() error { - if apparmor.IsEnabled() { - loaded, err := aaprofile.IsLoaded(defaultApparmorProfile) - if err != nil { - return fmt.Errorf("Could not check if %s AppArmor profile was loaded: %s", defaultApparmorProfile, err) - } - - // Nothing to do. - if loaded { - return nil - } - - // Load the profile. 
- if err := aaprofile.InstallDefault(defaultApparmorProfile); err != nil { - return fmt.Errorf("AppArmor enabled on system but the %s profile could not be loaded: %s", defaultApparmorProfile, err) - } - } - - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/apparmor_default_unsupported.go b/vendor/github.com/docker/docker/daemon/apparmor_default_unsupported.go deleted file mode 100644 index 51f9c526b..000000000 --- a/vendor/github.com/docker/docker/daemon/apparmor_default_unsupported.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !linux - -package daemon // import "github.com/docker/docker/daemon" - -func ensureDefaultAppArmorProfile() error { - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/archive.go b/vendor/github.com/docker/docker/daemon/archive.go deleted file mode 100644 index 9c7971b56..000000000 --- a/vendor/github.com/docker/docker/daemon/archive.go +++ /dev/null @@ -1,449 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "io" - "os" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/container" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/system" - "github.com/pkg/errors" -) - -// ErrExtractPointNotDirectory is used to convey that the operation to extract -// a tar archive to a directory in a container has failed because the specified -// path does not refer to a directory. -var ErrExtractPointNotDirectory = errors.New("extraction point is not a directory") - -// The daemon will use the following interfaces if the container fs implements -// these for optimized copies to and from the container. -type extractor interface { - ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error -} - -type archiver interface { - ArchivePath(src string, opts *archive.TarOptions) (io.ReadCloser, error) -} - -// helper functions to extract or archive -func extractArchive(i interface{}, src io.Reader, dst string, opts *archive.TarOptions) error { - if ea, ok := i.(extractor); ok { - return ea.ExtractArchive(src, dst, opts) - } - return chrootarchive.Untar(src, dst, opts) -} - -func archivePath(i interface{}, src string, opts *archive.TarOptions) (io.ReadCloser, error) { - if ap, ok := i.(archiver); ok { - return ap.ArchivePath(src, opts) - } - return archive.TarWithOptions(src, opts) -} - -// ContainerCopy performs a deprecated operation of archiving the resource at -// the specified path in the container identified by the given name. -func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) { - container, err := daemon.GetContainer(name) - if err != nil { - return nil, err - } - - // Make sure an online file-system operation is permitted. - if err := daemon.isOnlineFSOperationPermitted(container); err != nil { - return nil, errdefs.System(err) - } - - data, err := daemon.containerCopy(container, res) - if err == nil { - return data, nil - } - - if os.IsNotExist(err) { - return nil, containerFileNotFound{res, name} - } - return nil, errdefs.System(err) -} - -// ContainerStatPath stats the filesystem resource at the specified path in the -// container identified by the given name. 
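extractArchive and archivePath above use the optional-interface pattern: probe for an optimized implementation with a type assertion, and fall back to the generic path otherwise. A self-contained sketch of that pattern; fastFS and the other names here are invented:

package main

import (
	"fmt"
	"io"
	"strings"
)

// extractor mirrors the shape of the optional interface above
// (illustration only; the real one takes *archive.TarOptions).
type extractor interface {
	ExtractArchive(src io.Reader, dst string) error
}

func genericExtract(src io.Reader, dst string) error {
	fmt.Println("generic extraction to", dst)
	return nil
}

// extract probes for the optimized implementation, falling back otherwise.
func extract(fs interface{}, src io.Reader, dst string) error {
	if ea, ok := fs.(extractor); ok {
		return ea.ExtractArchive(src, dst) // optimized path
	}
	return genericExtract(src, dst)
}

type fastFS struct{}

func (fastFS) ExtractArchive(src io.Reader, dst string) error {
	fmt.Println("optimized extraction to", dst)
	return nil
}

func main() {
	data := strings.NewReader("tar bytes")
	extract(fastFS{}, data, "/a")   // optimized extraction to /a
	extract(struct{}{}, data, "/b") // generic extraction to /b
}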
-func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) { - container, err := daemon.GetContainer(name) - if err != nil { - return nil, err - } - - // Make sure an online file-system operation is permitted. - if err := daemon.isOnlineFSOperationPermitted(container); err != nil { - return nil, errdefs.System(err) - } - - stat, err = daemon.containerStatPath(container, path) - if err == nil { - return stat, nil - } - - if os.IsNotExist(err) { - return nil, containerFileNotFound{path, name} - } - return nil, errdefs.System(err) -} - -// ContainerArchivePath creates an archive of the filesystem resource at the -// specified path in the container identified by the given name. Returns a -// tar archive of the resource and whether it was a directory or a single file. -func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) { - container, err := daemon.GetContainer(name) - if err != nil { - return nil, nil, err - } - - // Make sure an online file-system operation is permitted. - if err := daemon.isOnlineFSOperationPermitted(container); err != nil { - return nil, nil, errdefs.System(err) - } - - content, stat, err = daemon.containerArchivePath(container, path) - if err == nil { - return content, stat, nil - } - - if os.IsNotExist(err) { - return nil, nil, containerFileNotFound{path, name} - } - return nil, nil, errdefs.System(err) -} - -// ContainerExtractToDir extracts the given archive to the specified location -// in the filesystem of the container identified by the given name. The given -// path must be of a directory in the container. If it is not, the error will -// be ErrExtractPointNotDirectory. If noOverwriteDirNonDir is true then it will -// be an error if unpacking the given content would cause an existing directory -// to be replaced with a non-directory and vice versa. -func (daemon *Daemon) ContainerExtractToDir(name, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - - // Make sure an online file-system operation is permitted. - if err := daemon.isOnlineFSOperationPermitted(container); err != nil { - return errdefs.System(err) - } - - err = daemon.containerExtractToDir(container, path, copyUIDGID, noOverwriteDirNonDir, content) - if err == nil { - return nil - } - - if os.IsNotExist(err) { - return containerFileNotFound{path, name} - } - return errdefs.System(err) -} - -// containerStatPath stats the filesystem resource at the specified path in this -// container. Returns stat info about the resource. -func (daemon *Daemon) containerStatPath(container *container.Container, path string) (stat *types.ContainerPathStat, err error) { - container.Lock() - defer container.Unlock() - - if err = daemon.Mount(container); err != nil { - return nil, err - } - defer daemon.Unmount(container) - - err = daemon.mountVolumes(container) - defer container.DetachAndUnmount(daemon.LogVolumeEvent) - if err != nil { - return nil, err - } - - // Normalize path before sending to rootfs - path = container.BaseFS.FromSlash(path) - - resolvedPath, absPath, err := container.ResolvePath(path) - if err != nil { - return nil, err - } - - return container.StatPath(resolvedPath, absPath) -} - -// containerArchivePath creates an archive of the filesystem resource at the specified -// path in this container. 
Returns a tar archive of the resource and stat info -// about the resource. -func (daemon *Daemon) containerArchivePath(container *container.Container, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) { - container.Lock() - - defer func() { - if err != nil { - // Wait to unlock the container until the archive is fully read - // (see the ReadCloseWrapper func below) or if there is an error - // before that occurs. - container.Unlock() - } - }() - - if err = daemon.Mount(container); err != nil { - return nil, nil, err - } - - defer func() { - if err != nil { - // unmount any volumes - container.DetachAndUnmount(daemon.LogVolumeEvent) - // unmount the container's rootfs - daemon.Unmount(container) - } - }() - - if err = daemon.mountVolumes(container); err != nil { - return nil, nil, err - } - - // Normalize path before sending to rootfs - path = container.BaseFS.FromSlash(path) - - resolvedPath, absPath, err := container.ResolvePath(path) - if err != nil { - return nil, nil, err - } - - stat, err = container.StatPath(resolvedPath, absPath) - if err != nil { - return nil, nil, err - } - - // We need to rebase the archive entries if the last element of the - // resolved path was a symlink that was evaluated and is now different - // than the requested path. For example, if the given path was "/foo/bar/", - // but it resolved to "/var/lib/docker/containers/{id}/foo/baz/", we want - // to ensure that the archive entries start with "bar" and not "baz". This - // also catches the case when the root directory of the container is - // requested: we want the archive entries to start with "/" and not the - // container ID. - driver := container.BaseFS - - // Get the source and the base paths of the container resolved path in order - // to get the proper tar options for the rebase tar. - resolvedPath = driver.Clean(resolvedPath) - if driver.Base(resolvedPath) == "." { - resolvedPath += string(driver.Separator()) + "." - } - sourceDir, sourceBase := driver.Dir(resolvedPath), driver.Base(resolvedPath) - opts := archive.TarResourceRebaseOpts(sourceBase, driver.Base(absPath)) - - data, err := archivePath(driver, sourceDir, opts) - if err != nil { - return nil, nil, err - } - - content = ioutils.NewReadCloserWrapper(data, func() error { - err := data.Close() - container.DetachAndUnmount(daemon.LogVolumeEvent) - daemon.Unmount(container) - container.Unlock() - return err - }) - - daemon.LogContainerEvent(container, "archive-path") - - return content, stat, nil -} - -// containerExtractToDir extracts the given tar archive to the specified location in the -// filesystem of this container. The given path must be of a directory in the -// container. If it is not, the error will be ErrExtractPointNotDirectory. If -// noOverwriteDirNonDir is true then it will be an error if unpacking the -// given content would cause an existing directory to be replaced with a non- -// directory and vice versa. 
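The ioutils.NewReadCloserWrapper call in containerArchivePath above is what lets the function return while the container is still locked and mounted: the unlock/unmount cleanup is deferred into Close, so it runs only once the caller has consumed the archive. A minimal equivalent of that wrapper, with invented names but the same shape as the ioutils helper:

package main

import (
	"fmt"
	"io"
	"strings"
)

// readCloserWrapper mirrors ioutils.NewReadCloserWrapper: it pairs a Reader
// with a cleanup function that runs when the stream is closed.
type readCloserWrapper struct {
	io.Reader
	closer func() error
}

func (w *readCloserWrapper) Close() error { return w.closer() }

func newReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
	return &readCloserWrapper{Reader: r, closer: closer}
}

func main() {
	data := strings.NewReader("archive bytes")
	rc := newReadCloserWrapper(data, func() error {
		fmt.Println("cleanup: unmount volumes, unmount rootfs, unlock container")
		return nil
	})
	io.Copy(io.Discard, rc) // caller reads the whole stream...
	rc.Close()              // ...and cleanup runs only now
}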
-func (daemon *Daemon) containerExtractToDir(container *container.Container, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) (err error) { - container.Lock() - defer container.Unlock() - - if err = daemon.Mount(container); err != nil { - return err - } - defer daemon.Unmount(container) - - err = daemon.mountVolumes(container) - defer container.DetachAndUnmount(daemon.LogVolumeEvent) - if err != nil { - return err - } - - // Normalize path before sending to rootfs' - path = container.BaseFS.FromSlash(path) - driver := container.BaseFS - - // Check if a drive letter supplied, it must be the system drive. No-op except on Windows - path, err = system.CheckSystemDriveAndRemoveDriveLetter(path, driver) - if err != nil { - return err - } - - // The destination path needs to be resolved to a host path, with all - // symbolic links followed in the scope of the container's rootfs. Note - // that we do not use `container.ResolvePath(path)` here because we need - // to also evaluate the last path element if it is a symlink. This is so - // that you can extract an archive to a symlink that points to a directory. - - // Consider the given path as an absolute path in the container. - absPath := archive.PreserveTrailingDotOrSeparator( - driver.Join(string(driver.Separator()), path), - path, - driver.Separator()) - - // This will evaluate the last path element if it is a symlink. - resolvedPath, err := container.GetResourcePath(absPath) - if err != nil { - return err - } - - stat, err := driver.Lstat(resolvedPath) - if err != nil { - return err - } - - if !stat.IsDir() { - return ErrExtractPointNotDirectory - } - - // Need to check if the path is in a volume. If it is, it cannot be in a - // read-only volume. If it is not in a volume, the container cannot be - // configured with a read-only rootfs. - - // Use the resolved path relative to the container rootfs as the new - // absPath. This way we fully follow any symlinks in a volume that may - // lead back outside the volume. - // - // The Windows implementation of filepath.Rel in golang 1.4 does not - // support volume style file path semantics. On Windows when using the - // filter driver, we are guaranteed that the path will always be - // a volume file path. - var baseRel string - if strings.HasPrefix(resolvedPath, `\\?\Volume{`) { - if strings.HasPrefix(resolvedPath, driver.Path()) { - baseRel = resolvedPath[len(driver.Path()):] - if baseRel[:1] == `\` { - baseRel = baseRel[1:] - } - } - } else { - baseRel, err = driver.Rel(driver.Path(), resolvedPath) - } - if err != nil { - return err - } - // Make it an absolute path. - absPath = driver.Join(string(driver.Separator()), baseRel) - - // @ TODO: gupta-ak: Technically, this works since it no-ops - // on Windows and the file system is local anyway on linux. - // But eventually, it should be made driver aware. - toVolume, err := checkIfPathIsInAVolume(container, absPath) - if err != nil { - return err - } - - if !toVolume && container.HostConfig.ReadonlyRootfs { - return ErrRootFSReadOnly - } - - options := daemon.defaultTarCopyOptions(noOverwriteDirNonDir) - - if copyUIDGID { - var err error - // tarCopyOptions will appropriately pull in the right uid/gid for the - // user/group and will set the options. 
- options, err = daemon.tarCopyOptions(container, noOverwriteDirNonDir) - if err != nil { - return err - } - } - - if err := extractArchive(driver, content, resolvedPath, options); err != nil { - return err - } - - daemon.LogContainerEvent(container, "extract-to-dir") - - return nil -} - -func (daemon *Daemon) containerCopy(container *container.Container, resource string) (rc io.ReadCloser, err error) { - if resource[0] == '/' || resource[0] == '\\' { - resource = resource[1:] - } - container.Lock() - - defer func() { - if err != nil { - // Wait to unlock the container until the archive is fully read - // (see the ReadCloseWrapper func below) or if there is an error - // before that occurs. - container.Unlock() - } - }() - - if err := daemon.Mount(container); err != nil { - return nil, err - } - - defer func() { - if err != nil { - // unmount any volumes - container.DetachAndUnmount(daemon.LogVolumeEvent) - // unmount the container's rootfs - daemon.Unmount(container) - } - }() - - if err := daemon.mountVolumes(container); err != nil { - return nil, err - } - - // Normalize path before sending to rootfs - resource = container.BaseFS.FromSlash(resource) - driver := container.BaseFS - - basePath, err := container.GetResourcePath(resource) - if err != nil { - return nil, err - } - stat, err := driver.Stat(basePath) - if err != nil { - return nil, err - } - var filter []string - if !stat.IsDir() { - d, f := driver.Split(basePath) - basePath = d - filter = []string{f} - } else { - filter = []string{driver.Base(basePath)} - basePath = driver.Dir(basePath) - } - archive, err := archivePath(driver, basePath, &archive.TarOptions{ - Compression: archive.Uncompressed, - IncludeFiles: filter, - }) - if err != nil { - return nil, err - } - - reader := ioutils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - container.DetachAndUnmount(daemon.LogVolumeEvent) - daemon.Unmount(container) - container.Unlock() - return err - }) - daemon.LogContainerEvent(container, "copy") - return reader, nil -} diff --git a/vendor/github.com/docker/docker/daemon/archive_tarcopyoptions.go b/vendor/github.com/docker/docker/daemon/archive_tarcopyoptions.go deleted file mode 100644 index 766ba9fdb..000000000 --- a/vendor/github.com/docker/docker/daemon/archive_tarcopyoptions.go +++ /dev/null @@ -1,15 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "github.com/docker/docker/pkg/archive" -) - -// defaultTarCopyOptions is the setting that is used when unpacking an archive -// for a copy API event. 
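containerCopy above archives a single file by tarring its parent directory with an include filter, and a directory by tarring relative to its own parent. The path arithmetic in isolation (a stdlib-only sketch; the real code uses the container's BaseFS driver rather than path/filepath):

package main

import (
	"fmt"
	"path/filepath"
)

// tarSpec sketches the basePath/filter computation in containerCopy.
func tarSpec(resolved string, isDir bool) (base string, include []string) {
	if !isDir {
		dir, file := filepath.Split(resolved)
		return dir, []string{file} // tar the parent, include only the file
	}
	return filepath.Dir(resolved), []string{filepath.Base(resolved)}
}

func main() {
	fmt.Println(tarSpec("/var/log/app.log", false)) // /var/log/ [app.log]
	fmt.Println(tarSpec("/var/log", true))          // /var [log]
}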
-func (daemon *Daemon) defaultTarCopyOptions(noOverwriteDirNonDir bool) *archive.TarOptions { - return &archive.TarOptions{ - NoOverwriteDirNonDir: noOverwriteDirNonDir, - UIDMaps: daemon.idMappings.UIDs(), - GIDMaps: daemon.idMappings.GIDs(), - } -} diff --git a/vendor/github.com/docker/docker/daemon/archive_tarcopyoptions_unix.go b/vendor/github.com/docker/docker/daemon/archive_tarcopyoptions_unix.go deleted file mode 100644 index d70904564..000000000 --- a/vendor/github.com/docker/docker/daemon/archive_tarcopyoptions_unix.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build !windows - -package daemon // import "github.com/docker/docker/daemon" - -import ( - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/idtools" -) - -func (daemon *Daemon) tarCopyOptions(container *container.Container, noOverwriteDirNonDir bool) (*archive.TarOptions, error) { - if container.Config.User == "" { - return daemon.defaultTarCopyOptions(noOverwriteDirNonDir), nil - } - - user, err := idtools.LookupUser(container.Config.User) - if err != nil { - return nil, err - } - - return &archive.TarOptions{ - NoOverwriteDirNonDir: noOverwriteDirNonDir, - ChownOpts: &idtools.IDPair{UID: user.Uid, GID: user.Gid}, - }, nil -} diff --git a/vendor/github.com/docker/docker/daemon/archive_tarcopyoptions_windows.go b/vendor/github.com/docker/docker/daemon/archive_tarcopyoptions_windows.go deleted file mode 100644 index 5142496f0..000000000 --- a/vendor/github.com/docker/docker/daemon/archive_tarcopyoptions_windows.go +++ /dev/null @@ -1,10 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/archive" -) - -func (daemon *Daemon) tarCopyOptions(container *container.Container, noOverwriteDirNonDir bool) (*archive.TarOptions, error) { - return daemon.defaultTarCopyOptions(noOverwriteDirNonDir), nil -} diff --git a/vendor/github.com/docker/docker/daemon/archive_unix.go b/vendor/github.com/docker/docker/daemon/archive_unix.go deleted file mode 100644 index 50e6fe24b..000000000 --- a/vendor/github.com/docker/docker/daemon/archive_unix.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build !windows - -package daemon // import "github.com/docker/docker/daemon" - -import ( - "github.com/docker/docker/container" - volumemounts "github.com/docker/docker/volume/mounts" -) - -// checkIfPathIsInAVolume checks if the path is in a volume. If it is, it -// cannot be in a read-only volume. If it is not in a volume, the container -// cannot be configured with a read-only rootfs. -func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) { - var toVolume bool - parser := volumemounts.NewParser(container.OS) - for _, mnt := range container.MountPoints { - if toVolume = parser.HasResource(mnt, absPath); toVolume { - if mnt.RW { - break - } - return false, ErrVolumeReadonly - } - } - return toVolume, nil -} - -// isOnlineFSOperationPermitted returns an error if an online filesystem operation -// is not permitted. 
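The unix tarCopyOptions above resolves container.Config.User to a uid/gid pair used to chown extracted files. A rough stdlib analogue using os/user in place of docker's idtools (idtools also handles numeric and uid:gid forms, which this sketch does not):

package main

import (
	"fmt"
	"os/user"
	"strconv"
)

// chownPair is an illustrative stand-in for idtools.LookupUser followed by
// building a ChownOpts pair; unix-only, since Uid/Gid are numeric there.
func chownPair(username string) (uid, gid int, err error) {
	u, err := user.Lookup(username)
	if err != nil {
		return 0, 0, err
	}
	if uid, err = strconv.Atoi(u.Uid); err != nil {
		return 0, 0, err
	}
	if gid, err = strconv.Atoi(u.Gid); err != nil {
		return 0, 0, err
	}
	return uid, gid, nil
}

func main() {
	uid, gid, err := chownPair("root")
	fmt.Println(uid, gid, err) // 0 0 <nil> on most unix systems
}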
-func (daemon *Daemon) isOnlineFSOperationPermitted(container *container.Container) error { - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/archive_windows.go b/vendor/github.com/docker/docker/daemon/archive_windows.go deleted file mode 100644 index 8cec39c5e..000000000 --- a/vendor/github.com/docker/docker/daemon/archive_windows.go +++ /dev/null @@ -1,39 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "errors" - - containertypes "github.com/docker/docker/api/types/container" - "github.com/docker/docker/container" -) - -// checkIfPathIsInAVolume checks if the path is in a volume. If it is, it -// cannot be in a read-only volume. If it is not in a volume, the container -// cannot be configured with a read-only rootfs. -// -// This is a no-op on Windows which does not support read-only volumes, or -// extracting to a mount point inside a volume. TODO Windows: FIXME Post-TP5 -func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) { - return false, nil -} - -// isOnlineFSOperationPermitted returns an error if an online filesystem operation -// is not permitted (such as stat or for copying). Running Hyper-V containers -// cannot have their file-system interrogated from the host as the filter is -// loaded inside the utility VM, not the host. -// IMPORTANT: The container lock must NOT be held when calling this function. -func (daemon *Daemon) isOnlineFSOperationPermitted(container *container.Container) error { - if !container.IsRunning() { - return nil - } - - // Determine isolation. If not specified in the hostconfig, use daemon default. - actualIsolation := container.HostConfig.Isolation - if containertypes.Isolation.IsDefault(containertypes.Isolation(actualIsolation)) { - actualIsolation = daemon.defaultIsolation - } - if containertypes.Isolation.IsHyperV(actualIsolation) { - return errors.New("filesystem operations against a running Hyper-V container are not supported") - } - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/attach.go b/vendor/github.com/docker/docker/daemon/attach.go deleted file mode 100644 index fb14691d2..000000000 --- a/vendor/github.com/docker/docker/daemon/attach.go +++ /dev/null @@ -1,187 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "context" - "fmt" - "io" - - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/container" - "github.com/docker/docker/container/stream" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/stdcopy" - "github.com/docker/docker/pkg/term" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// ContainerAttach attaches to logs according to the config passed in. See ContainerAttachConfig. 
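// Editor's sketch (not part of this patch): the Unix checkIfPathIsInAVolume
// above walks the container's mount points and rejects writes that land in a
// read-only volume. A self-contained approximation of that check (the real
// HasResource does a path-aware containment test; plain prefix matching here
// is a simplification, and all type names are hypothetical):
package main

import (
	"errors"
	"fmt"
	"strings"
)

type mountPoint struct {
	Destination string
	RW          bool
}

var errVolumeReadonly = errors.New("mounted volume is marked read-only")

func checkPathInVolume(mounts []mountPoint, absPath string) (bool, error) {
	for _, m := range mounts {
		if absPath == m.Destination || strings.HasPrefix(absPath, m.Destination+"/") {
			if !m.RW {
				return false, errVolumeReadonly
			}
			return true, nil
		}
	}
	return false, nil
}

func main() {
	mounts := []mountPoint{{Destination: "/data", RW: false}}
	if _, err := checkPathInVolume(mounts, "/data/app.log"); err != nil {
		fmt.Println("rejected:", err)
	}
}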
-func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerAttachConfig) error { - keys := []byte{} - var err error - if c.DetachKeys != "" { - keys, err = term.ToBytes(c.DetachKeys) - if err != nil { - return errdefs.InvalidParameter(errors.Errorf("Invalid detach keys (%s) provided", c.DetachKeys)) - } - } - - container, err := daemon.GetContainer(prefixOrName) - if err != nil { - return err - } - if container.IsPaused() { - err := fmt.Errorf("container %s is paused, unpause the container before attach", prefixOrName) - return errdefs.Conflict(err) - } - if container.IsRestarting() { - err := fmt.Errorf("container %s is restarting, wait until the container is running", prefixOrName) - return errdefs.Conflict(err) - } - - cfg := stream.AttachConfig{ - UseStdin: c.UseStdin, - UseStdout: c.UseStdout, - UseStderr: c.UseStderr, - TTY: container.Config.Tty, - CloseStdin: container.Config.StdinOnce, - DetachKeys: keys, - } - container.StreamConfig.AttachStreams(&cfg) - - inStream, outStream, errStream, err := c.GetStreams() - if err != nil { - return err - } - defer inStream.Close() - - if !container.Config.Tty && c.MuxStreams { - errStream = stdcopy.NewStdWriter(errStream, stdcopy.Stderr) - outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) - } - - if cfg.UseStdin { - cfg.Stdin = inStream - } - if cfg.UseStdout { - cfg.Stdout = outStream - } - if cfg.UseStderr { - cfg.Stderr = errStream - } - - if err := daemon.containerAttach(container, &cfg, c.Logs, c.Stream); err != nil { - fmt.Fprintf(outStream, "Error attaching: %s\n", err) - } - return nil -} - -// ContainerAttachRaw attaches the provided streams to the container's stdio -func (daemon *Daemon) ContainerAttachRaw(prefixOrName string, stdin io.ReadCloser, stdout, stderr io.Writer, doStream bool, attached chan struct{}) error { - container, err := daemon.GetContainer(prefixOrName) - if err != nil { - return err - } - cfg := stream.AttachConfig{ - UseStdin: stdin != nil, - UseStdout: stdout != nil, - UseStderr: stderr != nil, - TTY: container.Config.Tty, - CloseStdin: container.Config.StdinOnce, - } - container.StreamConfig.AttachStreams(&cfg) - close(attached) - if cfg.UseStdin { - cfg.Stdin = stdin - } - if cfg.UseStdout { - cfg.Stdout = stdout - } - if cfg.UseStderr { - cfg.Stderr = stderr - } - - return daemon.containerAttach(container, &cfg, false, doStream) -} - -func (daemon *Daemon) containerAttach(c *container.Container, cfg *stream.AttachConfig, logs, doStream bool) error { - if logs { - logDriver, logCreated, err := daemon.getLogger(c) - if err != nil { - return err - } - if logCreated { - defer func() { - if err = logDriver.Close(); err != nil { - logrus.Errorf("Error closing logger: %v", err) - } - }() - } - cLog, ok := logDriver.(logger.LogReader) - if !ok { - return logger.ErrReadLogsNotSupported{} - } - logs := cLog.ReadLogs(logger.ReadConfig{Tail: -1}) - defer logs.Close() - - LogLoop: - for { - select { - case msg, ok := <-logs.Msg: - if !ok { - break LogLoop - } - if msg.Source == "stdout" && cfg.Stdout != nil { - cfg.Stdout.Write(msg.Line) - } - if msg.Source == "stderr" && cfg.Stderr != nil { - cfg.Stderr.Write(msg.Line) - } - case err := <-logs.Err: - logrus.Errorf("Error streaming logs: %v", err) - break LogLoop - } - } - } - - daemon.LogContainerEvent(c, "attach") - - if !doStream { - return nil - } - - if cfg.Stdin != nil { - r, w := io.Pipe() - go func(stdin io.ReadCloser) { - defer w.Close() - defer logrus.Debug("Closing buffered stdin pipe") - io.Copy(w, stdin) - }(cfg.Stdin) - 
cfg.Stdin = r - } - - if !c.Config.OpenStdin { - cfg.Stdin = nil - } - - if c.Config.StdinOnce && !c.Config.Tty { - // Wait for the container to stop before returning. - waitChan := c.Wait(context.Background(), container.WaitConditionNotRunning) - defer func() { - <-waitChan // Ignore returned exit code. - }() - } - - ctx := c.InitAttachContext() - err := <-c.StreamConfig.CopyStreams(ctx, cfg) - if err != nil { - if _, ok := errors.Cause(err).(term.EscapeError); ok || err == context.Canceled { - daemon.LogContainerEvent(c, "detach") - } else { - logrus.Errorf("attach failed with error: %v", err) - } - } - - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/auth.go b/vendor/github.com/docker/docker/daemon/auth.go deleted file mode 100644 index d32c28b8d..000000000 --- a/vendor/github.com/docker/docker/daemon/auth.go +++ /dev/null @@ -1,13 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/dockerversion" -) - -// AuthenticateToRegistry checks the validity of credentials in authConfig -func (daemon *Daemon) AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error) { - return daemon.RegistryService.Auth(ctx, authConfig, dockerversion.DockerUserAgent(ctx)) -} diff --git a/vendor/github.com/docker/docker/daemon/bindmount_unix.go b/vendor/github.com/docker/docker/daemon/bindmount_unix.go deleted file mode 100644 index 028e300b0..000000000 --- a/vendor/github.com/docker/docker/daemon/bindmount_unix.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build linux freebsd - -package daemon // import "github.com/docker/docker/daemon" - -const bindMountType = "bind" diff --git a/vendor/github.com/docker/docker/daemon/caps/utils_unix.go b/vendor/github.com/docker/docker/daemon/caps/utils_unix.go deleted file mode 100644 index 4c18b28be..000000000 --- a/vendor/github.com/docker/docker/daemon/caps/utils_unix.go +++ /dev/null @@ -1,141 +0,0 @@ -// +build !windows - -package caps // import "github.com/docker/docker/daemon/caps" - -import ( - "fmt" - "strings" - - "github.com/syndtr/gocapability/capability" -) - -var capabilityList Capabilities - -func init() { - last := capability.CAP_LAST_CAP - // hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap - if last == capability.Cap(63) { - last = capability.CAP_BLOCK_SUSPEND - } - for _, cap := range capability.List() { - if cap > last { - continue - } - capabilityList = append(capabilityList, - &CapabilityMapping{ - Key: "CAP_" + strings.ToUpper(cap.String()), - Value: cap, - }, - ) - } -} - -type ( - // CapabilityMapping maps linux capability name to its value of capability.Cap type - // Capabilities is one of the security systems in Linux Security Module (LSM) - // framework provided by the kernel. 
- // For more details on capabilities, see http://man7.org/linux/man-pages/man7/capabilities.7.html - CapabilityMapping struct { - Key string `json:"key,omitempty"` - Value capability.Cap `json:"value,omitempty"` - } - // Capabilities contains all CapabilityMapping - Capabilities []*CapabilityMapping -) - -// String returns the key of the CapabilityMapping -func (c *CapabilityMapping) String() string { - return c.Key -} - -// GetCapability returns the CapabilityMapping that contains the specified key -func GetCapability(key string) *CapabilityMapping { - for _, capp := range capabilityList { - if capp.Key == key { - cpy := *capp - return &cpy - } - } - return nil -} - -// GetAllCapabilities returns all of the capabilities -func GetAllCapabilities() []string { - output := make([]string, len(capabilityList)) - for i, capability := range capabilityList { - output[i] = capability.String() - } - return output -} - -// inSlice tests whether a string is contained in a slice of strings or not. -// Comparison is case-insensitive. -func inSlice(slice []string, s string) bool { - for _, ss := range slice { - if strings.ToLower(s) == strings.ToLower(ss) { - return true - } - } - return false -} - -// TweakCapabilities can tweak capabilities by adding or dropping capabilities -// based on the basic capabilities. -func TweakCapabilities(basics, adds, drops []string) ([]string, error) { - var ( - newCaps []string - allCaps = GetAllCapabilities() - ) - - // FIXME(tonistiigi): docker format is without CAP_ prefix, oci is with prefix - // Currently they are mixed in here. We should do conversion in one place. - - // look for invalid cap in the drop list - for _, cap := range drops { - if strings.ToLower(cap) == "all" { - continue - } - - if !inSlice(allCaps, "CAP_"+cap) { - return nil, fmt.Errorf("Unknown capability drop: %q", cap) - } - } - - // handle --cap-add=all - if inSlice(adds, "all") { - basics = allCaps - } - - if !inSlice(drops, "all") { - for _, cap := range basics { - // skip `all` already handled above - if strings.ToLower(cap) == "all" { - continue - } - - // if we don't drop `all`, add back all the non-dropped caps - if !inSlice(drops, cap[4:]) { - newCaps = append(newCaps, strings.ToUpper(cap)) - } - } - } - - for _, cap := range adds { - // skip `all` already handled above - if strings.ToLower(cap) == "all" { - continue - } - - cap = "CAP_" + cap - - if !inSlice(allCaps, cap) { - return nil, fmt.Errorf("Unknown capability to add: %q", cap) - } - - // add cap if not already in the list - if !inSlice(newCaps, cap) { - newCaps = append(newCaps, strings.ToUpper(cap)) - } - } - return newCaps, nil -} diff --git a/vendor/github.com/docker/docker/daemon/changes.go b/vendor/github.com/docker/docker/daemon/changes.go deleted file mode 100644 index 70b3f6b94..000000000 --- a/vendor/github.com/docker/docker/daemon/changes.go +++ /dev/null @@ -1,34 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "errors" - "runtime" - "time" - - "github.com/docker/docker/pkg/archive" -) - -// ContainerChanges returns a list of container fs changes -func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) { - start := time.Now() - container, err := daemon.GetContainer(name) - if err != nil { - return nil, err - } - - if runtime.GOOS == "windows" && container.IsRunning() { - return nil, errors.New("Windows does not support diff of a running container") - } - - container.Lock() - defer container.Unlock() - if container.RWLayer == nil { - return nil, errors.New("RWLayer of container " +
name + " is unexpectedly nil") - } - c, err := container.RWLayer.Changes() - if err != nil { - return nil, err - } - containerActions.WithValues("changes").UpdateSince(start) - return c, nil -} diff --git a/vendor/github.com/docker/docker/daemon/checkpoint.go b/vendor/github.com/docker/docker/daemon/checkpoint.go deleted file mode 100644 index 4a1cb0e10..000000000 --- a/vendor/github.com/docker/docker/daemon/checkpoint.go +++ /dev/null @@ -1,143 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/daemon/names" -) - -var ( - validCheckpointNameChars = names.RestrictedNameChars - validCheckpointNamePattern = names.RestrictedNamePattern -) - -// getCheckpointDir verifies checkpoint directory for create,remove, list options and checks if checkpoint already exists -func getCheckpointDir(checkDir, checkpointID, ctrName, ctrID, ctrCheckpointDir string, create bool) (string, error) { - var checkpointDir string - var err2 error - if checkDir != "" { - checkpointDir = checkDir - } else { - checkpointDir = ctrCheckpointDir - } - checkpointAbsDir := filepath.Join(checkpointDir, checkpointID) - stat, err := os.Stat(checkpointAbsDir) - if create { - switch { - case err == nil && stat.IsDir(): - err2 = fmt.Errorf("checkpoint with name %s already exists for container %s", checkpointID, ctrName) - case err != nil && os.IsNotExist(err): - err2 = os.MkdirAll(checkpointAbsDir, 0700) - case err != nil: - err2 = err - case err == nil: - err2 = fmt.Errorf("%s exists and is not a directory", checkpointAbsDir) - } - } else { - switch { - case err != nil: - err2 = fmt.Errorf("checkpoint %s does not exists for container %s", checkpointID, ctrName) - case err == nil && stat.IsDir(): - err2 = nil - case err == nil: - err2 = fmt.Errorf("%s exists and is not a directory", checkpointAbsDir) - } - } - return checkpointAbsDir, err2 -} - -// CheckpointCreate checkpoints the process running in a container with CRIU -func (daemon *Daemon) CheckpointCreate(name string, config types.CheckpointCreateOptions) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - - if !container.IsRunning() { - return fmt.Errorf("Container %s not running", name) - } - - if container.Config.Tty { - return fmt.Errorf("checkpoint not support on containers with tty") - } - - if !validCheckpointNamePattern.MatchString(config.CheckpointID) { - return fmt.Errorf("Invalid checkpoint ID (%s), only %s are allowed", config.CheckpointID, validCheckpointNameChars) - } - - checkpointDir, err := getCheckpointDir(config.CheckpointDir, config.CheckpointID, name, container.ID, container.CheckpointDir(), true) - if err != nil { - return fmt.Errorf("cannot checkpoint container %s: %s", name, err) - } - - err = daemon.containerd.CreateCheckpoint(context.Background(), container.ID, checkpointDir, config.Exit) - if err != nil { - os.RemoveAll(checkpointDir) - return fmt.Errorf("Cannot checkpoint container %s: %s", name, err) - } - - daemon.LogContainerEvent(container, "checkpoint") - - return nil -} - -// CheckpointDelete deletes the specified checkpoint -func (daemon *Daemon) CheckpointDelete(name string, config types.CheckpointDeleteOptions) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - checkpointDir, err := getCheckpointDir(config.CheckpointDir, config.CheckpointID, name, container.ID, container.CheckpointDir(), 
false) - if err == nil { - // getCheckpointDir already appends the checkpoint ID to the returned path - return os.RemoveAll(checkpointDir) - } - return err -} - -// CheckpointList lists all checkpoints of the specified container -func (daemon *Daemon) CheckpointList(name string, config types.CheckpointListOptions) ([]types.Checkpoint, error) { - var out []types.Checkpoint - - container, err := daemon.GetContainer(name) - if err != nil { - return nil, err - } - - checkpointDir, err := getCheckpointDir(config.CheckpointDir, "", name, container.ID, container.CheckpointDir(), false) - if err != nil { - return nil, err - } - - if err := os.MkdirAll(checkpointDir, 0755); err != nil { - return nil, err - } - - dirs, err := ioutil.ReadDir(checkpointDir) - if err != nil { - return nil, err - } - - for _, d := range dirs { - if !d.IsDir() { - continue - } - path := filepath.Join(checkpointDir, d.Name(), "config.json") - data, err := ioutil.ReadFile(path) - if err != nil { - return nil, err - } - var cpt types.Checkpoint - if err := json.Unmarshal(data, &cpt); err != nil { - return nil, err - } - out = append(out, cpt) - } - - return out, nil -} diff --git a/vendor/github.com/docker/docker/daemon/cluster.go b/vendor/github.com/docker/docker/daemon/cluster.go deleted file mode 100644 index b5ac6c485..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster.go +++ /dev/null @@ -1,26 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - apitypes "github.com/docker/docker/api/types" - lncluster "github.com/docker/libnetwork/cluster" -) - -// Cluster is the interface for github.com/docker/docker/daemon/cluster.(*Cluster). -type Cluster interface { - ClusterStatus - NetworkManager - SendClusterEvent(event lncluster.ConfigEventType) -} - -// ClusterStatus interface provides information about the Swarm status of the Cluster -type ClusterStatus interface { - IsAgent() bool - IsManager() bool -} - -// NetworkManager provides methods to manage networks -type NetworkManager interface { - GetNetwork(input string) (apitypes.NetworkResource, error) - GetNetworks() ([]apitypes.NetworkResource, error) - RemoveNetwork(input string) error -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/cluster.go b/vendor/github.com/docker/docker/daemon/cluster/cluster.go deleted file mode 100644 index 35ba5a937..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/cluster.go +++ /dev/null @@ -1,450 +0,0 @@ -package cluster // import "github.com/docker/docker/daemon/cluster" - -// -// ## Swarmkit integration -// -// Cluster - static configurable object for accessing everything swarm related. -// Contains methods for connecting and controlling the cluster. Exists always, -// even if swarm mode is not enabled. -// -// NodeRunner - Manager for starting the swarmkit node. It is present if and -// only if swarm mode is enabled. Implements a backoff restart loop in case of -// errors. -// -// NodeState - Information about the current node status including access to -// gRPC clients if a manager is active. -// -// ### Locking -// -// `cluster.controlMutex` - taken for the whole lifecycle of the processes that -// can reconfigure the cluster (init/join/leave, etc.). Ensures that one -// reconfiguration action has fully completed before another can start. -// -// `cluster.mu` - taken when the actual changes in cluster configurations -// happen. Different from `controlMutex` because in some cases we need to -// access the current cluster state even if the long-running reconfiguration is -// going on.
For example, the network stack may ask for the current cluster state in -// the middle of the shutdown. Any time the current cluster state is needed, you -// should take the read lock of `cluster.mu`. If you are writing an API -// responder that returns synchronously, hold `cluster.mu.RLock()` for the -// duration of the whole handler function. That ensures that the node will not be -// shut down until the handler has finished. -// -// NodeRunner implements its own internal locks that should not be used outside of -// the struct. Instead, you should just call the `nodeRunner.State()` method to get -// the current state of the cluster (you still need `cluster.mu.RLock()` to access -// the `cluster.nr` reference itself). Most of the changes in NodeRunner happen -// because of an external event (network problem, unexpected swarmkit error) and -// Docker shouldn't take any locks that delay these changes from happening. -// - -import ( - "context" - "fmt" - "net" - "os" - "path/filepath" - "sync" - "time" - - "github.com/docker/docker/api/types/network" - types "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/daemon/cluster/controllers/plugin" - executorpkg "github.com/docker/docker/daemon/cluster/executor" - "github.com/docker/docker/pkg/signal" - lncluster "github.com/docker/libnetwork/cluster" - swarmapi "github.com/docker/swarmkit/api" - swarmnode "github.com/docker/swarmkit/node" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const swarmDirName = "swarm" -const controlSocket = "control.sock" -const swarmConnectTimeout = 20 * time.Second -const swarmRequestTimeout = 20 * time.Second -const stateFile = "docker-state.json" -const defaultAddr = "0.0.0.0:2377" - -const ( - initialReconnectDelay = 100 * time.Millisecond - maxReconnectDelay = 30 * time.Second - contextPrefix = "com.docker.swarm" -) - -// NetworkSubnetsProvider exposes functions for retrieving the subnets -// of networks managed by Docker, so they can be filtered. -type NetworkSubnetsProvider interface { - Subnets() ([]net.IPNet, []net.IPNet) -} - -// Config provides values for Cluster. -type Config struct { - Root string - Name string - Backend executorpkg.Backend - ImageBackend executorpkg.ImageBackend - PluginBackend plugin.Backend - VolumeBackend executorpkg.VolumeBackend - NetworkSubnetsProvider NetworkSubnetsProvider - - // DefaultAdvertiseAddr is the default host/IP or network interface to use - // if no AdvertiseAddr value is specified. - DefaultAdvertiseAddr string - - // path to store runtime state, such as the swarm control socket - RuntimeRoot string - - // WatchStream is a channel to pass watch API notifications to the daemon - WatchStream chan *swarmapi.WatchMessage - - // RaftHeartbeatTick is the number of ticks for heartbeat of quorum members - RaftHeartbeatTick uint32 - - // RaftElectionTick is the number of ticks to elapse before followers propose a new round of leader election. - // This value should be 10x that of RaftHeartbeatTick - RaftElectionTick uint32 -} - -// Cluster provides capabilities to participate in a cluster as a worker or a -// manager.
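// Editor's sketch (not part of this patch): the locking notes above say a
// synchronous API responder should hold cluster.mu.RLock() for the whole
// handler so the node cannot be shut down mid-request. A minimal,
// self-contained illustration of that discipline (type names hypothetical):
package main

import (
	"fmt"
	"sync"
)

type cluster struct {
	mu    sync.RWMutex
	state string // stands in for the node state guarded by mu
}

// Info is a synchronous responder: the read lock is held until it returns,
// so a concurrent shutdown (which takes mu.Lock) must wait for it to finish.
func (c *cluster) Info() string {
	c.mu.RLock()
	defer c.mu.RUnlock()
	return c.state
}

func (c *cluster) shutdown() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.state = "stopped"
}

func main() {
	c := &cluster{state: "active"}
	fmt.Println(c.Info())
	c.shutdown()
	fmt.Println(c.Info())
}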
-type Cluster struct { - mu sync.RWMutex - controlMutex sync.RWMutex // protect init/join/leave user operations - nr *nodeRunner - root string - runtimeRoot string - config Config - configEvent chan lncluster.ConfigEventType // todo: make this array and goroutine safe - attachers map[string]*attacher - watchStream chan *swarmapi.WatchMessage -} - -// attacher manages the in-memory attachment state of a container -// attachment to a global scope network managed by swarm manager. It -// helps in identifying the attachment ID via the taskID and the -// corresponding attachment configuration obtained from the manager. -type attacher struct { - taskID string - config *network.NetworkingConfig - inProgress bool - attachWaitCh chan *network.NetworkingConfig - attachCompleteCh chan struct{} - detachWaitCh chan struct{} -} - -// New creates a new Cluster instance using the provided config. -func New(config Config) (*Cluster, error) { - root := filepath.Join(config.Root, swarmDirName) - if err := os.MkdirAll(root, 0700); err != nil { - return nil, err - } - if config.RuntimeRoot == "" { - config.RuntimeRoot = root - } - if config.RaftHeartbeatTick == 0 { - config.RaftHeartbeatTick = 1 - } - if config.RaftElectionTick == 0 { - // 10X heartbeat tick is the recommended ratio according to etcd docs. - config.RaftElectionTick = 10 * config.RaftHeartbeatTick - } - - if err := os.MkdirAll(config.RuntimeRoot, 0700); err != nil { - return nil, err - } - c := &Cluster{ - root: root, - config: config, - configEvent: make(chan lncluster.ConfigEventType, 10), - runtimeRoot: config.RuntimeRoot, - attachers: make(map[string]*attacher), - watchStream: config.WatchStream, - } - return c, nil -} - -// Start the Cluster instance -// TODO The split between New and Start can be joined again when the SendClusterEvent -// method is no longer required -func (c *Cluster) Start() error { - root := filepath.Join(c.config.Root, swarmDirName) - - nodeConfig, err := loadPersistentState(root) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - - nr, err := c.newNodeRunner(*nodeConfig) - if err != nil { - return err - } - c.nr = nr - - select { - case <-time.After(swarmConnectTimeout): - logrus.Error("swarm component could not be started before timeout was reached") - case err := <-nr.Ready(): - if err != nil { - logrus.WithError(err).Error("swarm component could not be started") - return nil - } - } - return nil -} - -func (c *Cluster) newNodeRunner(conf nodeStartConfig) (*nodeRunner, error) { - if err := c.config.Backend.IsSwarmCompatible(); err != nil { - return nil, err - } - - actualLocalAddr := conf.LocalAddr - if actualLocalAddr == "" { - // If localAddr was not specified, resolve it automatically - // based on the route to joinAddr. localAddr can only be left - // empty on "join". - listenHost, _, err := net.SplitHostPort(conf.ListenAddr) - if err != nil { - return nil, fmt.Errorf("could not parse listen address: %v", err) - } - - listenAddrIP := net.ParseIP(listenHost) - if listenAddrIP == nil || !listenAddrIP.IsUnspecified() { - actualLocalAddr = listenHost - } else { - if conf.RemoteAddr == "" { - // Should never happen except using swarms created by - // old versions that didn't save remoteAddr.
- conf.RemoteAddr = "8.8.8.8:53" - } - conn, err := net.Dial("udp", conf.RemoteAddr) - if err != nil { - return nil, fmt.Errorf("could not find local IP address: %v", err) - } - localHostPort := conn.LocalAddr().String() - actualLocalAddr, _, _ = net.SplitHostPort(localHostPort) - conn.Close() - } - } - - nr := &nodeRunner{cluster: c} - nr.actualLocalAddr = actualLocalAddr - - if err := nr.Start(conf); err != nil { - return nil, err - } - - c.config.Backend.DaemonJoinsCluster(c) - - return nr, nil -} - -func (c *Cluster) getRequestContext() (context.Context, func()) { // TODO: not needed when requests don't block on qourum lost - return context.WithTimeout(context.Background(), swarmRequestTimeout) -} - -// IsManager returns true if Cluster is participating as a manager. -func (c *Cluster) IsManager() bool { - c.mu.RLock() - defer c.mu.RUnlock() - return c.currentNodeState().IsActiveManager() -} - -// IsAgent returns true if Cluster is participating as a worker/agent. -func (c *Cluster) IsAgent() bool { - c.mu.RLock() - defer c.mu.RUnlock() - return c.currentNodeState().status == types.LocalNodeStateActive -} - -// GetLocalAddress returns the local address. -func (c *Cluster) GetLocalAddress() string { - c.mu.RLock() - defer c.mu.RUnlock() - return c.currentNodeState().actualLocalAddr -} - -// GetListenAddress returns the listen address. -func (c *Cluster) GetListenAddress() string { - c.mu.RLock() - defer c.mu.RUnlock() - if c.nr != nil { - return c.nr.config.ListenAddr - } - return "" -} - -// GetAdvertiseAddress returns the remotely reachable address of this node. -func (c *Cluster) GetAdvertiseAddress() string { - c.mu.RLock() - defer c.mu.RUnlock() - if c.nr != nil && c.nr.config.AdvertiseAddr != "" { - advertiseHost, _, _ := net.SplitHostPort(c.nr.config.AdvertiseAddr) - return advertiseHost - } - return c.currentNodeState().actualLocalAddr -} - -// GetDataPathAddress returns the address to be used for the data path traffic, if specified. -func (c *Cluster) GetDataPathAddress() string { - c.mu.RLock() - defer c.mu.RUnlock() - if c.nr != nil { - return c.nr.config.DataPathAddr - } - return "" -} - -// GetRemoteAddressList returns the advertise address for each of the remote managers if -// available. -func (c *Cluster) GetRemoteAddressList() []string { - c.mu.RLock() - defer c.mu.RUnlock() - return c.getRemoteAddressList() -} - -// GetWatchStream returns the channel to pass changes from store watch API -func (c *Cluster) GetWatchStream() chan *swarmapi.WatchMessage { - c.mu.RLock() - defer c.mu.RUnlock() - return c.watchStream -} - -func (c *Cluster) getRemoteAddressList() []string { - state := c.currentNodeState() - if state.swarmNode == nil { - return []string{} - } - - nodeID := state.swarmNode.NodeID() - remotes := state.swarmNode.Remotes() - addressList := make([]string, 0, len(remotes)) - for _, r := range remotes { - if r.NodeID != nodeID { - addressList = append(addressList, r.Addr) - } - } - return addressList -} - -// ListenClusterEvents returns a channel that receives messages on cluster -// participation changes. -// todo: make cancelable and accessible to multiple callers -func (c *Cluster) ListenClusterEvents() <-chan lncluster.ConfigEventType { - return c.configEvent -} - -// currentNodeState should not be called without a read lock -func (c *Cluster) currentNodeState() nodeState { - return c.nr.State() -} - -// errNoManager returns error describing why manager commands can't be used. -// Call with read lock. 
-func (c *Cluster) errNoManager(st nodeState) error { - if st.swarmNode == nil { - if errors.Cause(st.err) == errSwarmLocked { - return errSwarmLocked - } - if st.err == errSwarmCertificatesExpired { - return errSwarmCertificatesExpired - } - return errors.WithStack(notAvailableError("This node is not a swarm manager. Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again.")) - } - if st.swarmNode.Manager() != nil { - return errors.WithStack(notAvailableError("This node is not a swarm manager. Manager is being prepared or has trouble connecting to the cluster.")) - } - return errors.WithStack(notAvailableError("This node is not a swarm manager. Worker nodes can't be used to view or modify cluster state. Please run this command on a manager node or promote the current node to a manager.")) -} - -// Cleanup stops active swarm node. This is run before daemon shutdown. -func (c *Cluster) Cleanup() { - c.controlMutex.Lock() - defer c.controlMutex.Unlock() - - c.mu.Lock() - node := c.nr - if node == nil { - c.mu.Unlock() - return - } - state := c.currentNodeState() - c.mu.Unlock() - - if state.IsActiveManager() { - active, reachable, unreachable, err := managerStats(state.controlClient, state.NodeID()) - if err == nil { - singlenode := active && isLastManager(reachable, unreachable) - if active && !singlenode && removingManagerCausesLossOfQuorum(reachable, unreachable) { - logrus.Errorf("Leaving cluster with %v managers left out of %v. Raft quorum will be lost.", reachable-1, reachable+unreachable) - } - } - } - - if err := node.Stop(); err != nil { - logrus.Errorf("failed to shut down cluster node: %v", err) - signal.DumpStacks("") - } - - c.mu.Lock() - c.nr = nil - c.mu.Unlock() -} - -func managerStats(client swarmapi.ControlClient, currentNodeID string) (current bool, reachable int, unreachable int, err error) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - nodes, err := client.ListNodes(ctx, &swarmapi.ListNodesRequest{}) - if err != nil { - return false, 0, 0, err - } - for _, n := range nodes.Nodes { - if n.ManagerStatus != nil { - if n.ManagerStatus.Reachability == swarmapi.RaftMemberStatus_REACHABLE { - reachable++ - if n.ID == currentNodeID { - current = true - } - } - if n.ManagerStatus.Reachability == swarmapi.RaftMemberStatus_UNREACHABLE { - unreachable++ - } - } - } - return -} - -func detectLockedError(err error) error { - if err == swarmnode.ErrInvalidUnlockKey { - return errors.WithStack(errSwarmLocked) - } - return err -} - -func (c *Cluster) lockedManagerAction(fn func(ctx context.Context, state nodeState) error) error { - c.mu.RLock() - defer c.mu.RUnlock() - - state := c.currentNodeState() - if !state.IsActiveManager() { - return c.errNoManager(state) - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - return fn(ctx, state) -} - -// SendClusterEvent allows to send cluster events on the configEvent channel -// TODO This method should not be exposed. 
-// Currently it is used to notify the network controller that the keys are -// available -func (c *Cluster) SendClusterEvent(event lncluster.ConfigEventType) { - c.mu.RLock() - defer c.mu.RUnlock() - c.configEvent <- event -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/configs.go b/vendor/github.com/docker/docker/daemon/cluster/configs.go deleted file mode 100644 index 6b373e618..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/configs.go +++ /dev/null @@ -1,118 +0,0 @@ -package cluster // import "github.com/docker/docker/daemon/cluster" - -import ( - "context" - - apitypes "github.com/docker/docker/api/types" - types "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/daemon/cluster/convert" - swarmapi "github.com/docker/swarmkit/api" -) - -// GetConfig returns a config from a managed swarm cluster -func (c *Cluster) GetConfig(input string) (types.Config, error) { - var config *swarmapi.Config - - if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { - s, err := getConfig(ctx, state.controlClient, input) - if err != nil { - return err - } - config = s - return nil - }); err != nil { - return types.Config{}, err - } - return convert.ConfigFromGRPC(config), nil -} - -// GetConfigs returns all configs of a managed swarm cluster. -func (c *Cluster) GetConfigs(options apitypes.ConfigListOptions) ([]types.Config, error) { - c.mu.RLock() - defer c.mu.RUnlock() - - state := c.currentNodeState() - if !state.IsActiveManager() { - return nil, c.errNoManager(state) - } - - filters, err := newListConfigsFilters(options.Filters) - if err != nil { - return nil, err - } - ctx, cancel := c.getRequestContext() - defer cancel() - - r, err := state.controlClient.ListConfigs(ctx, - &swarmapi.ListConfigsRequest{Filters: filters}) - if err != nil { - return nil, err - } - - configs := []types.Config{} - - for _, config := range r.Configs { - configs = append(configs, convert.ConfigFromGRPC(config)) - } - - return configs, nil -} - -// CreateConfig creates a new config in a managed swarm cluster. -func (c *Cluster) CreateConfig(s types.ConfigSpec) (string, error) { - var resp *swarmapi.CreateConfigResponse - if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { - configSpec := convert.ConfigSpecToGRPC(s) - - r, err := state.controlClient.CreateConfig(ctx, - &swarmapi.CreateConfigRequest{Spec: &configSpec}) - if err != nil { - return err - } - resp = r - return nil - }); err != nil { - return "", err - } - return resp.Config.ID, nil -} - -// RemoveConfig removes a config from a managed swarm cluster. -func (c *Cluster) RemoveConfig(input string) error { - return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { - config, err := getConfig(ctx, state.controlClient, input) - if err != nil { - return err - } - - req := &swarmapi.RemoveConfigRequest{ - ConfigID: config.ID, - } - - _, err = state.controlClient.RemoveConfig(ctx, req) - return err - }) -} - -// UpdateConfig updates a config in a managed swarm cluster. 
-// Note: this is not exposed to the CLI but is available from the API only -func (c *Cluster) UpdateConfig(input string, version uint64, spec types.ConfigSpec) error { - return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { - config, err := getConfig(ctx, state.controlClient, input) - if err != nil { - return err - } - - configSpec := convert.ConfigSpecToGRPC(spec) - - _, err = state.controlClient.UpdateConfig(ctx, - &swarmapi.UpdateConfigRequest{ - ConfigID: config.ID, - ConfigVersion: &swarmapi.Version{ - Index: version, - }, - Spec: &configSpec, - }) - return err - }) -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/controllers/plugin/controller.go b/vendor/github.com/docker/docker/daemon/cluster/controllers/plugin/controller.go deleted file mode 100644 index 6d7606aa8..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/controllers/plugin/controller.go +++ /dev/null @@ -1,261 +0,0 @@ -package plugin // import "github.com/docker/docker/daemon/cluster/controllers/plugin" - -import ( - "context" - "io" - "io/ioutil" - "net/http" - - "github.com/docker/distribution/reference" - enginetypes "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm/runtime" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/plugin" - "github.com/docker/docker/plugin/v2" - "github.com/docker/swarmkit/api" - "github.com/gogo/protobuf/proto" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// Controller is the controller for the plugin backend. -// Plugins are managed as a singleton object with a desired state (different from containers). -// With the plugin controller instead of having a strict create->start->stop->remove -// task lifecycle like containers, we manage the desired state of the plugin and let -// the plugin manager do what it already does and monitor the plugin. -// We'll also end up with many tasks all pointing to the same plugin ID. -// -// TODO(@cpuguy83): registry auth is intentionally not supported until we work out -// the right way to pass registry credentials via secrets. -type Controller struct { - backend Backend - spec runtime.PluginSpec - logger *logrus.Entry - - pluginID string - serviceID string - taskID string - - // hook used to signal tests that `Wait()` is actually ready and waiting - signalWaitReady func() -} - -// Backend is the interface for interacting with the plugin manager -// Controller actions are passed to the configured backend to do the real work. 
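// Editor's sketch (not part of this patch): UpdateConfig above is an
// optimistic-concurrency write -- the caller supplies the version index it
// last read, and the manager rejects the update if the object has since moved
// on. A self-contained model of that compare-and-set pattern (all names
// hypothetical):
package main

import (
	"errors"
	"fmt"
)

type versioned struct {
	index uint64
	data  string
}

var errVersionConflict = errors.New("update out of sequence")

type store struct{ obj versioned }

// update applies the change only if the caller's expected version still
// matches; otherwise the caller must re-read and retry.
func (s *store) update(expected uint64, data string) error {
	if s.obj.index != expected {
		return errVersionConflict
	}
	s.obj = versioned{index: expected + 1, data: data}
	return nil
}

func main() {
	s := &store{obj: versioned{index: 4, data: "old"}}
	fmt.Println(s.update(3, "stale write")) // rejected: version conflict
	fmt.Println(s.update(4, "fresh write")) // accepted
}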
-type Backend interface { - Disable(name string, config *enginetypes.PluginDisableConfig) error - Enable(name string, config *enginetypes.PluginEnableConfig) error - Remove(name string, config *enginetypes.PluginRmConfig) error - Pull(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer, opts ...plugin.CreateOpt) error - Upgrade(ctx context.Context, ref reference.Named, name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig, privileges enginetypes.PluginPrivileges, outStream io.Writer) error - Get(name string) (*v2.Plugin, error) - SubscribeEvents(buffer int, events ...plugin.Event) (eventCh <-chan interface{}, cancel func()) -} - -// NewController returns a new cluster plugin controller -func NewController(backend Backend, t *api.Task) (*Controller, error) { - spec, err := readSpec(t) - if err != nil { - return nil, err - } - return &Controller{ - backend: backend, - spec: spec, - serviceID: t.ServiceID, - logger: logrus.WithFields(logrus.Fields{ - "controller": "plugin", - "task": t.ID, - "plugin": spec.Name, - })}, nil -} - -func readSpec(t *api.Task) (runtime.PluginSpec, error) { - var cfg runtime.PluginSpec - - generic := t.Spec.GetGeneric() - if err := proto.Unmarshal(generic.Payload.Value, &cfg); err != nil { - return cfg, errors.Wrap(err, "error reading plugin spec") - } - return cfg, nil -} - -// Update is the update phase from swarmkit -func (p *Controller) Update(ctx context.Context, t *api.Task) error { - p.logger.Debug("Update") - return nil -} - -// Prepare is the prepare phase from swarmkit -func (p *Controller) Prepare(ctx context.Context) (err error) { - p.logger.Debug("Prepare") - - remote, err := reference.ParseNormalizedNamed(p.spec.Remote) - if err != nil { - return errors.Wrapf(err, "error parsing remote reference %q", p.spec.Remote) - } - - if p.spec.Name == "" { - p.spec.Name = remote.String() - } - - var authConfig enginetypes.AuthConfig - privs := convertPrivileges(p.spec.Privileges) - - pl, err := p.backend.Get(p.spec.Name) - - defer func() { - if pl != nil && err == nil { - pl.Acquire() - } - }() - - if err == nil && pl != nil { - if pl.SwarmServiceID != p.serviceID { - return errors.Errorf("plugin already exists: %s", p.spec.Name) - } - if pl.IsEnabled() { - if err := p.backend.Disable(pl.GetID(), &enginetypes.PluginDisableConfig{ForceDisable: true}); err != nil { - p.logger.WithError(err).Debug("could not disable plugin before running upgrade") - } - } - p.pluginID = pl.GetID() - return p.backend.Upgrade(ctx, remote, p.spec.Name, nil, &authConfig, privs, ioutil.Discard) - } - - if err := p.backend.Pull(ctx, remote, p.spec.Name, nil, &authConfig, privs, ioutil.Discard, plugin.WithSwarmService(p.serviceID)); err != nil { - return err - } - pl, err = p.backend.Get(p.spec.Name) - if err != nil { - return err - } - p.pluginID = pl.GetID() - - return nil -} - -// Start is the start phase from swarmkit -func (p *Controller) Start(ctx context.Context) error { - p.logger.Debug("Start") - - pl, err := p.backend.Get(p.pluginID) - if err != nil { - return err - } - - if p.spec.Disabled { - if pl.IsEnabled() { - return p.backend.Disable(p.pluginID, &enginetypes.PluginDisableConfig{ForceDisable: false}) - } - return nil - } - if !pl.IsEnabled() { - return p.backend.Enable(p.pluginID, &enginetypes.PluginEnableConfig{Timeout: 30}) - } - return nil -} - -// Wait causes the task to wait until returned -func (p *Controller) Wait(ctx 
context.Context) error { - p.logger.Debug("Wait") - - pl, err := p.backend.Get(p.pluginID) - if err != nil { - return err - } - - events, cancel := p.backend.SubscribeEvents(1, plugin.EventDisable{Plugin: pl.PluginObj}, plugin.EventRemove{Plugin: pl.PluginObj}, plugin.EventEnable{Plugin: pl.PluginObj}) - defer cancel() - - if p.signalWaitReady != nil { - p.signalWaitReady() - } - - if !p.spec.Disabled != pl.IsEnabled() { - return errors.New("mismatched plugin state") - } - - for { - select { - case <-ctx.Done(): - return ctx.Err() - case e := <-events: - p.logger.Debugf("got event %#T", e) - - switch e.(type) { - case plugin.EventEnable: - if p.spec.Disabled { - return errors.New("plugin enabled") - } - case plugin.EventRemove: - return errors.New("plugin removed") - case plugin.EventDisable: - if !p.spec.Disabled { - return errors.New("plugin disabled") - } - } - } - } -} - -func isNotFound(err error) bool { - return errdefs.IsNotFound(err) -} - -// Shutdown is the shutdown phase from swarmkit -func (p *Controller) Shutdown(ctx context.Context) error { - p.logger.Debug("Shutdown") - return nil -} - -// Terminate is the terminate phase from swarmkit -func (p *Controller) Terminate(ctx context.Context) error { - p.logger.Debug("Terminate") - return nil -} - -// Remove is the remove phase from swarmkit -func (p *Controller) Remove(ctx context.Context) error { - p.logger.Debug("Remove") - - pl, err := p.backend.Get(p.pluginID) - if err != nil { - if isNotFound(err) { - return nil - } - return err - } - - pl.Release() - if pl.GetRefCount() > 0 { - p.logger.Debug("skipping remove due to ref count") - return nil - } - - // This may error because we have exactly 1 plugin, but potentially multiple - // tasks which are calling remove. - err = p.backend.Remove(p.pluginID, &enginetypes.PluginRmConfig{ForceRemove: true}) - if isNotFound(err) { - return nil - } - return err -} - -// Close is the close phase from swarmkit -func (p *Controller) Close() error { - p.logger.Debug("Close") - return nil -} - -func convertPrivileges(ls []*runtime.PluginPrivilege) enginetypes.PluginPrivileges { - var out enginetypes.PluginPrivileges - for _, p := range ls { - pp := enginetypes.PluginPrivilege{ - Name: p.Name, - Description: p.Description, - Value: p.Value, - } - out = append(out, pp) - } - return out -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/config.go b/vendor/github.com/docker/docker/daemon/cluster/convert/config.go deleted file mode 100644 index 16b3475af..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/convert/config.go +++ /dev/null @@ -1,78 +0,0 @@ -package convert // import "github.com/docker/docker/daemon/cluster/convert" - -import ( - swarmtypes "github.com/docker/docker/api/types/swarm" - types "github.com/docker/docker/api/types/swarm" - swarmapi "github.com/docker/swarmkit/api" - gogotypes "github.com/gogo/protobuf/types" -) - -// ConfigFromGRPC converts a grpc Config to a Config. 
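// Editor's sketch (not part of this patch): Controller.Wait above blocks in a
// select loop over a context and a plugin-event channel, returning as soon as
// the observed plugin state diverges from the desired state. The skeleton of
// that loop, reduced to its essentials with string events standing in for the
// typed plugin events:
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

func wait(ctx context.Context, events <-chan string, wantEnabled bool) error {
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case e := <-events:
			switch e {
			case "remove":
				return errors.New("plugin removed")
			case "disable":
				if wantEnabled {
					return errors.New("plugin disabled")
				}
			case "enable":
				if !wantEnabled {
					return errors.New("plugin enabled")
				}
			}
		}
	}
}

func main() {
	events := make(chan string, 1)
	events <- "remove"
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(wait(ctx, events, true))
}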
-func ConfigFromGRPC(s *swarmapi.Config) swarmtypes.Config { - config := swarmtypes.Config{ - ID: s.ID, - Spec: swarmtypes.ConfigSpec{ - Annotations: annotationsFromGRPC(s.Spec.Annotations), - Data: s.Spec.Data, - }, - } - - config.Version.Index = s.Meta.Version.Index - // Meta - config.CreatedAt, _ = gogotypes.TimestampFromProto(s.Meta.CreatedAt) - config.UpdatedAt, _ = gogotypes.TimestampFromProto(s.Meta.UpdatedAt) - - if s.Spec.Templating != nil { - config.Spec.Templating = &types.Driver{ - Name: s.Spec.Templating.Name, - Options: s.Spec.Templating.Options, - } - } - - return config -} - -// ConfigSpecToGRPC converts Config to a grpc Config. -func ConfigSpecToGRPC(s swarmtypes.ConfigSpec) swarmapi.ConfigSpec { - spec := swarmapi.ConfigSpec{ - Annotations: swarmapi.Annotations{ - Name: s.Name, - Labels: s.Labels, - }, - Data: s.Data, - } - - if s.Templating != nil { - spec.Templating = &swarmapi.Driver{ - Name: s.Templating.Name, - Options: s.Templating.Options, - } - } - - return spec -} - -// ConfigReferencesFromGRPC converts a slice of grpc ConfigReference to ConfigReference -func ConfigReferencesFromGRPC(s []*swarmapi.ConfigReference) []*swarmtypes.ConfigReference { - refs := []*swarmtypes.ConfigReference{} - - for _, r := range s { - ref := &swarmtypes.ConfigReference{ - ConfigID: r.ConfigID, - ConfigName: r.ConfigName, - } - - if t, ok := r.Target.(*swarmapi.ConfigReference_File); ok { - ref.File = &swarmtypes.ConfigReferenceFileTarget{ - Name: t.File.Name, - UID: t.File.UID, - GID: t.File.GID, - Mode: t.File.Mode, - } - } - - refs = append(refs, ref) - } - - return refs -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/container.go b/vendor/github.com/docker/docker/daemon/cluster/convert/container.go deleted file mode 100644 index d889b4004..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/convert/container.go +++ /dev/null @@ -1,398 +0,0 @@ -package convert // import "github.com/docker/docker/daemon/cluster/convert" - -import ( - "errors" - "fmt" - "strings" - - "github.com/docker/docker/api/types/container" - mounttypes "github.com/docker/docker/api/types/mount" - types "github.com/docker/docker/api/types/swarm" - swarmapi "github.com/docker/swarmkit/api" - gogotypes "github.com/gogo/protobuf/types" - "github.com/sirupsen/logrus" -) - -func containerSpecFromGRPC(c *swarmapi.ContainerSpec) *types.ContainerSpec { - if c == nil { - return nil - } - containerSpec := &types.ContainerSpec{ - Image: c.Image, - Labels: c.Labels, - Command: c.Command, - Args: c.Args, - Hostname: c.Hostname, - Env: c.Env, - Dir: c.Dir, - User: c.User, - Groups: c.Groups, - StopSignal: c.StopSignal, - TTY: c.TTY, - OpenStdin: c.OpenStdin, - ReadOnly: c.ReadOnly, - Hosts: c.Hosts, - Secrets: secretReferencesFromGRPC(c.Secrets), - Configs: configReferencesFromGRPC(c.Configs), - Isolation: IsolationFromGRPC(c.Isolation), - Init: initFromGRPC(c.Init), - } - - if c.DNSConfig != nil { - containerSpec.DNSConfig = &types.DNSConfig{ - Nameservers: c.DNSConfig.Nameservers, - Search: c.DNSConfig.Search, - Options: c.DNSConfig.Options, - } - } - - // Privileges - if c.Privileges != nil { - containerSpec.Privileges = &types.Privileges{} - - if c.Privileges.CredentialSpec != nil { - containerSpec.Privileges.CredentialSpec = &types.CredentialSpec{} - switch c.Privileges.CredentialSpec.Source.(type) { - case *swarmapi.Privileges_CredentialSpec_File: - containerSpec.Privileges.CredentialSpec.File = c.Privileges.CredentialSpec.GetFile() - case *swarmapi.Privileges_CredentialSpec_Registry: 
- containerSpec.Privileges.CredentialSpec.Registry = c.Privileges.CredentialSpec.GetRegistry() - } - } - - if c.Privileges.SELinuxContext != nil { - containerSpec.Privileges.SELinuxContext = &types.SELinuxContext{ - Disable: c.Privileges.SELinuxContext.Disable, - User: c.Privileges.SELinuxContext.User, - Type: c.Privileges.SELinuxContext.Type, - Role: c.Privileges.SELinuxContext.Role, - Level: c.Privileges.SELinuxContext.Level, - } - } - } - - // Mounts - for _, m := range c.Mounts { - mount := mounttypes.Mount{ - Target: m.Target, - Source: m.Source, - Type: mounttypes.Type(strings.ToLower(swarmapi.Mount_MountType_name[int32(m.Type)])), - ReadOnly: m.ReadOnly, - } - - if m.BindOptions != nil { - mount.BindOptions = &mounttypes.BindOptions{ - Propagation: mounttypes.Propagation(strings.ToLower(swarmapi.Mount_BindOptions_MountPropagation_name[int32(m.BindOptions.Propagation)])), - } - } - - if m.VolumeOptions != nil { - mount.VolumeOptions = &mounttypes.VolumeOptions{ - NoCopy: m.VolumeOptions.NoCopy, - Labels: m.VolumeOptions.Labels, - } - if m.VolumeOptions.DriverConfig != nil { - mount.VolumeOptions.DriverConfig = &mounttypes.Driver{ - Name: m.VolumeOptions.DriverConfig.Name, - Options: m.VolumeOptions.DriverConfig.Options, - } - } - } - - if m.TmpfsOptions != nil { - mount.TmpfsOptions = &mounttypes.TmpfsOptions{ - SizeBytes: m.TmpfsOptions.SizeBytes, - Mode: m.TmpfsOptions.Mode, - } - } - containerSpec.Mounts = append(containerSpec.Mounts, mount) - } - - if c.StopGracePeriod != nil { - grace, _ := gogotypes.DurationFromProto(c.StopGracePeriod) - containerSpec.StopGracePeriod = &grace - } - - if c.Healthcheck != nil { - containerSpec.Healthcheck = healthConfigFromGRPC(c.Healthcheck) - } - - return containerSpec -} - -func initFromGRPC(v *gogotypes.BoolValue) *bool { - if v == nil { - return nil - } - value := v.GetValue() - return &value -} - -func initToGRPC(v *bool) *gogotypes.BoolValue { - if v == nil { - return nil - } - return &gogotypes.BoolValue{Value: *v} -} - -func secretReferencesToGRPC(sr []*types.SecretReference) []*swarmapi.SecretReference { - refs := make([]*swarmapi.SecretReference, 0, len(sr)) - for _, s := range sr { - ref := &swarmapi.SecretReference{ - SecretID: s.SecretID, - SecretName: s.SecretName, - } - if s.File != nil { - ref.Target = &swarmapi.SecretReference_File{ - File: &swarmapi.FileTarget{ - Name: s.File.Name, - UID: s.File.UID, - GID: s.File.GID, - Mode: s.File.Mode, - }, - } - } - - refs = append(refs, ref) - } - - return refs -} - -func secretReferencesFromGRPC(sr []*swarmapi.SecretReference) []*types.SecretReference { - refs := make([]*types.SecretReference, 0, len(sr)) - for _, s := range sr { - target := s.GetFile() - if target == nil { - // not a file target - logrus.Warnf("secret target not a file: secret=%s", s.SecretID) - continue - } - refs = append(refs, &types.SecretReference{ - File: &types.SecretReferenceFileTarget{ - Name: target.Name, - UID: target.UID, - GID: target.GID, - Mode: target.Mode, - }, - SecretID: s.SecretID, - SecretName: s.SecretName, - }) - } - - return refs -} - -func configReferencesToGRPC(sr []*types.ConfigReference) []*swarmapi.ConfigReference { - refs := make([]*swarmapi.ConfigReference, 0, len(sr)) - for _, s := range sr { - ref := &swarmapi.ConfigReference{ - ConfigID: s.ConfigID, - ConfigName: s.ConfigName, - } - if s.File != nil { - ref.Target = &swarmapi.ConfigReference_File{ - File: &swarmapi.FileTarget{ - Name: s.File.Name, - UID: s.File.UID, - GID: s.File.GID, - Mode: s.File.Mode, - }, - } - } - - refs = 
append(refs, ref) - } - - return refs -} - -func configReferencesFromGRPC(sr []*swarmapi.ConfigReference) []*types.ConfigReference { - refs := make([]*types.ConfigReference, 0, len(sr)) - for _, s := range sr { - target := s.GetFile() - if target == nil { - // not a file target - logrus.Warnf("config target not a file: config=%s", s.ConfigID) - continue - } - refs = append(refs, &types.ConfigReference{ - File: &types.ConfigReferenceFileTarget{ - Name: target.Name, - UID: target.UID, - GID: target.GID, - Mode: target.Mode, - }, - ConfigID: s.ConfigID, - ConfigName: s.ConfigName, - }) - } - - return refs -} - -func containerToGRPC(c *types.ContainerSpec) (*swarmapi.ContainerSpec, error) { - containerSpec := &swarmapi.ContainerSpec{ - Image: c.Image, - Labels: c.Labels, - Command: c.Command, - Args: c.Args, - Hostname: c.Hostname, - Env: c.Env, - Dir: c.Dir, - User: c.User, - Groups: c.Groups, - StopSignal: c.StopSignal, - TTY: c.TTY, - OpenStdin: c.OpenStdin, - ReadOnly: c.ReadOnly, - Hosts: c.Hosts, - Secrets: secretReferencesToGRPC(c.Secrets), - Configs: configReferencesToGRPC(c.Configs), - Isolation: isolationToGRPC(c.Isolation), - Init: initToGRPC(c.Init), - } - - if c.DNSConfig != nil { - containerSpec.DNSConfig = &swarmapi.ContainerSpec_DNSConfig{ - Nameservers: c.DNSConfig.Nameservers, - Search: c.DNSConfig.Search, - Options: c.DNSConfig.Options, - } - } - - if c.StopGracePeriod != nil { - containerSpec.StopGracePeriod = gogotypes.DurationProto(*c.StopGracePeriod) - } - - // Privileges - if c.Privileges != nil { - containerSpec.Privileges = &swarmapi.Privileges{} - - if c.Privileges.CredentialSpec != nil { - containerSpec.Privileges.CredentialSpec = &swarmapi.Privileges_CredentialSpec{} - - if c.Privileges.CredentialSpec.File != "" && c.Privileges.CredentialSpec.Registry != "" { - return nil, errors.New("cannot specify both \"file\" and \"registry\" credential specs") - } - if c.Privileges.CredentialSpec.File != "" { - containerSpec.Privileges.CredentialSpec.Source = &swarmapi.Privileges_CredentialSpec_File{ - File: c.Privileges.CredentialSpec.File, - } - } else if c.Privileges.CredentialSpec.Registry != "" { - containerSpec.Privileges.CredentialSpec.Source = &swarmapi.Privileges_CredentialSpec_Registry{ - Registry: c.Privileges.CredentialSpec.Registry, - } - } else { - return nil, errors.New("must either provide \"file\" or \"registry\" for credential spec") - } - } - - if c.Privileges.SELinuxContext != nil { - containerSpec.Privileges.SELinuxContext = &swarmapi.Privileges_SELinuxContext{ - Disable: c.Privileges.SELinuxContext.Disable, - User: c.Privileges.SELinuxContext.User, - Type: c.Privileges.SELinuxContext.Type, - Role: c.Privileges.SELinuxContext.Role, - Level: c.Privileges.SELinuxContext.Level, - } - } - } - - // Mounts - for _, m := range c.Mounts { - mount := swarmapi.Mount{ - Target: m.Target, - Source: m.Source, - ReadOnly: m.ReadOnly, - } - - if mountType, ok := swarmapi.Mount_MountType_value[strings.ToUpper(string(m.Type))]; ok { - mount.Type = swarmapi.Mount_MountType(mountType) - } else if string(m.Type) != "" { - return nil, fmt.Errorf("invalid MountType: %q", m.Type) - } - - if m.BindOptions != nil { - if mountPropagation, ok := swarmapi.Mount_BindOptions_MountPropagation_value[strings.ToUpper(string(m.BindOptions.Propagation))]; ok { - mount.BindOptions = &swarmapi.Mount_BindOptions{Propagation: swarmapi.Mount_BindOptions_MountPropagation(mountPropagation)} - } else if string(m.BindOptions.Propagation) != "" { - return nil, fmt.Errorf("invalid MountPropagation: 
%q", m.BindOptions.Propagation) - } - } - - if m.VolumeOptions != nil { - mount.VolumeOptions = &swarmapi.Mount_VolumeOptions{ - NoCopy: m.VolumeOptions.NoCopy, - Labels: m.VolumeOptions.Labels, - } - if m.VolumeOptions.DriverConfig != nil { - mount.VolumeOptions.DriverConfig = &swarmapi.Driver{ - Name: m.VolumeOptions.DriverConfig.Name, - Options: m.VolumeOptions.DriverConfig.Options, - } - } - } - - if m.TmpfsOptions != nil { - mount.TmpfsOptions = &swarmapi.Mount_TmpfsOptions{ - SizeBytes: m.TmpfsOptions.SizeBytes, - Mode: m.TmpfsOptions.Mode, - } - } - - containerSpec.Mounts = append(containerSpec.Mounts, mount) - } - - if c.Healthcheck != nil { - containerSpec.Healthcheck = healthConfigToGRPC(c.Healthcheck) - } - - return containerSpec, nil -} - -func healthConfigFromGRPC(h *swarmapi.HealthConfig) *container.HealthConfig { - interval, _ := gogotypes.DurationFromProto(h.Interval) - timeout, _ := gogotypes.DurationFromProto(h.Timeout) - startPeriod, _ := gogotypes.DurationFromProto(h.StartPeriod) - return &container.HealthConfig{ - Test: h.Test, - Interval: interval, - Timeout: timeout, - Retries: int(h.Retries), - StartPeriod: startPeriod, - } -} - -func healthConfigToGRPC(h *container.HealthConfig) *swarmapi.HealthConfig { - return &swarmapi.HealthConfig{ - Test: h.Test, - Interval: gogotypes.DurationProto(h.Interval), - Timeout: gogotypes.DurationProto(h.Timeout), - Retries: int32(h.Retries), - StartPeriod: gogotypes.DurationProto(h.StartPeriod), - } -} - -// IsolationFromGRPC converts a swarm api container isolation to a moby isolation representation -func IsolationFromGRPC(i swarmapi.ContainerSpec_Isolation) container.Isolation { - switch i { - case swarmapi.ContainerIsolationHyperV: - return container.IsolationHyperV - case swarmapi.ContainerIsolationProcess: - return container.IsolationProcess - case swarmapi.ContainerIsolationDefault: - return container.IsolationDefault - } - return container.IsolationEmpty -} - -func isolationToGRPC(i container.Isolation) swarmapi.ContainerSpec_Isolation { - if i.IsHyperV() { - return swarmapi.ContainerIsolationHyperV - } - if i.IsProcess() { - return swarmapi.ContainerIsolationProcess - } - return swarmapi.ContainerIsolationDefault -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/network.go b/vendor/github.com/docker/docker/daemon/cluster/convert/network.go deleted file mode 100644 index 34660fc4f..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/convert/network.go +++ /dev/null @@ -1,240 +0,0 @@ -package convert // import "github.com/docker/docker/daemon/cluster/convert" - -import ( - "strings" - - basictypes "github.com/docker/docker/api/types" - networktypes "github.com/docker/docker/api/types/network" - types "github.com/docker/docker/api/types/swarm" - netconst "github.com/docker/libnetwork/datastore" - swarmapi "github.com/docker/swarmkit/api" - gogotypes "github.com/gogo/protobuf/types" -) - -func networkAttachmentFromGRPC(na *swarmapi.NetworkAttachment) types.NetworkAttachment { - if na != nil { - return types.NetworkAttachment{ - Network: networkFromGRPC(na.Network), - Addresses: na.Addresses, - } - } - return types.NetworkAttachment{} -} - -func networkFromGRPC(n *swarmapi.Network) types.Network { - if n != nil { - network := types.Network{ - ID: n.ID, - Spec: types.NetworkSpec{ - IPv6Enabled: n.Spec.Ipv6Enabled, - Internal: n.Spec.Internal, - Attachable: n.Spec.Attachable, - Ingress: IsIngressNetwork(n), - IPAMOptions: ipamFromGRPC(n.Spec.IPAM), - Scope: netconst.SwarmScope, - }, - IPAMOptions: 
ipamFromGRPC(n.IPAM), - } - - if n.Spec.GetNetwork() != "" { - network.Spec.ConfigFrom = &networktypes.ConfigReference{ - Network: n.Spec.GetNetwork(), - } - } - - // Meta - network.Version.Index = n.Meta.Version.Index - network.CreatedAt, _ = gogotypes.TimestampFromProto(n.Meta.CreatedAt) - network.UpdatedAt, _ = gogotypes.TimestampFromProto(n.Meta.UpdatedAt) - - //Annotations - network.Spec.Annotations = annotationsFromGRPC(n.Spec.Annotations) - - //DriverConfiguration - if n.Spec.DriverConfig != nil { - network.Spec.DriverConfiguration = &types.Driver{ - Name: n.Spec.DriverConfig.Name, - Options: n.Spec.DriverConfig.Options, - } - } - - //DriverState - if n.DriverState != nil { - network.DriverState = types.Driver{ - Name: n.DriverState.Name, - Options: n.DriverState.Options, - } - } - - return network - } - return types.Network{} -} - -func ipamFromGRPC(i *swarmapi.IPAMOptions) *types.IPAMOptions { - var ipam *types.IPAMOptions - if i != nil { - ipam = &types.IPAMOptions{} - if i.Driver != nil { - ipam.Driver.Name = i.Driver.Name - ipam.Driver.Options = i.Driver.Options - } - - for _, config := range i.Configs { - ipam.Configs = append(ipam.Configs, types.IPAMConfig{ - Subnet: config.Subnet, - Range: config.Range, - Gateway: config.Gateway, - }) - } - } - return ipam -} - -func endpointSpecFromGRPC(es *swarmapi.EndpointSpec) *types.EndpointSpec { - var endpointSpec *types.EndpointSpec - if es != nil { - endpointSpec = &types.EndpointSpec{} - endpointSpec.Mode = types.ResolutionMode(strings.ToLower(es.Mode.String())) - - for _, portState := range es.Ports { - endpointSpec.Ports = append(endpointSpec.Ports, swarmPortConfigToAPIPortConfig(portState)) - } - } - return endpointSpec -} - -func endpointFromGRPC(e *swarmapi.Endpoint) types.Endpoint { - endpoint := types.Endpoint{} - if e != nil { - if espec := endpointSpecFromGRPC(e.Spec); espec != nil { - endpoint.Spec = *espec - } - - for _, portState := range e.Ports { - endpoint.Ports = append(endpoint.Ports, swarmPortConfigToAPIPortConfig(portState)) - } - - for _, v := range e.VirtualIPs { - endpoint.VirtualIPs = append(endpoint.VirtualIPs, types.EndpointVirtualIP{ - NetworkID: v.NetworkID, - Addr: v.Addr}) - } - - } - - return endpoint -} - -func swarmPortConfigToAPIPortConfig(portConfig *swarmapi.PortConfig) types.PortConfig { - return types.PortConfig{ - Name: portConfig.Name, - Protocol: types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(portConfig.Protocol)])), - PublishMode: types.PortConfigPublishMode(strings.ToLower(swarmapi.PortConfig_PublishMode_name[int32(portConfig.PublishMode)])), - TargetPort: portConfig.TargetPort, - PublishedPort: portConfig.PublishedPort, - } -} - -// BasicNetworkFromGRPC converts a grpc Network to a NetworkResource. 
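For orientation while reviewing the removal, a minimal sketch of driving BasicNetworkFromGRPC below (IDs and names hypothetical; assumes the vendored docker and swarmkit packages still resolve). Every network converted this way reports the swarm scope, and the IPAM section is filled in only when present on the grpc object:

package main

import (
	"fmt"

	"github.com/docker/docker/daemon/cluster/convert"
	swarmapi "github.com/docker/swarmkit/api"
)

func main() {
	n := swarmapi.Network{ID: "net-1"}
	n.Spec.Annotations.Name = "backend"

	nr := convert.BasicNetworkFromGRPC(n)
	fmt.Println(nr.ID, nr.Name, nr.Scope) // net-1 backend swarm
}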
-func BasicNetworkFromGRPC(n swarmapi.Network) basictypes.NetworkResource { - spec := n.Spec - var ipam networktypes.IPAM - if n.IPAM != nil { - if n.IPAM.Driver != nil { - ipam.Driver = n.IPAM.Driver.Name - ipam.Options = n.IPAM.Driver.Options - } - ipam.Config = make([]networktypes.IPAMConfig, 0, len(n.IPAM.Configs)) - for _, ic := range n.IPAM.Configs { - ipamConfig := networktypes.IPAMConfig{ - Subnet: ic.Subnet, - IPRange: ic.Range, - Gateway: ic.Gateway, - AuxAddress: ic.Reserved, - } - ipam.Config = append(ipam.Config, ipamConfig) - } - } - - nr := basictypes.NetworkResource{ - ID: n.ID, - Name: n.Spec.Annotations.Name, - Scope: netconst.SwarmScope, - EnableIPv6: spec.Ipv6Enabled, - IPAM: ipam, - Internal: spec.Internal, - Attachable: spec.Attachable, - Ingress: IsIngressNetwork(&n), - Labels: n.Spec.Annotations.Labels, - } - nr.Created, _ = gogotypes.TimestampFromProto(n.Meta.CreatedAt) - - if n.Spec.GetNetwork() != "" { - nr.ConfigFrom = networktypes.ConfigReference{ - Network: n.Spec.GetNetwork(), - } - } - - if n.DriverState != nil { - nr.Driver = n.DriverState.Name - nr.Options = n.DriverState.Options - } - - return nr -} - -// BasicNetworkCreateToGRPC converts a NetworkCreateRequest to a grpc NetworkSpec. -func BasicNetworkCreateToGRPC(create basictypes.NetworkCreateRequest) swarmapi.NetworkSpec { - ns := swarmapi.NetworkSpec{ - Annotations: swarmapi.Annotations{ - Name: create.Name, - Labels: create.Labels, - }, - DriverConfig: &swarmapi.Driver{ - Name: create.Driver, - Options: create.Options, - }, - Ipv6Enabled: create.EnableIPv6, - Internal: create.Internal, - Attachable: create.Attachable, - Ingress: create.Ingress, - } - if create.IPAM != nil { - driver := create.IPAM.Driver - if driver == "" { - driver = "default" - } - ns.IPAM = &swarmapi.IPAMOptions{ - Driver: &swarmapi.Driver{ - Name: driver, - Options: create.IPAM.Options, - }, - } - ipamSpec := make([]*swarmapi.IPAMConfig, 0, len(create.IPAM.Config)) - for _, ipamConfig := range create.IPAM.Config { - ipamSpec = append(ipamSpec, &swarmapi.IPAMConfig{ - Subnet: ipamConfig.Subnet, - Range: ipamConfig.IPRange, - Gateway: ipamConfig.Gateway, - }) - } - ns.IPAM.Configs = ipamSpec - } - if create.ConfigFrom != nil { - ns.ConfigFrom = &swarmapi.NetworkSpec_Network{ - Network: create.ConfigFrom.Network, - } - } - return ns -} - -// IsIngressNetwork check if the swarm network is an ingress network -func IsIngressNetwork(n *swarmapi.Network) bool { - if n.Spec.Ingress { - return true - } - // Check if legacy defined ingress network - _, ok := n.Spec.Annotations.Labels["com.docker.swarm.internal"] - return ok && n.Spec.Annotations.Name == "ingress" -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/node.go b/vendor/github.com/docker/docker/daemon/cluster/convert/node.go deleted file mode 100644 index 00636b6ab..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/convert/node.go +++ /dev/null @@ -1,94 +0,0 @@ -package convert // import "github.com/docker/docker/daemon/cluster/convert" - -import ( - "fmt" - "strings" - - types "github.com/docker/docker/api/types/swarm" - swarmapi "github.com/docker/swarmkit/api" - gogotypes "github.com/gogo/protobuf/types" -) - -// NodeFromGRPC converts a grpc Node to a Node. 
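A minimal sketch of the converter that follows (node contents hypothetical; same vendored-package assumption as above). Optional sections such as Description and ManagerStatus are guarded with nil checks, so an almost-empty input simply yields zero values:

package main

import (
	"fmt"

	"github.com/docker/docker/daemon/cluster/convert"
	swarmapi "github.com/docker/swarmkit/api"
)

func main() {
	n := swarmapi.Node{ID: "node-id-1"}
	node := convert.NodeFromGRPC(n)
	fmt.Println(node.ID, node.Spec.Role, node.Status.State)
}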
-func NodeFromGRPC(n swarmapi.Node) types.Node { - node := types.Node{ - ID: n.ID, - Spec: types.NodeSpec{ - Role: types.NodeRole(strings.ToLower(n.Spec.DesiredRole.String())), - Availability: types.NodeAvailability(strings.ToLower(n.Spec.Availability.String())), - }, - Status: types.NodeStatus{ - State: types.NodeState(strings.ToLower(n.Status.State.String())), - Message: n.Status.Message, - Addr: n.Status.Addr, - }, - } - - // Meta - node.Version.Index = n.Meta.Version.Index - node.CreatedAt, _ = gogotypes.TimestampFromProto(n.Meta.CreatedAt) - node.UpdatedAt, _ = gogotypes.TimestampFromProto(n.Meta.UpdatedAt) - - //Annotations - node.Spec.Annotations = annotationsFromGRPC(n.Spec.Annotations) - - //Description - if n.Description != nil { - node.Description.Hostname = n.Description.Hostname - if n.Description.Platform != nil { - node.Description.Platform.Architecture = n.Description.Platform.Architecture - node.Description.Platform.OS = n.Description.Platform.OS - } - if n.Description.Resources != nil { - node.Description.Resources.NanoCPUs = n.Description.Resources.NanoCPUs - node.Description.Resources.MemoryBytes = n.Description.Resources.MemoryBytes - node.Description.Resources.GenericResources = GenericResourcesFromGRPC(n.Description.Resources.Generic) - } - if n.Description.Engine != nil { - node.Description.Engine.EngineVersion = n.Description.Engine.EngineVersion - node.Description.Engine.Labels = n.Description.Engine.Labels - for _, plugin := range n.Description.Engine.Plugins { - node.Description.Engine.Plugins = append(node.Description.Engine.Plugins, types.PluginDescription{Type: plugin.Type, Name: plugin.Name}) - } - } - if n.Description.TLSInfo != nil { - node.Description.TLSInfo.TrustRoot = string(n.Description.TLSInfo.TrustRoot) - node.Description.TLSInfo.CertIssuerPublicKey = n.Description.TLSInfo.CertIssuerPublicKey - node.Description.TLSInfo.CertIssuerSubject = n.Description.TLSInfo.CertIssuerSubject - } - } - - //Manager - if n.ManagerStatus != nil { - node.ManagerStatus = &types.ManagerStatus{ - Leader: n.ManagerStatus.Leader, - Reachability: types.Reachability(strings.ToLower(n.ManagerStatus.Reachability.String())), - Addr: n.ManagerStatus.Addr, - } - } - - return node -} - -// NodeSpecToGRPC converts a NodeSpec to a grpc NodeSpec. 
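A minimal sketch of the reverse direction (name and values hypothetical). Unlike the *FromGRPC converters, this one can fail: role and availability strings that do not match a swarmkit enum value surface as errors rather than silently mapping to the zero enum:

package main

import (
	"fmt"

	types "github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/daemon/cluster/convert"
)

func main() {
	spec, err := convert.NodeSpecToGRPC(types.NodeSpec{
		Annotations:  types.Annotations{Name: "node-1"},
		Role:         types.NodeRoleManager,
		Availability: types.NodeAvailabilityActive,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(spec.DesiredRole, spec.Availability)
}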
-func NodeSpecToGRPC(s types.NodeSpec) (swarmapi.NodeSpec, error) { - spec := swarmapi.NodeSpec{ - Annotations: swarmapi.Annotations{ - Name: s.Name, - Labels: s.Labels, - }, - } - if role, ok := swarmapi.NodeRole_value[strings.ToUpper(string(s.Role))]; ok { - spec.DesiredRole = swarmapi.NodeRole(role) - } else { - return swarmapi.NodeSpec{}, fmt.Errorf("invalid Role: %q", s.Role) - } - - if availability, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(s.Availability))]; ok { - spec.Availability = swarmapi.NodeSpec_Availability(availability) - } else { - return swarmapi.NodeSpec{}, fmt.Errorf("invalid Availability: %q", s.Availability) - } - - return spec, nil -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/secret.go b/vendor/github.com/docker/docker/daemon/cluster/convert/secret.go deleted file mode 100644 index d0e5ac45d..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/convert/secret.go +++ /dev/null @@ -1,80 +0,0 @@ -package convert // import "github.com/docker/docker/daemon/cluster/convert" - -import ( - swarmtypes "github.com/docker/docker/api/types/swarm" - types "github.com/docker/docker/api/types/swarm" - swarmapi "github.com/docker/swarmkit/api" - gogotypes "github.com/gogo/protobuf/types" -) - -// SecretFromGRPC converts a grpc Secret to a Secret. -func SecretFromGRPC(s *swarmapi.Secret) swarmtypes.Secret { - secret := swarmtypes.Secret{ - ID: s.ID, - Spec: swarmtypes.SecretSpec{ - Annotations: annotationsFromGRPC(s.Spec.Annotations), - Data: s.Spec.Data, - Driver: driverFromGRPC(s.Spec.Driver), - }, - } - - secret.Version.Index = s.Meta.Version.Index - // Meta - secret.CreatedAt, _ = gogotypes.TimestampFromProto(s.Meta.CreatedAt) - secret.UpdatedAt, _ = gogotypes.TimestampFromProto(s.Meta.UpdatedAt) - - if s.Spec.Templating != nil { - secret.Spec.Templating = &types.Driver{ - Name: s.Spec.Templating.Name, - Options: s.Spec.Templating.Options, - } - } - - return secret -} - -// SecretSpecToGRPC converts Secret to a grpc Secret. 
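A hedged sketch of the secret-spec conversion that follows (secret name and payload hypothetical). Annotations and data are copied across as-is; the Driver and Templating fields are translated only when set:

package main

import (
	"fmt"

	swarmtypes "github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/daemon/cluster/convert"
)

func main() {
	spec := convert.SecretSpecToGRPC(swarmtypes.SecretSpec{
		Annotations: swarmtypes.Annotations{Name: "db-password"},
		Data:        []byte("s3cret"),
	})
	fmt.Println(spec.Annotations.Name, len(spec.Data))
}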
-func SecretSpecToGRPC(s swarmtypes.SecretSpec) swarmapi.SecretSpec { - spec := swarmapi.SecretSpec{ - Annotations: swarmapi.Annotations{ - Name: s.Name, - Labels: s.Labels, - }, - Data: s.Data, - Driver: driverToGRPC(s.Driver), - } - - if s.Templating != nil { - spec.Templating = &swarmapi.Driver{ - Name: s.Templating.Name, - Options: s.Templating.Options, - } - } - - return spec -} - -// SecretReferencesFromGRPC converts a slice of grpc SecretReference to SecretReference -func SecretReferencesFromGRPC(s []*swarmapi.SecretReference) []*swarmtypes.SecretReference { - refs := []*swarmtypes.SecretReference{} - - for _, r := range s { - ref := &swarmtypes.SecretReference{ - SecretID: r.SecretID, - SecretName: r.SecretName, - } - - if t, ok := r.Target.(*swarmapi.SecretReference_File); ok { - ref.File = &swarmtypes.SecretReferenceFileTarget{ - Name: t.File.Name, - UID: t.File.UID, - GID: t.File.GID, - Mode: t.File.Mode, - } - } - - refs = append(refs, ref) - } - - return refs -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/service.go b/vendor/github.com/docker/docker/daemon/cluster/convert/service.go deleted file mode 100644 index 5a1609aa0..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/convert/service.go +++ /dev/null @@ -1,639 +0,0 @@ -package convert // import "github.com/docker/docker/daemon/cluster/convert" - -import ( - "fmt" - "strings" - - types "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/api/types/swarm/runtime" - "github.com/docker/docker/pkg/namesgenerator" - swarmapi "github.com/docker/swarmkit/api" - "github.com/docker/swarmkit/api/genericresource" - "github.com/gogo/protobuf/proto" - gogotypes "github.com/gogo/protobuf/types" - "github.com/pkg/errors" -) - -var ( - // ErrUnsupportedRuntime returns an error if the runtime is not supported by the daemon - ErrUnsupportedRuntime = errors.New("unsupported runtime") - // ErrMismatchedRuntime returns an error if the runtime does not match the provided spec - ErrMismatchedRuntime = errors.New("mismatched Runtime and *Spec fields") -) - -// ServiceFromGRPC converts a grpc Service to a Service. 
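A minimal sketch of the service converter that follows (IDs and image hypothetical). Note that the task template must carry a runtime: an empty TaskSpec is rejected as an unsupported runtime rather than converted to a zero value:

package main

import (
	"fmt"

	"github.com/docker/docker/daemon/cluster/convert"
	swarmapi "github.com/docker/swarmkit/api"
)

func main() {
	s := swarmapi.Service{
		ID: "svc-1",
		Spec: swarmapi.ServiceSpec{
			Task: swarmapi.TaskSpec{
				Runtime: &swarmapi.TaskSpec_Container{
					Container: &swarmapi.ContainerSpec{Image: "nginx:alpine"},
				},
			},
		},
	}
	svc, err := convert.ServiceFromGRPC(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(svc.ID, svc.Spec.TaskTemplate.ContainerSpec.Image)
}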
-func ServiceFromGRPC(s swarmapi.Service) (types.Service, error) { - curSpec, err := serviceSpecFromGRPC(&s.Spec) - if err != nil { - return types.Service{}, err - } - prevSpec, err := serviceSpecFromGRPC(s.PreviousSpec) - if err != nil { - return types.Service{}, err - } - service := types.Service{ - ID: s.ID, - Spec: *curSpec, - PreviousSpec: prevSpec, - - Endpoint: endpointFromGRPC(s.Endpoint), - } - - // Meta - service.Version.Index = s.Meta.Version.Index - service.CreatedAt, _ = gogotypes.TimestampFromProto(s.Meta.CreatedAt) - service.UpdatedAt, _ = gogotypes.TimestampFromProto(s.Meta.UpdatedAt) - - // UpdateStatus - if s.UpdateStatus != nil { - service.UpdateStatus = &types.UpdateStatus{} - switch s.UpdateStatus.State { - case swarmapi.UpdateStatus_UPDATING: - service.UpdateStatus.State = types.UpdateStateUpdating - case swarmapi.UpdateStatus_PAUSED: - service.UpdateStatus.State = types.UpdateStatePaused - case swarmapi.UpdateStatus_COMPLETED: - service.UpdateStatus.State = types.UpdateStateCompleted - case swarmapi.UpdateStatus_ROLLBACK_STARTED: - service.UpdateStatus.State = types.UpdateStateRollbackStarted - case swarmapi.UpdateStatus_ROLLBACK_PAUSED: - service.UpdateStatus.State = types.UpdateStateRollbackPaused - case swarmapi.UpdateStatus_ROLLBACK_COMPLETED: - service.UpdateStatus.State = types.UpdateStateRollbackCompleted - } - - startedAt, _ := gogotypes.TimestampFromProto(s.UpdateStatus.StartedAt) - if !startedAt.IsZero() && startedAt.Unix() != 0 { - service.UpdateStatus.StartedAt = &startedAt - } - - completedAt, _ := gogotypes.TimestampFromProto(s.UpdateStatus.CompletedAt) - if !completedAt.IsZero() && completedAt.Unix() != 0 { - service.UpdateStatus.CompletedAt = &completedAt - } - - service.UpdateStatus.Message = s.UpdateStatus.Message - } - - return service, nil -} - -func serviceSpecFromGRPC(spec *swarmapi.ServiceSpec) (*types.ServiceSpec, error) { - if spec == nil { - return nil, nil - } - - serviceNetworks := make([]types.NetworkAttachmentConfig, 0, len(spec.Networks)) - for _, n := range spec.Networks { - netConfig := types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverOpts: n.DriverAttachmentOpts} - serviceNetworks = append(serviceNetworks, netConfig) - - } - - taskTemplate, err := taskSpecFromGRPC(spec.Task) - if err != nil { - return nil, err - } - - switch t := spec.Task.GetRuntime().(type) { - case *swarmapi.TaskSpec_Container: - containerConfig := t.Container - taskTemplate.ContainerSpec = containerSpecFromGRPC(containerConfig) - taskTemplate.Runtime = types.RuntimeContainer - case *swarmapi.TaskSpec_Generic: - switch t.Generic.Kind { - case string(types.RuntimePlugin): - taskTemplate.Runtime = types.RuntimePlugin - default: - return nil, fmt.Errorf("unknown task runtime type: %s", t.Generic.Payload.TypeUrl) - } - - default: - return nil, fmt.Errorf("error creating service; unsupported runtime %T", t) - } - - convertedSpec := &types.ServiceSpec{ - Annotations: annotationsFromGRPC(spec.Annotations), - TaskTemplate: taskTemplate, - Networks: serviceNetworks, - EndpointSpec: endpointSpecFromGRPC(spec.Endpoint), - } - - // UpdateConfig - convertedSpec.UpdateConfig = updateConfigFromGRPC(spec.Update) - convertedSpec.RollbackConfig = updateConfigFromGRPC(spec.Rollback) - - // Mode - switch t := spec.GetMode().(type) { - case *swarmapi.ServiceSpec_Global: - convertedSpec.Mode.Global = &types.GlobalService{} - case *swarmapi.ServiceSpec_Replicated: - convertedSpec.Mode.Replicated = &types.ReplicatedService{ - Replicas: &t.Replicated.Replicas, - } 
- } - - return convertedSpec, nil -} - -// ServiceSpecToGRPC converts a ServiceSpec to a grpc ServiceSpec. -func ServiceSpecToGRPC(s types.ServiceSpec) (swarmapi.ServiceSpec, error) { - name := s.Name - if name == "" { - name = namesgenerator.GetRandomName(0) - } - - serviceNetworks := make([]*swarmapi.NetworkAttachmentConfig, 0, len(s.Networks)) - for _, n := range s.Networks { - netConfig := &swarmapi.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverAttachmentOpts: n.DriverOpts} - serviceNetworks = append(serviceNetworks, netConfig) - } - - taskNetworks := make([]*swarmapi.NetworkAttachmentConfig, 0, len(s.TaskTemplate.Networks)) - for _, n := range s.TaskTemplate.Networks { - netConfig := &swarmapi.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverAttachmentOpts: n.DriverOpts} - taskNetworks = append(taskNetworks, netConfig) - - } - - spec := swarmapi.ServiceSpec{ - Annotations: swarmapi.Annotations{ - Name: name, - Labels: s.Labels, - }, - Task: swarmapi.TaskSpec{ - Resources: resourcesToGRPC(s.TaskTemplate.Resources), - LogDriver: driverToGRPC(s.TaskTemplate.LogDriver), - Networks: taskNetworks, - ForceUpdate: s.TaskTemplate.ForceUpdate, - }, - Networks: serviceNetworks, - } - - switch s.TaskTemplate.Runtime { - case types.RuntimeContainer, "": // if empty runtime default to container - if s.TaskTemplate.ContainerSpec != nil { - containerSpec, err := containerToGRPC(s.TaskTemplate.ContainerSpec) - if err != nil { - return swarmapi.ServiceSpec{}, err - } - spec.Task.Runtime = &swarmapi.TaskSpec_Container{Container: containerSpec} - } else { - // If the ContainerSpec is nil, we can't set the task runtime - return swarmapi.ServiceSpec{}, ErrMismatchedRuntime - } - case types.RuntimePlugin: - if s.TaskTemplate.PluginSpec != nil { - if s.Mode.Replicated != nil { - return swarmapi.ServiceSpec{}, errors.New("plugins must not use replicated mode") - } - - s.Mode.Global = &types.GlobalService{} // must always be global - - pluginSpec, err := proto.Marshal(s.TaskTemplate.PluginSpec) - if err != nil { - return swarmapi.ServiceSpec{}, err - } - spec.Task.Runtime = &swarmapi.TaskSpec_Generic{ - Generic: &swarmapi.GenericRuntimeSpec{ - Kind: string(types.RuntimePlugin), - Payload: &gogotypes.Any{ - TypeUrl: string(types.RuntimeURLPlugin), - Value: pluginSpec, - }, - }, - } - } else { - return swarmapi.ServiceSpec{}, ErrMismatchedRuntime - } - case types.RuntimeNetworkAttachment: - // NOTE(dperny) I'm leaving this case here for completeness. The actual - // code is left out out deliberately, as we should refuse to parse a - // Network Attachment runtime; it will cause weird behavior all over - // the system if we do. Instead, fallthrough and return - // ErrUnsupportedRuntime if we get one. 
- fallthrough - default: - return swarmapi.ServiceSpec{}, ErrUnsupportedRuntime - } - - restartPolicy, err := restartPolicyToGRPC(s.TaskTemplate.RestartPolicy) - if err != nil { - return swarmapi.ServiceSpec{}, err - } - spec.Task.Restart = restartPolicy - - if s.TaskTemplate.Placement != nil { - var preferences []*swarmapi.PlacementPreference - for _, pref := range s.TaskTemplate.Placement.Preferences { - if pref.Spread != nil { - preferences = append(preferences, &swarmapi.PlacementPreference{ - Preference: &swarmapi.PlacementPreference_Spread{ - Spread: &swarmapi.SpreadOver{ - SpreadDescriptor: pref.Spread.SpreadDescriptor, - }, - }, - }) - } - } - var platforms []*swarmapi.Platform - for _, plat := range s.TaskTemplate.Placement.Platforms { - platforms = append(platforms, &swarmapi.Platform{ - Architecture: plat.Architecture, - OS: plat.OS, - }) - } - spec.Task.Placement = &swarmapi.Placement{ - Constraints: s.TaskTemplate.Placement.Constraints, - Preferences: preferences, - Platforms: platforms, - } - } - - spec.Update, err = updateConfigToGRPC(s.UpdateConfig) - if err != nil { - return swarmapi.ServiceSpec{}, err - } - spec.Rollback, err = updateConfigToGRPC(s.RollbackConfig) - if err != nil { - return swarmapi.ServiceSpec{}, err - } - - if s.EndpointSpec != nil { - if s.EndpointSpec.Mode != "" && - s.EndpointSpec.Mode != types.ResolutionModeVIP && - s.EndpointSpec.Mode != types.ResolutionModeDNSRR { - return swarmapi.ServiceSpec{}, fmt.Errorf("invalid resolution mode: %q", s.EndpointSpec.Mode) - } - - spec.Endpoint = &swarmapi.EndpointSpec{} - - spec.Endpoint.Mode = swarmapi.EndpointSpec_ResolutionMode(swarmapi.EndpointSpec_ResolutionMode_value[strings.ToUpper(string(s.EndpointSpec.Mode))]) - - for _, portConfig := range s.EndpointSpec.Ports { - spec.Endpoint.Ports = append(spec.Endpoint.Ports, &swarmapi.PortConfig{ - Name: portConfig.Name, - Protocol: swarmapi.PortConfig_Protocol(swarmapi.PortConfig_Protocol_value[strings.ToUpper(string(portConfig.Protocol))]), - PublishMode: swarmapi.PortConfig_PublishMode(swarmapi.PortConfig_PublishMode_value[strings.ToUpper(string(portConfig.PublishMode))]), - TargetPort: portConfig.TargetPort, - PublishedPort: portConfig.PublishedPort, - }) - } - } - - // Mode - if s.Mode.Global != nil && s.Mode.Replicated != nil { - return swarmapi.ServiceSpec{}, fmt.Errorf("cannot specify both replicated mode and global mode") - } - - if s.Mode.Global != nil { - spec.Mode = &swarmapi.ServiceSpec_Global{ - Global: &swarmapi.GlobalService{}, - } - } else if s.Mode.Replicated != nil && s.Mode.Replicated.Replicas != nil { - spec.Mode = &swarmapi.ServiceSpec_Replicated{ - Replicated: &swarmapi.ReplicatedService{Replicas: *s.Mode.Replicated.Replicas}, - } - } else { - spec.Mode = &swarmapi.ServiceSpec_Replicated{ - Replicated: &swarmapi.ReplicatedService{Replicas: 1}, - } - } - - return spec, nil -} - -func annotationsFromGRPC(ann swarmapi.Annotations) types.Annotations { - a := types.Annotations{ - Name: ann.Name, - Labels: ann.Labels, - } - - if a.Labels == nil { - a.Labels = make(map[string]string) - } - - return a -} - -// GenericResourcesFromGRPC converts a GRPC GenericResource to a GenericResource -func GenericResourcesFromGRPC(genericRes []*swarmapi.GenericResource) []types.GenericResource { - var generic []types.GenericResource - for _, res := range genericRes { - var current types.GenericResource - - switch r := res.Resource.(type) { - case *swarmapi.GenericResource_DiscreteResourceSpec: - current.DiscreteResourceSpec = &types.DiscreteGenericResource{ - 
Kind: r.DiscreteResourceSpec.Kind, - Value: r.DiscreteResourceSpec.Value, - } - case *swarmapi.GenericResource_NamedResourceSpec: - current.NamedResourceSpec = &types.NamedGenericResource{ - Kind: r.NamedResourceSpec.Kind, - Value: r.NamedResourceSpec.Value, - } - } - - generic = append(generic, current) - } - - return generic -} - -func resourcesFromGRPC(res *swarmapi.ResourceRequirements) *types.ResourceRequirements { - var resources *types.ResourceRequirements - if res != nil { - resources = &types.ResourceRequirements{} - if res.Limits != nil { - resources.Limits = &types.Resources{ - NanoCPUs: res.Limits.NanoCPUs, - MemoryBytes: res.Limits.MemoryBytes, - } - } - if res.Reservations != nil { - resources.Reservations = &types.Resources{ - NanoCPUs: res.Reservations.NanoCPUs, - MemoryBytes: res.Reservations.MemoryBytes, - GenericResources: GenericResourcesFromGRPC(res.Reservations.Generic), - } - } - } - - return resources -} - -// GenericResourcesToGRPC converts a GenericResource to a GRPC GenericResource -func GenericResourcesToGRPC(genericRes []types.GenericResource) []*swarmapi.GenericResource { - var generic []*swarmapi.GenericResource - for _, res := range genericRes { - var r *swarmapi.GenericResource - - if res.DiscreteResourceSpec != nil { - r = genericresource.NewDiscrete(res.DiscreteResourceSpec.Kind, res.DiscreteResourceSpec.Value) - } else if res.NamedResourceSpec != nil { - r = genericresource.NewString(res.NamedResourceSpec.Kind, res.NamedResourceSpec.Value) - } - - generic = append(generic, r) - } - - return generic -} - -func resourcesToGRPC(res *types.ResourceRequirements) *swarmapi.ResourceRequirements { - var reqs *swarmapi.ResourceRequirements - if res != nil { - reqs = &swarmapi.ResourceRequirements{} - if res.Limits != nil { - reqs.Limits = &swarmapi.Resources{ - NanoCPUs: res.Limits.NanoCPUs, - MemoryBytes: res.Limits.MemoryBytes, - } - } - if res.Reservations != nil { - reqs.Reservations = &swarmapi.Resources{ - NanoCPUs: res.Reservations.NanoCPUs, - MemoryBytes: res.Reservations.MemoryBytes, - Generic: GenericResourcesToGRPC(res.Reservations.GenericResources), - } - - } - } - return reqs -} - -func restartPolicyFromGRPC(p *swarmapi.RestartPolicy) *types.RestartPolicy { - var rp *types.RestartPolicy - if p != nil { - rp = &types.RestartPolicy{} - - switch p.Condition { - case swarmapi.RestartOnNone: - rp.Condition = types.RestartPolicyConditionNone - case swarmapi.RestartOnFailure: - rp.Condition = types.RestartPolicyConditionOnFailure - case swarmapi.RestartOnAny: - rp.Condition = types.RestartPolicyConditionAny - default: - rp.Condition = types.RestartPolicyConditionAny - } - - if p.Delay != nil { - delay, _ := gogotypes.DurationFromProto(p.Delay) - rp.Delay = &delay - } - if p.Window != nil { - window, _ := gogotypes.DurationFromProto(p.Window) - rp.Window = &window - } - - rp.MaxAttempts = &p.MaxAttempts - } - return rp -} - -func restartPolicyToGRPC(p *types.RestartPolicy) (*swarmapi.RestartPolicy, error) { - var rp *swarmapi.RestartPolicy - if p != nil { - rp = &swarmapi.RestartPolicy{} - - switch p.Condition { - case types.RestartPolicyConditionNone: - rp.Condition = swarmapi.RestartOnNone - case types.RestartPolicyConditionOnFailure: - rp.Condition = swarmapi.RestartOnFailure - case types.RestartPolicyConditionAny: - rp.Condition = swarmapi.RestartOnAny - default: - if string(p.Condition) != "" { - return nil, fmt.Errorf("invalid RestartCondition: %q", p.Condition) - } - rp.Condition = swarmapi.RestartOnAny - } - - if p.Delay != nil { - rp.Delay = 
gogotypes.DurationProto(*p.Delay) - } - if p.Window != nil { - rp.Window = gogotypes.DurationProto(*p.Window) - } - if p.MaxAttempts != nil { - rp.MaxAttempts = *p.MaxAttempts - - } - } - return rp, nil -} - -func placementFromGRPC(p *swarmapi.Placement) *types.Placement { - if p == nil { - return nil - } - r := &types.Placement{ - Constraints: p.Constraints, - } - - for _, pref := range p.Preferences { - if spread := pref.GetSpread(); spread != nil { - r.Preferences = append(r.Preferences, types.PlacementPreference{ - Spread: &types.SpreadOver{ - SpreadDescriptor: spread.SpreadDescriptor, - }, - }) - } - } - - for _, plat := range p.Platforms { - r.Platforms = append(r.Platforms, types.Platform{ - Architecture: plat.Architecture, - OS: plat.OS, - }) - } - - return r -} - -func driverFromGRPC(p *swarmapi.Driver) *types.Driver { - if p == nil { - return nil - } - - return &types.Driver{ - Name: p.Name, - Options: p.Options, - } -} - -func driverToGRPC(p *types.Driver) *swarmapi.Driver { - if p == nil { - return nil - } - - return &swarmapi.Driver{ - Name: p.Name, - Options: p.Options, - } -} - -func updateConfigFromGRPC(updateConfig *swarmapi.UpdateConfig) *types.UpdateConfig { - if updateConfig == nil { - return nil - } - - converted := &types.UpdateConfig{ - Parallelism: updateConfig.Parallelism, - MaxFailureRatio: updateConfig.MaxFailureRatio, - } - - converted.Delay = updateConfig.Delay - if updateConfig.Monitor != nil { - converted.Monitor, _ = gogotypes.DurationFromProto(updateConfig.Monitor) - } - - switch updateConfig.FailureAction { - case swarmapi.UpdateConfig_PAUSE: - converted.FailureAction = types.UpdateFailureActionPause - case swarmapi.UpdateConfig_CONTINUE: - converted.FailureAction = types.UpdateFailureActionContinue - case swarmapi.UpdateConfig_ROLLBACK: - converted.FailureAction = types.UpdateFailureActionRollback - } - - switch updateConfig.Order { - case swarmapi.UpdateConfig_STOP_FIRST: - converted.Order = types.UpdateOrderStopFirst - case swarmapi.UpdateConfig_START_FIRST: - converted.Order = types.UpdateOrderStartFirst - } - - return converted -} - -func updateConfigToGRPC(updateConfig *types.UpdateConfig) (*swarmapi.UpdateConfig, error) { - if updateConfig == nil { - return nil, nil - } - - converted := &swarmapi.UpdateConfig{ - Parallelism: updateConfig.Parallelism, - Delay: updateConfig.Delay, - MaxFailureRatio: updateConfig.MaxFailureRatio, - } - - switch updateConfig.FailureAction { - case types.UpdateFailureActionPause, "": - converted.FailureAction = swarmapi.UpdateConfig_PAUSE - case types.UpdateFailureActionContinue: - converted.FailureAction = swarmapi.UpdateConfig_CONTINUE - case types.UpdateFailureActionRollback: - converted.FailureAction = swarmapi.UpdateConfig_ROLLBACK - default: - return nil, fmt.Errorf("unrecognized update failure action %s", updateConfig.FailureAction) - } - if updateConfig.Monitor != 0 { - converted.Monitor = gogotypes.DurationProto(updateConfig.Monitor) - } - - switch updateConfig.Order { - case types.UpdateOrderStopFirst, "": - converted.Order = swarmapi.UpdateConfig_STOP_FIRST - case types.UpdateOrderStartFirst: - converted.Order = swarmapi.UpdateConfig_START_FIRST - default: - return nil, fmt.Errorf("unrecognized update order %s", updateConfig.Order) - } - - return converted, nil -} - -func networkAttachmentSpecFromGRPC(attachment swarmapi.NetworkAttachmentSpec) *types.NetworkAttachmentSpec { - return &types.NetworkAttachmentSpec{ - ContainerID: attachment.ContainerID, - } -} - -func taskSpecFromGRPC(taskSpec swarmapi.TaskSpec) 
(types.TaskSpec, error) { - taskNetworks := make([]types.NetworkAttachmentConfig, 0, len(taskSpec.Networks)) - for _, n := range taskSpec.Networks { - netConfig := types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverOpts: n.DriverAttachmentOpts} - taskNetworks = append(taskNetworks, netConfig) - } - - t := types.TaskSpec{ - Resources: resourcesFromGRPC(taskSpec.Resources), - RestartPolicy: restartPolicyFromGRPC(taskSpec.Restart), - Placement: placementFromGRPC(taskSpec.Placement), - LogDriver: driverFromGRPC(taskSpec.LogDriver), - Networks: taskNetworks, - ForceUpdate: taskSpec.ForceUpdate, - } - - switch taskSpec.GetRuntime().(type) { - case *swarmapi.TaskSpec_Container, nil: - c := taskSpec.GetContainer() - if c != nil { - t.ContainerSpec = containerSpecFromGRPC(c) - } - case *swarmapi.TaskSpec_Generic: - g := taskSpec.GetGeneric() - if g != nil { - switch g.Kind { - case string(types.RuntimePlugin): - var p runtime.PluginSpec - if err := proto.Unmarshal(g.Payload.Value, &p); err != nil { - return t, errors.Wrap(err, "error unmarshalling plugin spec") - } - t.PluginSpec = &p - } - } - case *swarmapi.TaskSpec_Attachment: - a := taskSpec.GetAttachment() - if a != nil { - t.NetworkAttachmentSpec = networkAttachmentSpecFromGRPC(*a) - } - t.Runtime = types.RuntimeNetworkAttachment - } - - return t, nil -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/swarm.go b/vendor/github.com/docker/docker/daemon/cluster/convert/swarm.go deleted file mode 100644 index ae97a4b61..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/convert/swarm.go +++ /dev/null @@ -1,147 +0,0 @@ -package convert // import "github.com/docker/docker/daemon/cluster/convert" - -import ( - "fmt" - "strings" - - types "github.com/docker/docker/api/types/swarm" - swarmapi "github.com/docker/swarmkit/api" - "github.com/docker/swarmkit/ca" - gogotypes "github.com/gogo/protobuf/types" -) - -// SwarmFromGRPC converts a grpc Cluster to a Swarm. 
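A hedged sketch of the cluster converter that follows, on a mostly-zero Cluster (ID and token hypothetical). The conversion is total: missing optional pieces such as the root CA or dispatcher config come back as Go zero values instead of errors:

package main

import (
	"fmt"

	"github.com/docker/docker/daemon/cluster/convert"
	swarmapi "github.com/docker/swarmkit/api"
)

func main() {
	c := swarmapi.Cluster{ID: "cluster-1"}
	c.RootCA.JoinTokens.Worker = "SWMTKN-worker-token"

	sw := convert.SwarmFromGRPC(c)
	fmt.Println(sw.ID, sw.JoinTokens.Worker)
}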
-func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm { - swarm := types.Swarm{ - ClusterInfo: types.ClusterInfo{ - ID: c.ID, - Spec: types.Spec{ - Orchestration: types.OrchestrationConfig{ - TaskHistoryRetentionLimit: &c.Spec.Orchestration.TaskHistoryRetentionLimit, - }, - Raft: types.RaftConfig{ - SnapshotInterval: c.Spec.Raft.SnapshotInterval, - KeepOldSnapshots: &c.Spec.Raft.KeepOldSnapshots, - LogEntriesForSlowFollowers: c.Spec.Raft.LogEntriesForSlowFollowers, - HeartbeatTick: int(c.Spec.Raft.HeartbeatTick), - ElectionTick: int(c.Spec.Raft.ElectionTick), - }, - EncryptionConfig: types.EncryptionConfig{ - AutoLockManagers: c.Spec.EncryptionConfig.AutoLockManagers, - }, - CAConfig: types.CAConfig{ - // do not include the signing CA cert or key (it should already be redacted via the swarm APIs) - - // the key because it's secret, and the cert because otherwise doing a get + update on the spec - // can cause issues because the key would be missing and the cert wouldn't - ForceRotate: c.Spec.CAConfig.ForceRotate, - }, - }, - TLSInfo: types.TLSInfo{ - TrustRoot: string(c.RootCA.CACert), - }, - RootRotationInProgress: c.RootCA.RootRotation != nil, - }, - JoinTokens: types.JoinTokens{ - Worker: c.RootCA.JoinTokens.Worker, - Manager: c.RootCA.JoinTokens.Manager, - }, - } - - issuerInfo, err := ca.IssuerFromAPIRootCA(&c.RootCA) - if err == nil && issuerInfo != nil { - swarm.TLSInfo.CertIssuerSubject = issuerInfo.Subject - swarm.TLSInfo.CertIssuerPublicKey = issuerInfo.PublicKey - } - - heartbeatPeriod, _ := gogotypes.DurationFromProto(c.Spec.Dispatcher.HeartbeatPeriod) - swarm.Spec.Dispatcher.HeartbeatPeriod = heartbeatPeriod - - swarm.Spec.CAConfig.NodeCertExpiry, _ = gogotypes.DurationFromProto(c.Spec.CAConfig.NodeCertExpiry) - - for _, ca := range c.Spec.CAConfig.ExternalCAs { - swarm.Spec.CAConfig.ExternalCAs = append(swarm.Spec.CAConfig.ExternalCAs, &types.ExternalCA{ - Protocol: types.ExternalCAProtocol(strings.ToLower(ca.Protocol.String())), - URL: ca.URL, - Options: ca.Options, - CACert: string(ca.CACert), - }) - } - - // Meta - swarm.Version.Index = c.Meta.Version.Index - swarm.CreatedAt, _ = gogotypes.TimestampFromProto(c.Meta.CreatedAt) - swarm.UpdatedAt, _ = gogotypes.TimestampFromProto(c.Meta.UpdatedAt) - - // Annotations - swarm.Spec.Annotations = annotationsFromGRPC(c.Spec.Annotations) - - return swarm -} - -// SwarmSpecToGRPC converts a Spec to a grpc ClusterSpec. -func SwarmSpecToGRPC(s types.Spec) (swarmapi.ClusterSpec, error) { - return MergeSwarmSpecToGRPC(s, swarmapi.ClusterSpec{}) -} - -// MergeSwarmSpecToGRPC merges a Spec with an initial grpc ClusterSpec -func MergeSwarmSpecToGRPC(s types.Spec, spec swarmapi.ClusterSpec) (swarmapi.ClusterSpec, error) { - // We take the initSpec (either created from scratch, or returned by swarmkit), - // and will only change the value if the one taken from types.Spec is not nil or 0. - // In other words, if the value taken from types.Spec is nil or 0, we will maintain the status quo. 
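// For example (illustrative, not from the original source): merging a Spec
// that sets only Raft.SnapshotInterval updates that single field in the
// checks below and leaves every other cluster setting exactly as swarmkit
// returned it.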
- if s.Annotations.Name != "" { - spec.Annotations.Name = s.Annotations.Name - } - if len(s.Annotations.Labels) != 0 { - spec.Annotations.Labels = s.Annotations.Labels - } - - if s.Orchestration.TaskHistoryRetentionLimit != nil { - spec.Orchestration.TaskHistoryRetentionLimit = *s.Orchestration.TaskHistoryRetentionLimit - } - if s.Raft.SnapshotInterval != 0 { - spec.Raft.SnapshotInterval = s.Raft.SnapshotInterval - } - if s.Raft.KeepOldSnapshots != nil { - spec.Raft.KeepOldSnapshots = *s.Raft.KeepOldSnapshots - } - if s.Raft.LogEntriesForSlowFollowers != 0 { - spec.Raft.LogEntriesForSlowFollowers = s.Raft.LogEntriesForSlowFollowers - } - if s.Raft.HeartbeatTick != 0 { - spec.Raft.HeartbeatTick = uint32(s.Raft.HeartbeatTick) - } - if s.Raft.ElectionTick != 0 { - spec.Raft.ElectionTick = uint32(s.Raft.ElectionTick) - } - if s.Dispatcher.HeartbeatPeriod != 0 { - spec.Dispatcher.HeartbeatPeriod = gogotypes.DurationProto(s.Dispatcher.HeartbeatPeriod) - } - if s.CAConfig.NodeCertExpiry != 0 { - spec.CAConfig.NodeCertExpiry = gogotypes.DurationProto(s.CAConfig.NodeCertExpiry) - } - if s.CAConfig.SigningCACert != "" { - spec.CAConfig.SigningCACert = []byte(s.CAConfig.SigningCACert) - } - if s.CAConfig.SigningCAKey != "" { - // do propagate the signing CA key here because we want to provide it TO the swarm APIs - spec.CAConfig.SigningCAKey = []byte(s.CAConfig.SigningCAKey) - } - spec.CAConfig.ForceRotate = s.CAConfig.ForceRotate - - for _, ca := range s.CAConfig.ExternalCAs { - protocol, ok := swarmapi.ExternalCA_CAProtocol_value[strings.ToUpper(string(ca.Protocol))] - if !ok { - return swarmapi.ClusterSpec{}, fmt.Errorf("invalid protocol: %q", ca.Protocol) - } - spec.CAConfig.ExternalCAs = append(spec.CAConfig.ExternalCAs, &swarmapi.ExternalCA{ - Protocol: swarmapi.ExternalCA_CAProtocol(protocol), - URL: ca.URL, - Options: ca.Options, - CACert: []byte(ca.CACert), - }) - } - - spec.EncryptionConfig.AutoLockManagers = s.EncryptionConfig.AutoLockManagers - - return spec, nil -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/convert/task.go b/vendor/github.com/docker/docker/daemon/cluster/convert/task.go deleted file mode 100644 index 72e2805e1..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/convert/task.go +++ /dev/null @@ -1,69 +0,0 @@ -package convert // import "github.com/docker/docker/daemon/cluster/convert" - -import ( - "strings" - - types "github.com/docker/docker/api/types/swarm" - swarmapi "github.com/docker/swarmkit/api" - gogotypes "github.com/gogo/protobuf/types" -) - -// TaskFromGRPC converts a grpc Task to a Task. 
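A minimal sketch of the task converter that follows (IDs hypothetical). Like the service converter, it only fails when the embedded TaskSpec carries an unparseable runtime payload; an otherwise sparse task converts cleanly:

package main

import (
	"fmt"

	"github.com/docker/docker/daemon/cluster/convert"
	swarmapi "github.com/docker/swarmkit/api"
)

func main() {
	t := swarmapi.Task{ID: "task-1", ServiceID: "svc-1", Slot: 1}
	task, err := convert.TaskFromGRPC(t)
	if err != nil {
		panic(err)
	}
	fmt.Println(task.ID, task.Slot, task.Status.State)
}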
-func TaskFromGRPC(t swarmapi.Task) (types.Task, error) { - containerStatus := t.Status.GetContainer() - taskSpec, err := taskSpecFromGRPC(t.Spec) - if err != nil { - return types.Task{}, err - } - task := types.Task{ - ID: t.ID, - Annotations: annotationsFromGRPC(t.Annotations), - ServiceID: t.ServiceID, - Slot: int(t.Slot), - NodeID: t.NodeID, - Spec: taskSpec, - Status: types.TaskStatus{ - State: types.TaskState(strings.ToLower(t.Status.State.String())), - Message: t.Status.Message, - Err: t.Status.Err, - }, - DesiredState: types.TaskState(strings.ToLower(t.DesiredState.String())), - GenericResources: GenericResourcesFromGRPC(t.AssignedGenericResources), - } - - // Meta - task.Version.Index = t.Meta.Version.Index - task.CreatedAt, _ = gogotypes.TimestampFromProto(t.Meta.CreatedAt) - task.UpdatedAt, _ = gogotypes.TimestampFromProto(t.Meta.UpdatedAt) - - task.Status.Timestamp, _ = gogotypes.TimestampFromProto(t.Status.Timestamp) - - if containerStatus != nil { - task.Status.ContainerStatus = &types.ContainerStatus{ - ContainerID: containerStatus.ContainerID, - PID: int(containerStatus.PID), - ExitCode: int(containerStatus.ExitCode), - } - } - - // NetworksAttachments - for _, na := range t.Networks { - task.NetworksAttachments = append(task.NetworksAttachments, networkAttachmentFromGRPC(na)) - } - - if t.Status.PortStatus == nil { - return task, nil - } - - for _, p := range t.Status.PortStatus.Ports { - task.Status.PortStatus.Ports = append(task.Status.PortStatus.Ports, types.PortConfig{ - Name: p.Name, - Protocol: types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(p.Protocol)])), - PublishMode: types.PortConfigPublishMode(strings.ToLower(swarmapi.PortConfig_PublishMode_name[int32(p.PublishMode)])), - TargetPort: p.TargetPort, - PublishedPort: p.PublishedPort, - }) - } - - return task, nil -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/errors.go b/vendor/github.com/docker/docker/daemon/cluster/errors.go deleted file mode 100644 index 9ec716b1b..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/errors.go +++ /dev/null @@ -1,61 +0,0 @@ -package cluster // import "github.com/docker/docker/daemon/cluster" - -const ( - // errNoSwarm is returned on leaving a cluster that was never initialized - errNoSwarm notAvailableError = "This node is not part of a swarm" - - // errSwarmExists is returned on initialize or join request for a cluster that has already been activated - errSwarmExists notAvailableError = "This node is already part of a swarm. Use \"docker swarm leave\" to leave this swarm and join another one." - - // errSwarmJoinTimeoutReached is returned when cluster join could not complete before timeout was reached. - errSwarmJoinTimeoutReached notAvailableError = "Timeout was reached before node joined. The attempt to join the swarm will continue in the background. Use the \"docker info\" command to see the current swarm status of your node." - - // errSwarmLocked is returned if the swarm is encrypted and needs a key to unlock it. - errSwarmLocked notAvailableError = "Swarm is encrypted and needs to be unlocked before it can be used. Please use \"docker swarm unlock\" to unlock it." - - // errSwarmCertificatesExpired is returned if docker was not started for the whole validity period and they had no chance to renew automatically. - errSwarmCertificatesExpired notAvailableError = "Swarm certificates have expired. To replace them, leave the swarm and join again." 
- - // errSwarmNotManager is returned if the node is not a swarm manager. - errSwarmNotManager notAvailableError = "This node is not a swarm manager. Worker nodes can't be used to view or modify cluster state. Please run this command on a manager node or promote the current node to a manager." -) - -type notAllowedError string - -func (e notAllowedError) Error() string { - return string(e) -} - -func (e notAllowedError) Forbidden() {} - -type notAvailableError string - -func (e notAvailableError) Error() string { - return string(e) -} - -func (e notAvailableError) Unavailable() {} - -type configError string - -func (e configError) Error() string { - return string(e) -} - -func (e configError) InvalidParameter() {} - -type invalidUnlockKey struct{} - -func (invalidUnlockKey) Error() string { - return "swarm could not be unlocked: invalid key provided" -} - -func (invalidUnlockKey) Unauthorized() {} - -type notLockedError struct{} - -func (notLockedError) Error() string { - return "swarm is not locked" -} - -func (notLockedError) Conflict() {} diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/backend.go b/vendor/github.com/docker/docker/daemon/cluster/executor/backend.go deleted file mode 100644 index 1f2312ab4..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/executor/backend.go +++ /dev/null @@ -1,75 +0,0 @@ -package executor // import "github.com/docker/docker/daemon/cluster/executor" - -import ( - "context" - "io" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/events" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/network" - swarmtypes "github.com/docker/docker/api/types/swarm" - containerpkg "github.com/docker/docker/container" - clustertypes "github.com/docker/docker/daemon/cluster/provider" - networkSettings "github.com/docker/docker/daemon/network" - "github.com/docker/docker/plugin" - volumeopts "github.com/docker/docker/volume/service/opts" - "github.com/docker/libnetwork" - "github.com/docker/libnetwork/cluster" - networktypes "github.com/docker/libnetwork/types" - "github.com/docker/swarmkit/agent/exec" -) - -// Backend defines the executor component for a swarm agent. 
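A self-contained sketch of the typed string-error pattern used in errors.go above. The empty marker methods let callers classify errors by behavior instead of matching message text; the mapping to HTTP status codes is an assumption about the API layer, not taken from this file:

package main

import "fmt"

type notAvailableError string

func (e notAvailableError) Error() string { return string(e) }

// Unavailable is an empty marker method; the HTTP layer is assumed to key
// off it when choosing a status code.
func (e notAvailableError) Unavailable() {}

func main() {
	var err error = notAvailableError("This node is not part of a swarm")
	if _, ok := err.(interface{ Unavailable() }); ok {
		fmt.Println("classify as unavailable (e.g. 503)") // assumed mapping
	}
}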
-type Backend interface { - CreateManagedNetwork(clustertypes.NetworkCreateRequest) error - DeleteManagedNetwork(networkID string) error - FindNetwork(idName string) (libnetwork.Network, error) - SetupIngress(clustertypes.NetworkCreateRequest, string) (<-chan struct{}, error) - ReleaseIngress() (<-chan struct{}, error) - CreateManagedContainer(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) - ContainerStart(name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error - ContainerStop(name string, seconds *int) error - ContainerLogs(context.Context, string, *types.ContainerLogsOptions) (msgs <-chan *backend.LogMessage, tty bool, err error) - ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error - ActivateContainerServiceBinding(containerName string) error - DeactivateContainerServiceBinding(containerName string) error - UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error - ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) - ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error) - ContainerRm(name string, config *types.ContainerRmConfig) error - ContainerKill(name string, sig uint64) error - SetContainerDependencyStore(name string, store exec.DependencyGetter) error - SetContainerSecretReferences(name string, refs []*swarmtypes.SecretReference) error - SetContainerConfigReferences(name string, refs []*swarmtypes.ConfigReference) error - SystemInfo() (*types.Info, error) - Containers(config *types.ContainerListOptions) ([]*types.Container, error) - SetNetworkBootstrapKeys([]*networktypes.EncryptionKey) error - DaemonJoinsCluster(provider cluster.Provider) - DaemonLeavesCluster() - IsSwarmCompatible() error - SubscribeToEvents(since, until time.Time, filter filters.Args) ([]events.Message, chan interface{}) - UnsubscribeFromEvents(listener chan interface{}) - UpdateAttachment(string, string, string, *network.NetworkingConfig) error - WaitForDetachment(context.Context, string, string, string, string) error - PluginManager() *plugin.Manager - PluginGetter() *plugin.Store - GetAttachmentStore() *networkSettings.AttachmentStore -} - -// VolumeBackend is used by an executor to perform volume operations -type VolumeBackend interface { - Create(ctx context.Context, name, driverName string, opts ...volumeopts.CreateOption) (*types.Volume, error) -} - -// ImageBackend is used by an executor to perform image operations -type ImageBackend interface { - PullImage(ctx context.Context, image, tag, platform string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error - GetRepository(context.Context, reference.Named, *types.AuthConfig) (distribution.Repository, bool, error) - LookupImage(name string) (*types.ImageInspect, error) -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/adapter.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/adapter.go deleted file mode 100644 index fdf1ee2ec..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/executor/container/adapter.go +++ /dev/null @@ -1,477 +0,0 @@ -package container // import "github.com/docker/docker/daemon/cluster/executor/container" - -import ( - "context" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "os" - "runtime" - "strings" - "syscall" - "time" - - 
"github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/backend" - containertypes "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/events" - containerpkg "github.com/docker/docker/container" - "github.com/docker/docker/daemon" - "github.com/docker/docker/daemon/cluster/convert" - executorpkg "github.com/docker/docker/daemon/cluster/executor" - volumeopts "github.com/docker/docker/volume/service/opts" - "github.com/docker/libnetwork" - "github.com/docker/swarmkit/agent/exec" - "github.com/docker/swarmkit/api" - "github.com/docker/swarmkit/log" - gogotypes "github.com/gogo/protobuf/types" - "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" - "golang.org/x/time/rate" -) - -// containerAdapter conducts remote operations for a container. All calls -// are mostly naked calls to the client API, seeded with information from -// containerConfig. -type containerAdapter struct { - backend executorpkg.Backend - imageBackend executorpkg.ImageBackend - volumeBackend executorpkg.VolumeBackend - container *containerConfig - dependencies exec.DependencyGetter -} - -func newContainerAdapter(b executorpkg.Backend, i executorpkg.ImageBackend, v executorpkg.VolumeBackend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*containerAdapter, error) { - ctnr, err := newContainerConfig(task, node) - if err != nil { - return nil, err - } - - return &containerAdapter{ - container: ctnr, - backend: b, - imageBackend: i, - volumeBackend: v, - dependencies: dependencies, - }, nil -} - -func (c *containerAdapter) pullImage(ctx context.Context) error { - spec := c.container.spec() - - // Skip pulling if the image is referenced by image ID. - if _, err := digest.Parse(spec.Image); err == nil { - return nil - } - - // Skip pulling if the image is referenced by digest and already - // exists locally. - named, err := reference.ParseNormalizedNamed(spec.Image) - if err == nil { - if _, ok := named.(reference.Canonical); ok { - _, err := c.imageBackend.LookupImage(spec.Image) - if err == nil { - return nil - } - } - } - - // if the image needs to be pulled, the auth config will be retrieved and updated - var encodedAuthConfig string - if spec.PullOptions != nil { - encodedAuthConfig = spec.PullOptions.RegistryAuth - } - - authConfig := &types.AuthConfig{} - if encodedAuthConfig != "" { - if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuthConfig))).Decode(authConfig); err != nil { - logrus.Warnf("invalid authconfig: %v", err) - } - } - - pr, pw := io.Pipe() - metaHeaders := map[string][]string{} - go func() { - // TODO @jhowardmsft LCOW Support: This will need revisiting as - // the stack is built up to include LCOW support for swarm. 
- platform := runtime.GOOS - err := c.imageBackend.PullImage(ctx, c.container.image(), "", platform, metaHeaders, authConfig, pw) - pw.CloseWithError(err) - }() - - dec := json.NewDecoder(pr) - dec.UseNumber() - m := map[string]interface{}{} - spamLimiter := rate.NewLimiter(rate.Every(time.Second), 1) - - lastStatus := "" - for { - if err := dec.Decode(&m); err != nil { - if err == io.EOF { - break - } - return err - } - l := log.G(ctx) - // limit pull progress logs unless the status changes - if spamLimiter.Allow() || lastStatus != m["status"] { - // if we have progress details, we have everything we need - if progress, ok := m["progressDetail"].(map[string]interface{}); ok { - // first, log the image and status - l = l.WithFields(logrus.Fields{ - "image": c.container.image(), - "status": m["status"], - }) - // then, if we have progress, log the progress - if progress["current"] != nil && progress["total"] != nil { - l = l.WithFields(logrus.Fields{ - "current": progress["current"], - "total": progress["total"], - }) - } - } - l.Debug("pull in progress") - } - // sometimes, we get no useful information at all, and add no fields - if status, ok := m["status"].(string); ok { - lastStatus = status - } - } - - // if the final stream object contained an error, return it - if errMsg, ok := m["error"]; ok { - return fmt.Errorf("%v", errMsg) - } - return nil -} - -func (c *containerAdapter) createNetworks(ctx context.Context) error { - for name := range c.container.networksAttachments { - ncr, err := c.container.networkCreateRequest(name) - if err != nil { - return err - } - - if err := c.backend.CreateManagedNetwork(ncr); err != nil { // todo name missing - if _, ok := err.(libnetwork.NetworkNameError); ok { - continue - } - // We will continue if CreateManagedNetwork returns PredefinedNetworkError error. - // Other callers still can treat it as Error. 
- if _, ok := err.(daemon.PredefinedNetworkError); ok { - continue - } - return err - } - } - - return nil -} - -func (c *containerAdapter) removeNetworks(ctx context.Context) error { - for name, v := range c.container.networksAttachments { - if err := c.backend.DeleteManagedNetwork(v.Network.ID); err != nil { - switch err.(type) { - case *libnetwork.ActiveEndpointsError: - continue - case libnetwork.ErrNoSuchNetwork: - continue - default: - log.G(ctx).Errorf("network %s remove failed: %v", name, err) - return err - } - } - } - - return nil -} - -func (c *containerAdapter) networkAttach(ctx context.Context) error { - config := c.container.createNetworkingConfig(c.backend) - - var ( - networkName string - networkID string - ) - - if config != nil { - for n, epConfig := range config.EndpointsConfig { - networkName = n - networkID = epConfig.NetworkID - break - } - } - - return c.backend.UpdateAttachment(networkName, networkID, c.container.networkAttachmentContainerID(), config) -} - -func (c *containerAdapter) waitForDetach(ctx context.Context) error { - config := c.container.createNetworkingConfig(c.backend) - - var ( - networkName string - networkID string - ) - - if config != nil { - for n, epConfig := range config.EndpointsConfig { - networkName = n - networkID = epConfig.NetworkID - break - } - } - - return c.backend.WaitForDetachment(ctx, networkName, networkID, c.container.taskID(), c.container.networkAttachmentContainerID()) -} - -func (c *containerAdapter) create(ctx context.Context) error { - var cr containertypes.ContainerCreateCreatedBody - var err error - if cr, err = c.backend.CreateManagedContainer(types.ContainerCreateConfig{ - Name: c.container.name(), - Config: c.container.config(), - HostConfig: c.container.hostConfig(), - // Use the first network in container create - NetworkingConfig: c.container.createNetworkingConfig(c.backend), - }); err != nil { - return err - } - - // Docker daemon currently doesn't support multiple networks in container create - // Connect to all other networks - nc := c.container.connectNetworkingConfig(c.backend) - - if nc != nil { - for n, ep := range nc.EndpointsConfig { - if err := c.backend.ConnectContainerToNetwork(cr.ID, n, ep); err != nil { - return err - } - } - } - - container := c.container.task.Spec.GetContainer() - if container == nil { - return errors.New("unable to get container from task spec") - } - - if err := c.backend.SetContainerDependencyStore(cr.ID, c.dependencies); err != nil { - return err - } - - // configure secrets - secretRefs := convert.SecretReferencesFromGRPC(container.Secrets) - if err := c.backend.SetContainerSecretReferences(cr.ID, secretRefs); err != nil { - return err - } - - configRefs := convert.ConfigReferencesFromGRPC(container.Configs) - if err := c.backend.SetContainerConfigReferences(cr.ID, configRefs); err != nil { - return err - } - - return c.backend.UpdateContainerServiceConfig(cr.ID, c.container.serviceConfig()) -} - -// checkMounts ensures that the provided mounts won't have any host-specific -// problems at start up. For example, we disallow bind mounts without an -// existing path, which slightly different from the container API. 
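An illustrative, stdlib-only sketch of the pre-start validation performed by checkMounts below: a bind mount whose source path is absent on the node is rejected before the container ever starts (path hypothetical):

package main

import (
	"fmt"
	"os"
)

func main() {
	src := "/var/run/missing.sock" // hypothetical bind-mount source
	if _, err := os.Stat(src); os.IsNotExist(err) {
		fmt.Printf("invalid bind mount source, source path not found: %s\n", src)
	}
}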
-func (c *containerAdapter) checkMounts() error { - spec := c.container.spec() - for _, mount := range spec.Mounts { - switch mount.Type { - case api.MountTypeBind: - if _, err := os.Stat(mount.Source); os.IsNotExist(err) { - return fmt.Errorf("invalid bind mount source, source path not found: %s", mount.Source) - } - } - } - - return nil -} - -func (c *containerAdapter) start(ctx context.Context) error { - if err := c.checkMounts(); err != nil { - return err - } - - return c.backend.ContainerStart(c.container.name(), nil, "", "") -} - -func (c *containerAdapter) inspect(ctx context.Context) (types.ContainerJSON, error) { - cs, err := c.backend.ContainerInspectCurrent(c.container.name(), false) - if ctx.Err() != nil { - return types.ContainerJSON{}, ctx.Err() - } - if err != nil { - return types.ContainerJSON{}, err - } - return *cs, nil -} - -// events issues a call to the events API and returns a channel with all -// events. The stream of events can be shutdown by cancelling the context. -func (c *containerAdapter) events(ctx context.Context) <-chan events.Message { - log.G(ctx).Debugf("waiting on events") - buffer, l := c.backend.SubscribeToEvents(time.Time{}, time.Time{}, c.container.eventFilter()) - eventsq := make(chan events.Message, len(buffer)) - - for _, event := range buffer { - eventsq <- event - } - - go func() { - defer c.backend.UnsubscribeFromEvents(l) - - for { - select { - case ev := <-l: - jev, ok := ev.(events.Message) - if !ok { - log.G(ctx).Warnf("unexpected event message: %q", ev) - continue - } - select { - case eventsq <- jev: - case <-ctx.Done(): - return - } - case <-ctx.Done(): - return - } - } - }() - - return eventsq -} - -func (c *containerAdapter) wait(ctx context.Context) (<-chan containerpkg.StateStatus, error) { - return c.backend.ContainerWait(ctx, c.container.nameOrID(), containerpkg.WaitConditionNotRunning) -} - -func (c *containerAdapter) shutdown(ctx context.Context) error { - // Default stop grace period to nil (daemon will use the stopTimeout of the container) - var stopgrace *int - spec := c.container.spec() - if spec.StopGracePeriod != nil { - stopgraceValue := int(spec.StopGracePeriod.Seconds) - stopgrace = &stopgraceValue - } - return c.backend.ContainerStop(c.container.name(), stopgrace) -} - -func (c *containerAdapter) terminate(ctx context.Context) error { - return c.backend.ContainerKill(c.container.name(), uint64(syscall.SIGKILL)) -} - -func (c *containerAdapter) remove(ctx context.Context) error { - return c.backend.ContainerRm(c.container.name(), &types.ContainerRmConfig{ - RemoveVolume: true, - ForceRemove: true, - }) -} - -func (c *containerAdapter) createVolumes(ctx context.Context) error { - // Create plugin volumes that are embedded inside a Mount - for _, mount := range c.container.task.Spec.GetContainer().Mounts { - if mount.Type != api.MountTypeVolume { - continue - } - - if mount.VolumeOptions == nil { - continue - } - - if mount.VolumeOptions.DriverConfig == nil { - continue - } - - req := c.container.volumeCreateRequest(&mount) - - // Check if this volume exists on the engine - if _, err := c.volumeBackend.Create(ctx, req.Name, req.Driver, - volumeopts.WithCreateOptions(req.DriverOpts), - volumeopts.WithCreateLabels(req.Labels), - ); err != nil { - // TODO(amitshukla): Today, volume create through the engine api does not return an error - // when the named volume with the same parameters already exists. 
- // It returns an error if the driver name is different - that is a valid error - return err - } - - } - - return nil -} - -func (c *containerAdapter) activateServiceBinding() error { - return c.backend.ActivateContainerServiceBinding(c.container.name()) -} - -func (c *containerAdapter) deactivateServiceBinding() error { - return c.backend.DeactivateContainerServiceBinding(c.container.name()) -} - -func (c *containerAdapter) logs(ctx context.Context, options api.LogSubscriptionOptions) (<-chan *backend.LogMessage, error) { - apiOptions := &types.ContainerLogsOptions{ - Follow: options.Follow, - - // Always say yes to Timestamps and Details. We make the decision - // of whether to return these to the user or not way higher up the - // stack. - Timestamps: true, - Details: true, - } - - if options.Since != nil { - since, err := gogotypes.TimestampFromProto(options.Since) - if err != nil { - return nil, err - } - // print since as this formatted string because the docker container - // logs interface expects it like this. - // see github.com/docker/docker/api/types/time.ParseTimestamps - apiOptions.Since = fmt.Sprintf("%d.%09d", since.Unix(), int64(since.Nanosecond())) - } - - if options.Tail < 0 { - // See protobuf documentation for details of how this works. - apiOptions.Tail = fmt.Sprint(-options.Tail - 1) - } else if options.Tail > 0 { - return nil, errors.New("tail relative to start of logs not supported via docker API") - } - - if len(options.Streams) == 0 { - // empty == all - apiOptions.ShowStdout, apiOptions.ShowStderr = true, true - } else { - for _, stream := range options.Streams { - switch stream { - case api.LogStreamStdout: - apiOptions.ShowStdout = true - case api.LogStreamStderr: - apiOptions.ShowStderr = true - } - } - } - msgs, _, err := c.backend.ContainerLogs(ctx, c.container.name(), apiOptions) - if err != nil { - return nil, err - } - return msgs, nil -} - -// todo: typed/wrapped errors -func isContainerCreateNameConflict(err error) bool { - return strings.Contains(err.Error(), "Conflict. The name") -} - -func isUnknownContainer(err error) bool { - return strings.Contains(err.Error(), "No such container:") -} - -func isStoppedContainer(err error) bool { - return strings.Contains(err.Error(), "is already stopped") -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/attachment.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/attachment.go deleted file mode 100644 index f0aa0b957..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/executor/container/attachment.go +++ /dev/null @@ -1,74 +0,0 @@ -package container // import "github.com/docker/docker/daemon/cluster/executor/container" - -import ( - "context" - - executorpkg "github.com/docker/docker/daemon/cluster/executor" - "github.com/docker/swarmkit/agent/exec" - "github.com/docker/swarmkit/api" -) - -// networkAttacherController implements agent.Controller against docker's API. -// -// networkAttacherController manages the lifecycle of the network attachment -// of an unmanaged docker container that is handled as a task from the -// agent's point of view. It provides the network attachment information the -// unmanaged container needs in order to attach to the network and run.
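[Editorial note: a rough sketch of the order in which the controller below is driven, assuming the usual swarmkit agent sequence of Prepare, Start, Wait, and Remove. This is a toy simplification for illustration, not the real agent loop.]

    package main

    import (
        "context"
        "fmt"
    )

    // attacher is a toy stand-in for networkAttacherController; each method
    // just records the lifecycle step the agent would drive next.
    type attacher struct{}

    func (attacher) Prepare(ctx context.Context) error { fmt.Println("prepare: create networks"); return nil }
    func (attacher) Start(ctx context.Context) error   { fmt.Println("start: attach container"); return nil }
    func (attacher) Wait(ctx context.Context) error    { fmt.Println("wait: until detachment"); return nil }
    func (attacher) Remove(ctx context.Context) error  { fmt.Println("remove: networks"); return nil }

    func main() {
        ctx := context.Background()
        a := attacher{}
        for _, step := range []func(context.Context) error{a.Prepare, a.Start, a.Wait, a.Remove} {
            if err := step(ctx); err != nil {
                return
            }
        }
    }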
-type networkAttacherController struct { - backend executorpkg.Backend - task *api.Task - adapter *containerAdapter - closed chan struct{} -} - -func newNetworkAttacherController(b executorpkg.Backend, i executorpkg.ImageBackend, v executorpkg.VolumeBackend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*networkAttacherController, error) { - adapter, err := newContainerAdapter(b, i, v, task, node, dependencies) - if err != nil { - return nil, err - } - - return &networkAttacherController{ - backend: b, - task: task, - adapter: adapter, - closed: make(chan struct{}), - }, nil -} - -func (nc *networkAttacherController) Update(ctx context.Context, t *api.Task) error { - return nil -} - -func (nc *networkAttacherController) Prepare(ctx context.Context) error { - // Make sure all the networks that the task needs are created. - return nc.adapter.createNetworks(ctx) -} - -func (nc *networkAttacherController) Start(ctx context.Context) error { - return nc.adapter.networkAttach(ctx) -} - -func (nc *networkAttacherController) Wait(pctx context.Context) error { - ctx, cancel := context.WithCancel(pctx) - defer cancel() - - return nc.adapter.waitForDetach(ctx) -} - -func (nc *networkAttacherController) Shutdown(ctx context.Context) error { - return nil -} - -func (nc *networkAttacherController) Terminate(ctx context.Context) error { - return nil -} - -func (nc *networkAttacherController) Remove(ctx context.Context) error { - // Try removing the network referenced in this task in case this - // task is the last one referencing it - return nc.adapter.removeNetworks(ctx) -} - -func (nc *networkAttacherController) Close() error { - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/container.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/container.go deleted file mode 100644 index 77d21d2c1..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/executor/container/container.go +++ /dev/null @@ -1,680 +0,0 @@ -package container // import "github.com/docker/docker/daemon/cluster/executor/container" - -import ( - "errors" - "fmt" - "net" - "strconv" - "strings" - "time" - - "github.com/sirupsen/logrus" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - enginecontainer "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/events" - "github.com/docker/docker/api/types/filters" - enginemount "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/network" - volumetypes "github.com/docker/docker/api/types/volume" - "github.com/docker/docker/daemon/cluster/convert" - executorpkg "github.com/docker/docker/daemon/cluster/executor" - clustertypes "github.com/docker/docker/daemon/cluster/provider" - "github.com/docker/go-connections/nat" - netconst "github.com/docker/libnetwork/datastore" - "github.com/docker/swarmkit/agent/exec" - "github.com/docker/swarmkit/api" - "github.com/docker/swarmkit/api/genericresource" - "github.com/docker/swarmkit/template" - gogotypes "github.com/gogo/protobuf/types" -) - -const ( - // Explicitly use the kernel's default setting for CPU quota of 100ms. - // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt - cpuQuotaPeriod = 100 * time.Millisecond - - // systemLabelPrefix represents the reserved namespace for system labels. - systemLabelPrefix = "com.docker.swarm" -) - -// containerConfig converts task properties into docker container compatible -// components. 
-type containerConfig struct { - task *api.Task - networksAttachments map[string]*api.NetworkAttachment -} - -// newContainerConfig returns a validated container config. No methods should -// return an error if this function returns without error. -func newContainerConfig(t *api.Task, node *api.NodeDescription) (*containerConfig, error) { - var c containerConfig - return &c, c.setTask(t, node) -} - -func (c *containerConfig) setTask(t *api.Task, node *api.NodeDescription) error { - if t.Spec.GetContainer() == nil && t.Spec.GetAttachment() == nil { - return exec.ErrRuntimeUnsupported - } - - container := t.Spec.GetContainer() - if container != nil { - if container.Image == "" { - return ErrImageRequired - } - - if err := validateMounts(container.Mounts); err != nil { - return err - } - } - - // index the networks by name - c.networksAttachments = make(map[string]*api.NetworkAttachment, len(t.Networks)) - for _, attachment := range t.Networks { - c.networksAttachments[attachment.Network.Spec.Annotations.Name] = attachment - } - - c.task = t - - if t.Spec.GetContainer() != nil { - preparedSpec, err := template.ExpandContainerSpec(node, t) - if err != nil { - return err - } - c.task.Spec.Runtime = &api.TaskSpec_Container{ - Container: preparedSpec, - } - } - - return nil -} - -func (c *containerConfig) networkAttachmentContainerID() string { - attachment := c.task.Spec.GetAttachment() - if attachment == nil { - return "" - } - - return attachment.ContainerID -} - -func (c *containerConfig) taskID() string { - return c.task.ID -} - -func (c *containerConfig) endpoint() *api.Endpoint { - return c.task.Endpoint -} - -func (c *containerConfig) spec() *api.ContainerSpec { - return c.task.Spec.GetContainer() -} - -func (c *containerConfig) nameOrID() string { - if c.task.Spec.GetContainer() != nil { - return c.name() - } - - return c.networkAttachmentContainerID() -} - -func (c *containerConfig) name() string { - if c.task.Annotations.Name != "" { - // if set, use the container Annotations.Name field, set in the orchestrator. - return c.task.Annotations.Name - } - - slot := fmt.Sprint(c.task.Slot) - if slot == "" || c.task.Slot == 0 { - slot = c.task.NodeID - } - - // fallback to service.slot.id. 
- return fmt.Sprintf("%s.%s.%s", c.task.ServiceAnnotations.Name, slot, c.task.ID) -} - -func (c *containerConfig) image() string { - raw := c.spec().Image - ref, err := reference.ParseNormalizedNamed(raw) - if err != nil { - return raw - } - return reference.FamiliarString(reference.TagNameOnly(ref)) -} - -func (c *containerConfig) portBindings() nat.PortMap { - portBindings := nat.PortMap{} - if c.task.Endpoint == nil { - return portBindings - } - - for _, portConfig := range c.task.Endpoint.Ports { - if portConfig.PublishMode != api.PublishModeHost { - continue - } - - port := nat.Port(fmt.Sprintf("%d/%s", portConfig.TargetPort, strings.ToLower(portConfig.Protocol.String()))) - binding := []nat.PortBinding{ - {}, - } - - if portConfig.PublishedPort != 0 { - binding[0].HostPort = strconv.Itoa(int(portConfig.PublishedPort)) - } - portBindings[port] = binding - } - - return portBindings -} - -func (c *containerConfig) isolation() enginecontainer.Isolation { - return convert.IsolationFromGRPC(c.spec().Isolation) -} - -func (c *containerConfig) init() *bool { - if c.spec().Init == nil { - return nil - } - init := c.spec().Init.GetValue() - return &init -} - -func (c *containerConfig) exposedPorts() map[nat.Port]struct{} { - exposedPorts := make(map[nat.Port]struct{}) - if c.task.Endpoint == nil { - return exposedPorts - } - - for _, portConfig := range c.task.Endpoint.Ports { - if portConfig.PublishMode != api.PublishModeHost { - continue - } - - port := nat.Port(fmt.Sprintf("%d/%s", portConfig.TargetPort, strings.ToLower(portConfig.Protocol.String()))) - exposedPorts[port] = struct{}{} - } - - return exposedPorts -} - -func (c *containerConfig) config() *enginecontainer.Config { - genericEnvs := genericresource.EnvFormat(c.task.AssignedGenericResources, "DOCKER_RESOURCE") - env := append(c.spec().Env, genericEnvs...) - - config := &enginecontainer.Config{ - Labels: c.labels(), - StopSignal: c.spec().StopSignal, - Tty: c.spec().TTY, - OpenStdin: c.spec().OpenStdin, - User: c.spec().User, - Env: env, - Hostname: c.spec().Hostname, - WorkingDir: c.spec().Dir, - Image: c.image(), - ExposedPorts: c.exposedPorts(), - Healthcheck: c.healthcheck(), - } - - if len(c.spec().Command) > 0 { - // If Command is provided, we replace the whole invocation with Command - // by replacing Entrypoint and specifying Cmd. Args is ignored in this - // case. - config.Entrypoint = append(config.Entrypoint, c.spec().Command...) - config.Cmd = append(config.Cmd, c.spec().Args...) - } else if len(c.spec().Args) > 0 { - // In this case, we assume the image has an Entrypoint and Args - // specifies the arguments for that entrypoint. - config.Cmd = c.spec().Args - } - - return config -} - -func (c *containerConfig) labels() map[string]string { - var ( - system = map[string]string{ - "task": "", // mark as cluster task - "task.id": c.task.ID, - "task.name": c.name(), - "node.id": c.task.NodeID, - "service.id": c.task.ServiceID, - "service.name": c.task.ServiceAnnotations.Name, - } - labels = make(map[string]string) - ) - - // base labels are those defined in the spec. - for k, v := range c.spec().Labels { - labels[k] = v - } - - // we then apply the overrides from the task, which may be set via the - // orchestrator. - for k, v := range c.task.Annotations.Labels { - labels[k] = v - } - - // finally, we apply the system labels, which override all labels. 
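[Editorial note: the precedence described in the comments above (spec labels, then task annotations, then the reserved system labels) can be sketched standalone; the loop that applies the system layer continues below. Keys and values here are invented for illustration, and the real code joins system keys onto the com.docker.swarm prefix.]

    package main

    import "fmt"

    func main() {
        spec := map[string]string{"tier": "web"}                          // lowest precedence
        annotations := map[string]string{"tier": "frontend"}              // overrides the spec
        system := map[string]string{"com.docker.swarm.task.id": "abc123"} // always wins

        labels := map[string]string{}
        for _, layer := range []map[string]string{spec, annotations, system} {
            for k, v := range layer {
                labels[k] = v
            }
        }
        fmt.Println(labels) // map[com.docker.swarm.task.id:abc123 tier:frontend]
    }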
- for k, v := range system { - labels[strings.Join([]string{systemLabelPrefix, k}, ".")] = v - } - - return labels -} - -func (c *containerConfig) mounts() []enginemount.Mount { - var r []enginemount.Mount - for _, mount := range c.spec().Mounts { - r = append(r, convertMount(mount)) - } - return r -} - -func convertMount(m api.Mount) enginemount.Mount { - mount := enginemount.Mount{ - Source: m.Source, - Target: m.Target, - ReadOnly: m.ReadOnly, - } - - switch m.Type { - case api.MountTypeBind: - mount.Type = enginemount.TypeBind - case api.MountTypeVolume: - mount.Type = enginemount.TypeVolume - case api.MountTypeTmpfs: - mount.Type = enginemount.TypeTmpfs - } - - if m.BindOptions != nil { - mount.BindOptions = &enginemount.BindOptions{} - switch m.BindOptions.Propagation { - case api.MountPropagationRPrivate: - mount.BindOptions.Propagation = enginemount.PropagationRPrivate - case api.MountPropagationPrivate: - mount.BindOptions.Propagation = enginemount.PropagationPrivate - case api.MountPropagationRSlave: - mount.BindOptions.Propagation = enginemount.PropagationRSlave - case api.MountPropagationSlave: - mount.BindOptions.Propagation = enginemount.PropagationSlave - case api.MountPropagationRShared: - mount.BindOptions.Propagation = enginemount.PropagationRShared - case api.MountPropagationShared: - mount.BindOptions.Propagation = enginemount.PropagationShared - } - } - - if m.VolumeOptions != nil { - mount.VolumeOptions = &enginemount.VolumeOptions{ - NoCopy: m.VolumeOptions.NoCopy, - } - if m.VolumeOptions.Labels != nil { - mount.VolumeOptions.Labels = make(map[string]string, len(m.VolumeOptions.Labels)) - for k, v := range m.VolumeOptions.Labels { - mount.VolumeOptions.Labels[k] = v - } - } - if m.VolumeOptions.DriverConfig != nil { - mount.VolumeOptions.DriverConfig = &enginemount.Driver{ - Name: m.VolumeOptions.DriverConfig.Name, - } - if m.VolumeOptions.DriverConfig.Options != nil { - mount.VolumeOptions.DriverConfig.Options = make(map[string]string, len(m.VolumeOptions.DriverConfig.Options)) - for k, v := range m.VolumeOptions.DriverConfig.Options { - mount.VolumeOptions.DriverConfig.Options[k] = v - } - } - } - } - - if m.TmpfsOptions != nil { - mount.TmpfsOptions = &enginemount.TmpfsOptions{ - SizeBytes: m.TmpfsOptions.SizeBytes, - Mode: m.TmpfsOptions.Mode, - } - } - - return mount -} - -func (c *containerConfig) healthcheck() *enginecontainer.HealthConfig { - hcSpec := c.spec().Healthcheck - if hcSpec == nil { - return nil - } - interval, _ := gogotypes.DurationFromProto(hcSpec.Interval) - timeout, _ := gogotypes.DurationFromProto(hcSpec.Timeout) - startPeriod, _ := gogotypes.DurationFromProto(hcSpec.StartPeriod) - return &enginecontainer.HealthConfig{ - Test: hcSpec.Test, - Interval: interval, - Timeout: timeout, - Retries: int(hcSpec.Retries), - StartPeriod: startPeriod, - } -} - -func (c *containerConfig) hostConfig() *enginecontainer.HostConfig { - hc := &enginecontainer.HostConfig{ - Resources: c.resources(), - GroupAdd: c.spec().Groups, - PortBindings: c.portBindings(), - Mounts: c.mounts(), - ReadonlyRootfs: c.spec().ReadOnly, - Isolation: c.isolation(), - Init: c.init(), - } - - if c.spec().DNSConfig != nil { - hc.DNS = c.spec().DNSConfig.Nameservers - hc.DNSSearch = c.spec().DNSConfig.Search - hc.DNSOptions = c.spec().DNSConfig.Options - } - - c.applyPrivileges(hc) - - // The format of extra hosts on swarmkit is specified in: - // http://man7.org/linux/man-pages/man5/hosts.5.html - // IP_address canonical_hostname [aliases...] 
- // However, the format of ExtraHosts in HostConfig is - // <host>:<ip> - // We need to do the conversion here - // (Alias is ignored for now) - for _, entry := range c.spec().Hosts { - parts := strings.Fields(entry) - if len(parts) > 1 { - hc.ExtraHosts = append(hc.ExtraHosts, fmt.Sprintf("%s:%s", parts[1], parts[0])) - } - } - - if c.task.LogDriver != nil { - hc.LogConfig = enginecontainer.LogConfig{ - Type: c.task.LogDriver.Name, - Config: c.task.LogDriver.Options, - } - } - - if len(c.task.Networks) > 0 { - labels := c.task.Networks[0].Network.Spec.Annotations.Labels - name := c.task.Networks[0].Network.Spec.Annotations.Name - if v, ok := labels["com.docker.swarm.predefined"]; ok && v == "true" { - hc.NetworkMode = enginecontainer.NetworkMode(name) - } - } - - return hc -} - -// This handles the case of volumes that are defined inside a service Mount -func (c *containerConfig) volumeCreateRequest(mount *api.Mount) *volumetypes.VolumeCreateBody { - var ( - driverName string - driverOpts map[string]string - labels map[string]string - ) - - if mount.VolumeOptions != nil && mount.VolumeOptions.DriverConfig != nil { - driverName = mount.VolumeOptions.DriverConfig.Name - driverOpts = mount.VolumeOptions.DriverConfig.Options - labels = mount.VolumeOptions.Labels - } - - if mount.VolumeOptions != nil { - return &volumetypes.VolumeCreateBody{ - Name: mount.Source, - Driver: driverName, - DriverOpts: driverOpts, - Labels: labels, - } - } - return nil -} - -func (c *containerConfig) resources() enginecontainer.Resources { - resources := enginecontainer.Resources{} - - // If no limits are specified, let the engine use its defaults. - // - // TODO(aluzzardi): We might want to set some limits anyway, otherwise - // "unlimited" tasks will step over the reservation of other tasks. - r := c.task.Spec.Resources - if r == nil || r.Limits == nil { - return resources - } - - if r.Limits.MemoryBytes > 0 { - resources.Memory = r.Limits.MemoryBytes - } - - if r.Limits.NanoCPUs > 0 { - // CPU Period must be set in microseconds. - resources.CPUPeriod = int64(cpuQuotaPeriod / time.Microsecond) - resources.CPUQuota = r.Limits.NanoCPUs * resources.CPUPeriod / 1e9 - } - - return resources -} - -// Docker daemon supports just 1 network during container create. -func (c *containerConfig) createNetworkingConfig(b executorpkg.Backend) *network.NetworkingConfig { - var networks []*api.NetworkAttachment - if c.task.Spec.GetContainer() != nil || c.task.Spec.GetAttachment() != nil { - networks = c.task.Networks - } - - epConfig := make(map[string]*network.EndpointSettings) - if len(networks) > 0 { - epConfig[networks[0].Network.Spec.Annotations.Name] = getEndpointConfig(networks[0], b) - } - - return &network.NetworkingConfig{EndpointsConfig: epConfig} -} - -// TODO: Merge this function with createNetworkingConfig after daemon supports multiple networks in container create -func (c *containerConfig) connectNetworkingConfig(b executorpkg.Backend) *network.NetworkingConfig { - var networks []*api.NetworkAttachment - if c.task.Spec.GetContainer() != nil { - networks = c.task.Networks - } - // First network is used during container create.
Other networks are used in "docker network connect" - if len(networks) < 2 { - return nil - } - - epConfig := make(map[string]*network.EndpointSettings) - for _, na := range networks[1:] { - epConfig[na.Network.Spec.Annotations.Name] = getEndpointConfig(na, b) - } - return &network.NetworkingConfig{EndpointsConfig: epConfig} -} - -func getEndpointConfig(na *api.NetworkAttachment, b executorpkg.Backend) *network.EndpointSettings { - var ipv4, ipv6 string - for _, addr := range na.Addresses { - ip, _, err := net.ParseCIDR(addr) - if err != nil { - continue - } - - if ip.To4() != nil { - ipv4 = ip.String() - continue - } - - if ip.To16() != nil { - ipv6 = ip.String() - } - } - - n := &network.EndpointSettings{ - NetworkID: na.Network.ID, - IPAMConfig: &network.EndpointIPAMConfig{ - IPv4Address: ipv4, - IPv6Address: ipv6, - }, - DriverOpts: na.DriverAttachmentOpts, - } - if v, ok := na.Network.Spec.Annotations.Labels["com.docker.swarm.predefined"]; ok && v == "true" { - if ln, err := b.FindNetwork(na.Network.Spec.Annotations.Name); err == nil { - n.NetworkID = ln.ID() - } - } - return n -} - -func (c *containerConfig) virtualIP(networkID string) string { - if c.task.Endpoint == nil { - return "" - } - - for _, eVip := range c.task.Endpoint.VirtualIPs { - // We only support IPv4 VIPs for now. - if eVip.NetworkID == networkID { - vip, _, err := net.ParseCIDR(eVip.Addr) - if err != nil { - return "" - } - - return vip.String() - } - } - - return "" -} - -func (c *containerConfig) serviceConfig() *clustertypes.ServiceConfig { - if len(c.task.Networks) == 0 { - return nil - } - - logrus.Debugf("Creating service config in agent for t = %+v", c.task) - svcCfg := &clustertypes.ServiceConfig{ - Name: c.task.ServiceAnnotations.Name, - Aliases: make(map[string][]string), - ID: c.task.ServiceID, - VirtualAddresses: make(map[string]*clustertypes.VirtualAddress), - } - - for _, na := range c.task.Networks { - svcCfg.VirtualAddresses[na.Network.ID] = &clustertypes.VirtualAddress{ - // We support only IPv4 virtual IP for now. 
- IPv4: c.virtualIP(na.Network.ID), - } - if len(na.Aliases) > 0 { - svcCfg.Aliases[na.Network.ID] = na.Aliases - } - } - - if c.task.Endpoint != nil { - for _, ePort := range c.task.Endpoint.Ports { - if ePort.PublishMode != api.PublishModeIngress { - continue - } - - svcCfg.ExposedPorts = append(svcCfg.ExposedPorts, &clustertypes.PortConfig{ - Name: ePort.Name, - Protocol: int32(ePort.Protocol), - TargetPort: ePort.TargetPort, - PublishedPort: ePort.PublishedPort, - }) - } - } - - return svcCfg -} - -func (c *containerConfig) networkCreateRequest(name string) (clustertypes.NetworkCreateRequest, error) { - na, ok := c.networksAttachments[name] - if !ok { - return clustertypes.NetworkCreateRequest{}, errors.New("container: unknown network referenced") - } - - options := types.NetworkCreate{ - // ID: na.Network.ID, - Labels: na.Network.Spec.Annotations.Labels, - Internal: na.Network.Spec.Internal, - Attachable: na.Network.Spec.Attachable, - Ingress: convert.IsIngressNetwork(na.Network), - EnableIPv6: na.Network.Spec.Ipv6Enabled, - CheckDuplicate: true, - Scope: netconst.SwarmScope, - } - - if na.Network.Spec.GetNetwork() != "" { - options.ConfigFrom = &network.ConfigReference{ - Network: na.Network.Spec.GetNetwork(), - } - } - - if na.Network.DriverState != nil { - options.Driver = na.Network.DriverState.Name - options.Options = na.Network.DriverState.Options - } - if na.Network.IPAM != nil { - options.IPAM = &network.IPAM{ - Driver: na.Network.IPAM.Driver.Name, - Options: na.Network.IPAM.Driver.Options, - } - for _, ic := range na.Network.IPAM.Configs { - c := network.IPAMConfig{ - Subnet: ic.Subnet, - IPRange: ic.Range, - Gateway: ic.Gateway, - } - options.IPAM.Config = append(options.IPAM.Config, c) - } - } - - return clustertypes.NetworkCreateRequest{ - ID: na.Network.ID, - NetworkCreateRequest: types.NetworkCreateRequest{ - Name: name, - NetworkCreate: options, - }, - }, nil -} - -func (c *containerConfig) applyPrivileges(hc *enginecontainer.HostConfig) { - privileges := c.spec().Privileges - if privileges == nil { - return - } - - credentials := privileges.CredentialSpec - if credentials != nil { - switch credentials.Source.(type) { - case *api.Privileges_CredentialSpec_File: - hc.SecurityOpt = append(hc.SecurityOpt, "credentialspec=file://"+credentials.GetFile()) - case *api.Privileges_CredentialSpec_Registry: - hc.SecurityOpt = append(hc.SecurityOpt, "credentialspec=registry://"+credentials.GetRegistry()) - } - } - - selinux := privileges.SELinuxContext - if selinux != nil { - if selinux.Disable { - hc.SecurityOpt = append(hc.SecurityOpt, "label=disable") - } - if selinux.User != "" { - hc.SecurityOpt = append(hc.SecurityOpt, "label=user:"+selinux.User) - } - if selinux.Role != "" { - hc.SecurityOpt = append(hc.SecurityOpt, "label=role:"+selinux.Role) - } - if selinux.Level != "" { - hc.SecurityOpt = append(hc.SecurityOpt, "label=level:"+selinux.Level) - } - if selinux.Type != "" { - hc.SecurityOpt = append(hc.SecurityOpt, "label=type:"+selinux.Type) - } - } -} - -func (c containerConfig) eventFilter() filters.Args { - filter := filters.NewArgs() - filter.Add("type", events.ContainerEventType) - filter.Add("name", c.name()) - filter.Add("label", fmt.Sprintf("%v.task.id=%v", systemLabelPrefix, c.task.ID)) - return filter -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/controller.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/controller.go deleted file mode 100644 index bcd426e73..000000000 --- 
a/vendor/github.com/docker/docker/daemon/cluster/executor/container/controller.go +++ /dev/null @@ -1,692 +0,0 @@ -package container // import "github.com/docker/docker/daemon/cluster/executor/container" - -import ( - "context" - "fmt" - "os" - "strconv" - "strings" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/events" - executorpkg "github.com/docker/docker/daemon/cluster/executor" - "github.com/docker/go-connections/nat" - "github.com/docker/libnetwork" - "github.com/docker/swarmkit/agent/exec" - "github.com/docker/swarmkit/api" - "github.com/docker/swarmkit/log" - gogotypes "github.com/gogo/protobuf/types" - "github.com/pkg/errors" - "golang.org/x/time/rate" -) - -const defaultGossipConvergeDelay = 2 * time.Second - -// controller implements agent.Controller against docker's API. -// -// Most operations against docker's API are done through the container name, -// which is unique to the task. -type controller struct { - task *api.Task - adapter *containerAdapter - closed chan struct{} - err error - pulled chan struct{} // closed after pull - cancelPull func() // cancels pull context if not nil - pullErr error // pull error, only read after pulled closed -} - -var _ exec.Controller = &controller{} - -// newController returns a docker exec runner for the provided task. -func newController(b executorpkg.Backend, i executorpkg.ImageBackend, v executorpkg.VolumeBackend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*controller, error) { - adapter, err := newContainerAdapter(b, i, v, task, node, dependencies) - if err != nil { - return nil, err - } - - return &controller{ - task: task, - adapter: adapter, - closed: make(chan struct{}), - }, nil -} - -func (r *controller) Task() (*api.Task, error) { - return r.task, nil -} - -// ContainerStatus returns the container-specific status for the task. -func (r *controller) ContainerStatus(ctx context.Context) (*api.ContainerStatus, error) { - ctnr, err := r.adapter.inspect(ctx) - if err != nil { - if isUnknownContainer(err) { - return nil, nil - } - return nil, err - } - return parseContainerStatus(ctnr) -} - -func (r *controller) PortStatus(ctx context.Context) (*api.PortStatus, error) { - ctnr, err := r.adapter.inspect(ctx) - if err != nil { - if isUnknownContainer(err) { - return nil, nil - } - - return nil, err - } - - return parsePortStatus(ctnr) -} - -// Update takes a recent task update and applies it to the container. -func (r *controller) Update(ctx context.Context, t *api.Task) error { - // TODO(stevvooe): While assignment of tasks is idempotent, we do allow - // updates of metadata, such as labelling, as well as any other properties - // that make sense. - return nil -} - -// Prepare creates a container and ensures the image is pulled. -// -// If the container has already been created, exec.ErrTaskPrepared is returned. -func (r *controller) Prepare(ctx context.Context) error { - if err := r.checkClosed(); err != nil { - return err - } - - // Make sure all the networks that the task needs are created. - if err := r.adapter.createNetworks(ctx); err != nil { - return err - } - - // Make sure all the volumes that the task needs are created. - if err := r.adapter.createVolumes(ctx); err != nil { - return err - } - - if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" { - if r.pulled == nil { - // Fork the pull to a different context to allow pull to continue - // on re-entrant calls to Prepare.
This ensures that Prepare can be - // idempotent and not incur the extra cost of pulling when - // cancelled on updates. - var pctx context.Context - - r.pulled = make(chan struct{}) - pctx, r.cancelPull = context.WithCancel(context.Background()) // TODO(stevvooe): Bind a context to the entire controller. - - go func() { - defer close(r.pulled) - r.pullErr = r.adapter.pullImage(pctx) // protected by closing r.pulled - }() - } - - select { - case <-ctx.Done(): - return ctx.Err() - case <-r.pulled: - if r.pullErr != nil { - // NOTE(stevvooe): We always try to pull the image to make sure we have - // the most up to date version. This will return an error, but we only - // log it. If the image truly doesn't exist, the create below will - // error out. - // - // This gives us some nice behavior where we use up to date versions of - // mutable tags, but will still run if the old image is available but a - // registry is down. - // - // If you don't want this behavior, lock down your image to an - // immutable tag or digest. - log.G(ctx).WithError(r.pullErr).Error("pulling image failed") - } - } - } - if err := r.adapter.create(ctx); err != nil { - if isContainerCreateNameConflict(err) { - if _, err := r.adapter.inspect(ctx); err != nil { - return err - } - - // container is already created. success! - return exec.ErrTaskPrepared - } - - return err - } - - return nil -} - -// Start the container. An error will be returned if the container is already started. -func (r *controller) Start(ctx context.Context) error { - if err := r.checkClosed(); err != nil { - return err - } - - ctnr, err := r.adapter.inspect(ctx) - if err != nil { - return err - } - - // Detect whether the container has *ever* been started. If so, we don't - // issue the start. - // - // TODO(stevvooe): This is very racy. While reading inspect, another could - // start the process and we could end up starting it twice. - if ctnr.State.Status != "created" { - return exec.ErrTaskStarted - } - - for { - if err := r.adapter.start(ctx); err != nil { - if _, ok := errors.Cause(err).(libnetwork.ErrNoSuchNetwork); ok { - // Retry network creation if we - // failed because some of the networks - // were not found. - if err := r.adapter.createNetworks(ctx); err != nil { - return err - } - - continue - } - - return errors.Wrap(err, "starting container failed") - } - - break - } - - // no health check - if ctnr.Config == nil || ctnr.Config.Healthcheck == nil || len(ctnr.Config.Healthcheck.Test) == 0 || ctnr.Config.Healthcheck.Test[0] == "NONE" { - if err := r.adapter.activateServiceBinding(); err != nil { - log.G(ctx).WithError(err).Errorf("failed to activate service binding for container %s which has no healthcheck config", r.adapter.container.name()) - return err - } - return nil - } - - // wait for container to be healthy - eventq := r.adapter.events(ctx) - - var healthErr error - for { - select { - case event := <-eventq: - if !r.matchevent(event) { - continue - } - - switch event.Action { - case "die": // exit on terminal events - ctnr, err := r.adapter.inspect(ctx) - if err != nil { - return errors.Wrap(err, "die event received") - } else if ctnr.State.ExitCode != 0 { - return &exitError{code: ctnr.State.ExitCode, cause: healthErr} - } - - return nil - case "destroy": - // If we get here, something has gone wrong but we want to exit - // and report anyway.
- return ErrContainerDestroyed - case "health_status: unhealthy": - // in this case, we stop the container and report unhealthy status - if err := r.Shutdown(ctx); err != nil { - return errors.Wrap(err, "unhealthy container shutdown failed") - } - // set health check error, and wait for container to fully exit ("die" event) - healthErr = ErrContainerUnhealthy - case "health_status: healthy": - if err := r.adapter.activateServiceBinding(); err != nil { - log.G(ctx).WithError(err).Errorf("failed to activate service binding for container %s after healthy event", r.adapter.container.name()) - return err - } - return nil - } - case <-ctx.Done(): - return ctx.Err() - case <-r.closed: - return r.err - } - } -} - -// Wait on the container to exit. -func (r *controller) Wait(pctx context.Context) error { - if err := r.checkClosed(); err != nil { - return err - } - - ctx, cancel := context.WithCancel(pctx) - defer cancel() - - healthErr := make(chan error, 1) - go func() { - ectx, cancel := context.WithCancel(ctx) // cancel event context on first event - defer cancel() - if err := r.checkHealth(ectx); err == ErrContainerUnhealthy { - healthErr <- ErrContainerUnhealthy - if err := r.Shutdown(ectx); err != nil { - log.G(ectx).WithError(err).Debug("shutdown failed on unhealthy") - } - } - }() - - waitC, err := r.adapter.wait(ctx) - if err != nil { - return err - } - - if status := <-waitC; status.ExitCode() != 0 { - exitErr := &exitError{ - code: status.ExitCode(), - } - - // Set the cause if it is knowable. - select { - case e := <-healthErr: - exitErr.cause = e - default: - if status.Err() != nil { - exitErr.cause = status.Err() - } - } - - return exitErr - } - - return nil -} - -func (r *controller) hasServiceBinding() bool { - if r.task == nil { - return false - } - - // service is attached to a network besides the default bridge - for _, na := range r.task.Networks { - if na.Network == nil || - na.Network.DriverState == nil || - na.Network.DriverState.Name == "bridge" && na.Network.Spec.Annotations.Name == "bridge" { - continue - } - return true - } - - return false -} - -// Shutdown the container cleanly. -func (r *controller) Shutdown(ctx context.Context) error { - if err := r.checkClosed(); err != nil { - return err - } - - if r.cancelPull != nil { - r.cancelPull() - } - - if r.hasServiceBinding() { - // remove container from service binding - if err := r.adapter.deactivateServiceBinding(); err != nil { - log.G(ctx).WithError(err).Warningf("failed to deactivate service binding for container %s", r.adapter.container.name()) - // Don't return an error here, because failure to deactivate - // the service binding is expected if the container was never - // started. - } - - // add a delay for gossip converge - // TODO(dongluochen): this delay should be configurable to fit different cluster size and network delay. - time.Sleep(defaultGossipConvergeDelay) - } - - if err := r.adapter.shutdown(ctx); err != nil { - if isUnknownContainer(err) || isStoppedContainer(err) { - return nil - } - - return err - } - - return nil -} - -// Terminate the container, with force. -func (r *controller) Terminate(ctx context.Context) error { - if err := r.checkClosed(); err != nil { - return err - } - - if r.cancelPull != nil { - r.cancelPull() - } - - if err := r.adapter.terminate(ctx); err != nil { - if isUnknownContainer(err) { - return nil - } - - return err - } - - return nil -} - -// Remove the container and its resources. 
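[Editorial note: a condensed sketch of the event-driven startup wait in the Start loop above. Unlike the real loop, which shuts the container down on an unhealthy report and then keeps waiting for the die event, this toy version returns immediately; the message type and event strings are simplified stand-ins.]

    package main

    import "fmt"

    // message is a minimal stand-in for the engine's events.Message; only
    // the Action field matters for this sketch.
    type message struct{ Action string }

    // waitHealthy consumes events until the container reports healthy,
    // dies, or is destroyed.
    func waitHealthy(events <-chan message) error {
        for ev := range events {
            switch ev.Action {
            case "health_status: healthy":
                return nil
            case "health_status: unhealthy":
                return fmt.Errorf("container unhealthy")
            case "die", "destroy":
                return fmt.Errorf("container exited before becoming healthy")
            }
        }
        return fmt.Errorf("event stream closed")
    }

    func main() {
        ch := make(chan message, 2)
        ch <- message{Action: "create"} // ignored: not a terminal or health event
        ch <- message{Action: "health_status: healthy"}
        close(ch)
        fmt.Println(waitHealthy(ch)) // <nil>
    }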
-func (r *controller) Remove(ctx context.Context) error { - if err := r.checkClosed(); err != nil { - return err - } - - if r.cancelPull != nil { - r.cancelPull() - } - - // It may be necessary to shut down the task before removing it. - if err := r.Shutdown(ctx); err != nil { - if isUnknownContainer(err) { - return nil - } - // This may fail if the task was already shut down. - log.G(ctx).WithError(err).Debug("shutdown failed on removal") - } - - // Try removing networks referenced in this task in case this - // task is the last one referencing it. - if err := r.adapter.removeNetworks(ctx); err != nil { - if isUnknownContainer(err) { - return nil - } - return err - } - - if err := r.adapter.remove(ctx); err != nil { - if isUnknownContainer(err) { - return nil - } - - return err - } - return nil -} - -// waitReady waits for a container to be "ready". -// Ready means it's past the started state. -func (r *controller) waitReady(pctx context.Context) error { - if err := r.checkClosed(); err != nil { - return err - } - - ctx, cancel := context.WithCancel(pctx) - defer cancel() - - eventq := r.adapter.events(ctx) - - ctnr, err := r.adapter.inspect(ctx) - if err != nil { - if !isUnknownContainer(err) { - return errors.Wrap(err, "inspect container failed") - } - } else { - switch ctnr.State.Status { - case "running", "exited", "dead": - return nil - } - } - - for { - select { - case event := <-eventq: - if !r.matchevent(event) { - continue - } - - switch event.Action { - case "start": - return nil - } - case <-ctx.Done(): - return ctx.Err() - case <-r.closed: - return r.err - } - } -} - -func (r *controller) Logs(ctx context.Context, publisher exec.LogPublisher, options api.LogSubscriptionOptions) error { - if err := r.checkClosed(); err != nil { - return err - } - - // if we're following, wait for this container to be ready. there is a - // problem here: if the container will never be ready (for example, it has - // been totally deleted) then this will wait forever. however, this doesn't - // actually cause any UI issues, and shouldn't be a problem. the stuck wait - // will go away when the follow (context) is canceled. - if options.Follow { - if err := r.waitReady(ctx); err != nil { - return errors.Wrap(err, "container not ready for logs") - } - } - // if we're not following, we're not gonna wait for the container to be - // ready. just call logs. if the container isn't ready, the call will fail - // and return an error. no big deal, we don't care, we only want the logs - // we can get RIGHT NOW with no follow. - - logsContext, cancel := context.WithCancel(ctx) - msgs, err := r.adapter.logs(logsContext, options) - defer cancel() - if err != nil { - return errors.Wrap(err, "failed getting container logs") - } - - var ( - // use a rate limiter to keep things under control, but also to provide some - // ability to coalesce messages.
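[Editorial note: the var block that continues below builds a golang.org/x/time/rate limiter and charges it per log line via WaitN. If I read the x/time/rate semantics correctly, rate.Every(time.Second) refills one token per second and the second argument (10<<20) is the burst size, so the trailing "10 MB/s" note is best read as a 10 MiB burst. A small sketch of the WaitN pattern with toy numbers:]

    package main

    import (
        "context"
        "fmt"
        "time"

        "golang.org/x/time/rate"
    )

    func main() {
        // One token per second sustained, with a burst bucket of 5 tokens.
        limiter := rate.NewLimiter(rate.Every(time.Second), 5)

        ctx := context.Background()
        start := time.Now()
        for i := 0; i < 3; i++ {
            // Charge 2 "bytes" per message, as WaitN(ctx, len(msg.Line)) does below.
            if err := limiter.WaitN(ctx, 2); err != nil {
                fmt.Println("rate limiter wait failed:", err)
                return
            }
            // The first two sends drain the burst; the third waits ~1s for a refill.
            fmt.Printf("msg %d at %v\n", i, time.Since(start).Round(time.Second))
        }
    }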
- limiter = rate.NewLimiter(rate.Every(time.Second), 10<<20) // 10 MB/s - msgctx = api.LogContext{ - NodeID: r.task.NodeID, - ServiceID: r.task.ServiceID, - TaskID: r.task.ID, - } - ) - - for { - msg, ok := <-msgs - if !ok { - // we're done here, no more messages - return nil - } - - if msg.Err != nil { - // the deferred cancel closes the adapter's log stream - return msg.Err - } - - // wait here for the limiter to catch up - if err := limiter.WaitN(ctx, len(msg.Line)); err != nil { - return errors.Wrap(err, "failed rate limiter") - } - tsp, err := gogotypes.TimestampProto(msg.Timestamp) - if err != nil { - return errors.Wrap(err, "failed to convert timestamp") - } - var stream api.LogStream - if msg.Source == "stdout" { - stream = api.LogStreamStdout - } else if msg.Source == "stderr" { - stream = api.LogStreamStderr - } - - // parse the details out of the Attrs map - var attrs []api.LogAttr - if len(msg.Attrs) != 0 { - attrs = make([]api.LogAttr, 0, len(msg.Attrs)) - for _, attr := range msg.Attrs { - attrs = append(attrs, api.LogAttr{Key: attr.Key, Value: attr.Value}) - } - } - - if err := publisher.Publish(ctx, api.LogMessage{ - Context: msgctx, - Timestamp: tsp, - Stream: stream, - Attrs: attrs, - Data: msg.Line, - }); err != nil { - return errors.Wrap(err, "failed to publish log message") - } - } -} - -// Close the runner and clean up any ephemeral resources. -func (r *controller) Close() error { - select { - case <-r.closed: - return r.err - default: - if r.cancelPull != nil { - r.cancelPull() - } - - r.err = exec.ErrControllerClosed - close(r.closed) - } - return nil -} - -func (r *controller) matchevent(event events.Message) bool { - if event.Type != events.ContainerEventType { - return false - } - // we can't filter using id since doing so is very likely to introduce a deadlock; see #33377.
- return event.Actor.Attributes["name"] == r.adapter.container.name() -} - -func (r *controller) checkClosed() error { - select { - case <-r.closed: - return r.err - default: - return nil - } -} - -func parseContainerStatus(ctnr types.ContainerJSON) (*api.ContainerStatus, error) { - status := &api.ContainerStatus{ - ContainerID: ctnr.ID, - PID: int32(ctnr.State.Pid), - ExitCode: int32(ctnr.State.ExitCode), - } - - return status, nil -} - -func parsePortStatus(ctnr types.ContainerJSON) (*api.PortStatus, error) { - status := &api.PortStatus{} - - if ctnr.NetworkSettings != nil && len(ctnr.NetworkSettings.Ports) > 0 { - exposedPorts, err := parsePortMap(ctnr.NetworkSettings.Ports) - if err != nil { - return nil, err - } - status.Ports = exposedPorts - } - - return status, nil -} - -func parsePortMap(portMap nat.PortMap) ([]*api.PortConfig, error) { - exposedPorts := make([]*api.PortConfig, 0, len(portMap)) - - for portProtocol, mapping := range portMap { - parts := strings.SplitN(string(portProtocol), "/", 2) - if len(parts) != 2 { - return nil, fmt.Errorf("invalid port mapping: %s", portProtocol) - } - - port, err := strconv.ParseUint(parts[0], 10, 16) - if err != nil { - return nil, err - } - - protocol := api.ProtocolTCP - switch strings.ToLower(parts[1]) { - case "tcp": - protocol = api.ProtocolTCP - case "udp": - protocol = api.ProtocolUDP - case "sctp": - protocol = api.ProtocolSCTP - default: - return nil, fmt.Errorf("invalid protocol: %s", parts[1]) - } - - for _, binding := range mapping { - hostPort, err := strconv.ParseUint(binding.HostPort, 10, 16) - if err != nil { - return nil, err - } - - // TODO(aluzzardi): We're losing the port `name` here since - // there's no way to retrieve it back from the Engine. - exposedPorts = append(exposedPorts, &api.PortConfig{ - PublishMode: api.PublishModeHost, - Protocol: protocol, - TargetPort: uint32(port), - PublishedPort: uint32(hostPort), - }) - } - } - - return exposedPorts, nil -} - -type exitError struct { - code int - cause error -} - -func (e *exitError) Error() string { - if e.cause != nil { - return fmt.Sprintf("task: non-zero exit (%v): %v", e.code, e.cause) - } - - return fmt.Sprintf("task: non-zero exit (%v)", e.code) -} - -func (e *exitError) ExitCode() int { - return e.code -} - -func (e *exitError) Cause() error { - return e.cause -} - -// checkHealth blocks until unhealthy container is detected or ctx exits -func (r *controller) checkHealth(ctx context.Context) error { - eventq := r.adapter.events(ctx) - - for { - select { - case <-ctx.Done(): - return nil - case <-r.closed: - return nil - case event := <-eventq: - if !r.matchevent(event) { - continue - } - - switch event.Action { - case "health_status: unhealthy": - return ErrContainerUnhealthy - } - } - } -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/errors.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/errors.go deleted file mode 100644 index 4c90b9e0a..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/executor/container/errors.go +++ /dev/null @@ -1,17 +0,0 @@ -package container // import "github.com/docker/docker/daemon/cluster/executor/container" - -import ( - "errors" -) - -var ( - // ErrImageRequired returned if a task is missing the image definition. - ErrImageRequired = errors.New("dockerexec: image required") - - // ErrContainerDestroyed returned when a container is prematurely destroyed - // during a wait call. 
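[Editorial note: parsePortMap above splits a nat.Port key such as "8080/tcp" into its numeric port and protocol; here is a standalone sketch of just that parsing step. The helper name is invented for illustration; the error declarations resume below.]

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // splitPortProto mirrors how parsePortMap takes a "port/proto" key
    // apart into a numeric port and a lowercased protocol name.
    func splitPortProto(portProtocol string) (uint16, string, error) {
        parts := strings.SplitN(portProtocol, "/", 2)
        if len(parts) != 2 {
            return 0, "", fmt.Errorf("invalid port mapping: %s", portProtocol)
        }
        port, err := strconv.ParseUint(parts[0], 10, 16)
        if err != nil {
            return 0, "", err
        }
        return uint16(port), strings.ToLower(parts[1]), nil
    }

    func main() {
        port, proto, err := splitPortProto("8080/TCP")
        fmt.Println(port, proto, err) // 8080 tcp <nil>
    }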
- ErrContainerDestroyed = errors.New("dockerexec: container destroyed") - - // ErrContainerUnhealthy returned if controller detects the health check failure - ErrContainerUnhealthy = errors.New("dockerexec: unhealthy container") -) diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/executor.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/executor.go deleted file mode 100644 index 940a943e4..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/executor/container/executor.go +++ /dev/null @@ -1,293 +0,0 @@ -package container // import "github.com/docker/docker/daemon/cluster/executor/container" - -import ( - "context" - "fmt" - "sort" - "strings" - "sync" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/network" - swarmtypes "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/daemon/cluster/controllers/plugin" - "github.com/docker/docker/daemon/cluster/convert" - executorpkg "github.com/docker/docker/daemon/cluster/executor" - clustertypes "github.com/docker/docker/daemon/cluster/provider" - networktypes "github.com/docker/libnetwork/types" - "github.com/docker/swarmkit/agent" - "github.com/docker/swarmkit/agent/exec" - "github.com/docker/swarmkit/api" - "github.com/docker/swarmkit/api/naming" - "github.com/docker/swarmkit/template" - "github.com/sirupsen/logrus" -) - -type executor struct { - backend executorpkg.Backend - imageBackend executorpkg.ImageBackend - pluginBackend plugin.Backend - volumeBackend executorpkg.VolumeBackend - dependencies exec.DependencyManager - mutex sync.Mutex // This mutex protects the following node field - node *api.NodeDescription -} - -// NewExecutor returns an executor from the docker client. -func NewExecutor(b executorpkg.Backend, p plugin.Backend, i executorpkg.ImageBackend, v executorpkg.VolumeBackend) exec.Executor { - return &executor{ - backend: b, - pluginBackend: p, - imageBackend: i, - volumeBackend: v, - dependencies: agent.NewDependencyManager(), - } -} - -// Describe returns the underlying node description from the docker client. -func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) { - info, err := e.backend.SystemInfo() - if err != nil { - return nil, err - } - - plugins := map[api.PluginDescription]struct{}{} - addPlugins := func(typ string, names []string) { - for _, name := range names { - plugins[api.PluginDescription{ - Type: typ, - Name: name, - }] = struct{}{} - } - } - - // add v1 plugins - addPlugins("Volume", info.Plugins.Volume) - // Add builtin driver "overlay" (the only builtin multi-host driver) to - // the plugin list by default. 
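[Editorial note: Describe above collects plugin descriptions into a map keyed by the struct value, so duplicates collapse automatically; the addPlugins calls continue below. A minimal sketch of that set-of-structs pattern, with simplified types:]

    package main

    import "fmt"

    // pluginDescription is a toy stand-in for api.PluginDescription; a
    // comparable struct can serve directly as a map key, giving a set.
    type pluginDescription struct{ Type, Name string }

    func main() {
        plugins := map[pluginDescription]struct{}{}
        add := func(typ string, names []string) {
            for _, name := range names {
                plugins[pluginDescription{Type: typ, Name: name}] = struct{}{}
            }
        }
        // The duplicate "overlay" entry collapses into a single set member.
        add("Network", []string{"overlay", "bridge", "overlay"})
        fmt.Println(len(plugins)) // 2
    }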
- addPlugins("Network", append([]string{"overlay"}, info.Plugins.Network...)) - addPlugins("Authorization", info.Plugins.Authorization) - addPlugins("Log", info.Plugins.Log) - - // add v2 plugins - v2Plugins, err := e.backend.PluginManager().List(filters.NewArgs()) - if err == nil { - for _, plgn := range v2Plugins { - for _, typ := range plgn.Config.Interface.Types { - if typ.Prefix != "docker" || !plgn.Enabled { - continue - } - plgnTyp := typ.Capability - switch typ.Capability { - case "volumedriver": - plgnTyp = "Volume" - case "networkdriver": - plgnTyp = "Network" - case "logdriver": - plgnTyp = "Log" - } - - plugins[api.PluginDescription{ - Type: plgnTyp, - Name: plgn.Name, - }] = struct{}{} - } - } - } - - pluginFields := make([]api.PluginDescription, 0, len(plugins)) - for k := range plugins { - pluginFields = append(pluginFields, k) - } - - sort.Sort(sortedPlugins(pluginFields)) - - // parse []string labels into a map[string]string - labels := map[string]string{} - for _, l := range info.Labels { - stringSlice := strings.SplitN(l, "=", 2) - // this will take the last value in the list for a given key - // ideally, one shouldn't assign multiple values to the same key - if len(stringSlice) > 1 { - labels[stringSlice[0]] = stringSlice[1] - } - } - - description := &api.NodeDescription{ - Hostname: info.Name, - Platform: &api.Platform{ - Architecture: info.Architecture, - OS: info.OSType, - }, - Engine: &api.EngineDescription{ - EngineVersion: info.ServerVersion, - Labels: labels, - Plugins: pluginFields, - }, - Resources: &api.Resources{ - NanoCPUs: int64(info.NCPU) * 1e9, - MemoryBytes: info.MemTotal, - Generic: convert.GenericResourcesToGRPC(info.GenericResources), - }, - } - - // Save the node information in the executor field - e.mutex.Lock() - e.node = description - e.mutex.Unlock() - - return description, nil -} - -func (e *executor) Configure(ctx context.Context, node *api.Node) error { - var ingressNA *api.NetworkAttachment - attachments := make(map[string]string) - - for _, na := range node.Attachments { - if na == nil || na.Network == nil || len(na.Addresses) == 0 { - // this should not happen, but we got a panic here and don't have a - // good idea about what the underlying data structure looks like. - logrus.WithField("NetworkAttachment", fmt.Sprintf("%#v", na)). 
- Warnf("skipping nil or malformed node network attachment entry") - continue - } - - if na.Network.Spec.Ingress { - ingressNA = na - } - - attachments[na.Network.ID] = na.Addresses[0] - } - - if (ingressNA == nil) && (node.Attachment != nil) && (len(node.Attachment.Addresses) > 0) { - ingressNA = node.Attachment - attachments[ingressNA.Network.ID] = ingressNA.Addresses[0] - } - - if ingressNA == nil { - e.backend.ReleaseIngress() - return e.backend.GetAttachmentStore().ResetAttachments(attachments) - } - - options := types.NetworkCreate{ - Driver: ingressNA.Network.DriverState.Name, - IPAM: &network.IPAM{ - Driver: ingressNA.Network.IPAM.Driver.Name, - }, - Options: ingressNA.Network.DriverState.Options, - Ingress: true, - CheckDuplicate: true, - } - - for _, ic := range ingressNA.Network.IPAM.Configs { - c := network.IPAMConfig{ - Subnet: ic.Subnet, - IPRange: ic.Range, - Gateway: ic.Gateway, - } - options.IPAM.Config = append(options.IPAM.Config, c) - } - - _, err := e.backend.SetupIngress(clustertypes.NetworkCreateRequest{ - ID: ingressNA.Network.ID, - NetworkCreateRequest: types.NetworkCreateRequest{ - Name: ingressNA.Network.Spec.Annotations.Name, - NetworkCreate: options, - }, - }, ingressNA.Addresses[0]) - if err != nil { - return err - } - - return e.backend.GetAttachmentStore().ResetAttachments(attachments) -} - -// Controller returns a docker container runner. -func (e *executor) Controller(t *api.Task) (exec.Controller, error) { - dependencyGetter := template.NewTemplatedDependencyGetter(agent.Restrict(e.dependencies, t), t, nil) - - // Get the node description from the executor field - e.mutex.Lock() - nodeDescription := e.node - e.mutex.Unlock() - - if t.Spec.GetAttachment() != nil { - return newNetworkAttacherController(e.backend, e.imageBackend, e.volumeBackend, t, nodeDescription, dependencyGetter) - } - - var ctlr exec.Controller - switch r := t.Spec.GetRuntime().(type) { - case *api.TaskSpec_Generic: - logrus.WithFields(logrus.Fields{ - "kind": r.Generic.Kind, - "type_url": r.Generic.Payload.TypeUrl, - }).Debug("custom runtime requested") - runtimeKind, err := naming.Runtime(t.Spec) - if err != nil { - return ctlr, err - } - switch runtimeKind { - case string(swarmtypes.RuntimePlugin): - info, _ := e.backend.SystemInfo() - if !info.ExperimentalBuild { - return ctlr, fmt.Errorf("runtime type %q only supported in experimental", swarmtypes.RuntimePlugin) - } - c, err := plugin.NewController(e.pluginBackend, t) - if err != nil { - return ctlr, err - } - ctlr = c - default: - return ctlr, fmt.Errorf("unsupported runtime type: %q", runtimeKind) - } - case *api.TaskSpec_Container: - c, err := newController(e.backend, e.imageBackend, e.volumeBackend, t, nodeDescription, dependencyGetter) - if err != nil { - return ctlr, err - } - ctlr = c - default: - return ctlr, fmt.Errorf("unsupported runtime: %q", r) - } - - return ctlr, nil -} - -func (e *executor) SetNetworkBootstrapKeys(keys []*api.EncryptionKey) error { - nwKeys := []*networktypes.EncryptionKey{} - for _, key := range keys { - nwKey := &networktypes.EncryptionKey{ - Subsystem: key.Subsystem, - Algorithm: int32(key.Algorithm), - Key: make([]byte, len(key.Key)), - LamportTime: key.LamportTime, - } - copy(nwKey.Key, key.Key) - nwKeys = append(nwKeys, nwKey) - } - e.backend.SetNetworkBootstrapKeys(nwKeys) - - return nil -} - -func (e *executor) Secrets() exec.SecretsManager { - return e.dependencies.Secrets() -} - -func (e *executor) Configs() exec.ConfigsManager { - return e.dependencies.Configs() -} - -type sortedPlugins 
[]api.PluginDescription - -func (sp sortedPlugins) Len() int { return len(sp) } - -func (sp sortedPlugins) Swap(i, j int) { sp[i], sp[j] = sp[j], sp[i] } - -func (sp sortedPlugins) Less(i, j int) bool { - if sp[i].Type != sp[j].Type { - return sp[i].Type < sp[j].Type - } - return sp[i].Name < sp[j].Name -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate.go b/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate.go deleted file mode 100644 index cbe1f53c3..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/executor/container/validate.go +++ /dev/null @@ -1,40 +0,0 @@ -package container // import "github.com/docker/docker/daemon/cluster/executor/container" - -import ( - "errors" - "fmt" - "path/filepath" - - "github.com/docker/swarmkit/api" -) - -func validateMounts(mounts []api.Mount) error { - for _, mount := range mounts { - // Target must always be absolute - if !filepath.IsAbs(mount.Target) { - return fmt.Errorf("invalid mount target, must be an absolute path: %s", mount.Target) - } - - switch mount.Type { - // The checks on abs paths are required due to the container API confusing - // volume mounts as bind mounts when the source is absolute (and vice-versa) - // See #25253 - // TODO: This is probably not necessary once #22373 is merged - case api.MountTypeBind: - if !filepath.IsAbs(mount.Source) { - return fmt.Errorf("invalid bind mount source, must be an absolute path: %s", mount.Source) - } - case api.MountTypeVolume: - if filepath.IsAbs(mount.Source) { - return fmt.Errorf("invalid volume mount source, must not be an absolute path: %s", mount.Source) - } - case api.MountTypeTmpfs: - if mount.Source != "" { - return errors.New("invalid tmpfs source, source must be empty") - } - default: - return fmt.Errorf("invalid mount type: %s", mount.Type) - } - } - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/filters.go b/vendor/github.com/docker/docker/daemon/cluster/filters.go deleted file mode 100644 index 15469f907..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/filters.go +++ /dev/null @@ -1,123 +0,0 @@ -package cluster // import "github.com/docker/docker/daemon/cluster" - -import ( - "fmt" - "strings" - - "github.com/docker/docker/api/types/filters" - runconfigopts "github.com/docker/docker/runconfig/opts" - swarmapi "github.com/docker/swarmkit/api" -) - -func newListNodesFilters(filter filters.Args) (*swarmapi.ListNodesRequest_Filters, error) { - accepted := map[string]bool{ - "name": true, - "id": true, - "label": true, - "role": true, - "membership": true, - } - if err := filter.Validate(accepted); err != nil { - return nil, err - } - f := &swarmapi.ListNodesRequest_Filters{ - NamePrefixes: filter.Get("name"), - IDPrefixes: filter.Get("id"), - Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), - } - - for _, r := range filter.Get("role") { - if role, ok := swarmapi.NodeRole_value[strings.ToUpper(r)]; ok { - f.Roles = append(f.Roles, swarmapi.NodeRole(role)) - } else if r != "" { - return nil, fmt.Errorf("Invalid role filter: '%s'", r) - } - } - - for _, a := range filter.Get("membership") { - if membership, ok := swarmapi.NodeSpec_Membership_value[strings.ToUpper(a)]; ok { - f.Memberships = append(f.Memberships, swarmapi.NodeSpec_Membership(membership)) - } else if a != "" { - return nil, fmt.Errorf("Invalid membership filter: '%s'", a) - } - } - - return f, nil -} - -func newListTasksFilters(filter filters.Args, transformFunc func(filters.Args) error) 
(*swarmapi.ListTasksRequest_Filters, error) { - accepted := map[string]bool{ - "name": true, - "id": true, - "label": true, - "service": true, - "node": true, - "desired-state": true, - // UpToDate is not meant to be exposed to users. It's for - // internal use in checking create/update progress. Therefore, - // we prefix it with a '_'. - "_up-to-date": true, - "runtime": true, - } - if err := filter.Validate(accepted); err != nil { - return nil, err - } - if transformFunc != nil { - if err := transformFunc(filter); err != nil { - return nil, err - } - } - f := &swarmapi.ListTasksRequest_Filters{ - NamePrefixes: filter.Get("name"), - IDPrefixes: filter.Get("id"), - Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), - ServiceIDs: filter.Get("service"), - NodeIDs: filter.Get("node"), - UpToDate: len(filter.Get("_up-to-date")) != 0, - Runtimes: filter.Get("runtime"), - } - - for _, s := range filter.Get("desired-state") { - if state, ok := swarmapi.TaskState_value[strings.ToUpper(s)]; ok { - f.DesiredStates = append(f.DesiredStates, swarmapi.TaskState(state)) - } else if s != "" { - return nil, fmt.Errorf("Invalid desired-state filter: '%s'", s) - } - } - - return f, nil -} - -func newListSecretsFilters(filter filters.Args) (*swarmapi.ListSecretsRequest_Filters, error) { - accepted := map[string]bool{ - "names": true, - "name": true, - "id": true, - "label": true, - } - if err := filter.Validate(accepted); err != nil { - return nil, err - } - return &swarmapi.ListSecretsRequest_Filters{ - Names: filter.Get("names"), - NamePrefixes: filter.Get("name"), - IDPrefixes: filter.Get("id"), - Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), - }, nil -} - -func newListConfigsFilters(filter filters.Args) (*swarmapi.ListConfigsRequest_Filters, error) { - accepted := map[string]bool{ - "name": true, - "id": true, - "label": true, - } - if err := filter.Validate(accepted); err != nil { - return nil, err - } - return &swarmapi.ListConfigsRequest_Filters{ - NamePrefixes: filter.Get("name"), - IDPrefixes: filter.Get("id"), - Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), - }, nil -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/helpers.go b/vendor/github.com/docker/docker/daemon/cluster/helpers.go deleted file mode 100644 index 653593e1c..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/helpers.go +++ /dev/null @@ -1,246 +0,0 @@ -package cluster // import "github.com/docker/docker/daemon/cluster" - -import ( - "context" - "fmt" - - "github.com/docker/docker/errdefs" - swarmapi "github.com/docker/swarmkit/api" - "github.com/pkg/errors" -) - -func getSwarm(ctx context.Context, c swarmapi.ControlClient) (*swarmapi.Cluster, error) { - rl, err := c.ListClusters(ctx, &swarmapi.ListClustersRequest{}) - if err != nil { - return nil, err - } - - if len(rl.Clusters) == 0 { - return nil, errors.WithStack(errNoSwarm) - } - - // TODO: assume one cluster only - return rl.Clusters[0], nil -} - -func getNode(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Node, error) { - // GetNode to match via full ID. - if rg, err := c.GetNode(ctx, &swarmapi.GetNodeRequest{NodeID: input}); err == nil { - return rg.Node, nil - } - - // If any error (including NotFound), ListNodes to match via full name. 
- rl, err := c.ListNodes(ctx, &swarmapi.ListNodesRequest{ - Filters: &swarmapi.ListNodesRequest_Filters{ - Names: []string{input}, - }, - }) - if err != nil || len(rl.Nodes) == 0 { - // If any error or 0 result, ListNodes to match via ID prefix. - rl, err = c.ListNodes(ctx, &swarmapi.ListNodesRequest{ - Filters: &swarmapi.ListNodesRequest_Filters{ - IDPrefixes: []string{input}, - }, - }) - } - if err != nil { - return nil, err - } - - if len(rl.Nodes) == 0 { - err := fmt.Errorf("node %s not found", input) - return nil, errdefs.NotFound(err) - } - - if l := len(rl.Nodes); l > 1 { - return nil, errdefs.InvalidParameter(fmt.Errorf("node %s is ambiguous (%d matches found)", input, l)) - } - - return rl.Nodes[0], nil -} - -func getService(ctx context.Context, c swarmapi.ControlClient, input string, insertDefaults bool) (*swarmapi.Service, error) { - // GetService to match via full ID. - if rg, err := c.GetService(ctx, &swarmapi.GetServiceRequest{ServiceID: input, InsertDefaults: insertDefaults}); err == nil { - return rg.Service, nil - } - - // If any error (including NotFound), ListServices to match via full name. - rl, err := c.ListServices(ctx, &swarmapi.ListServicesRequest{ - Filters: &swarmapi.ListServicesRequest_Filters{ - Names: []string{input}, - }, - }) - if err != nil || len(rl.Services) == 0 { - // If any error or 0 result, ListServices to match via ID prefix. - rl, err = c.ListServices(ctx, &swarmapi.ListServicesRequest{ - Filters: &swarmapi.ListServicesRequest_Filters{ - IDPrefixes: []string{input}, - }, - }) - } - if err != nil { - return nil, err - } - - if len(rl.Services) == 0 { - err := fmt.Errorf("service %s not found", input) - return nil, errdefs.NotFound(err) - } - - if l := len(rl.Services); l > 1 { - return nil, errdefs.InvalidParameter(fmt.Errorf("service %s is ambiguous (%d matches found)", input, l)) - } - - if !insertDefaults { - return rl.Services[0], nil - } - - rg, err := c.GetService(ctx, &swarmapi.GetServiceRequest{ServiceID: rl.Services[0].ID, InsertDefaults: true}) - if err == nil { - return rg.Service, nil - } - return nil, err -} - -func getTask(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Task, error) { - // GetTask to match via full ID. - if rg, err := c.GetTask(ctx, &swarmapi.GetTaskRequest{TaskID: input}); err == nil { - return rg.Task, nil - } - - // If any error (including NotFound), ListTasks to match via full name. - rl, err := c.ListTasks(ctx, &swarmapi.ListTasksRequest{ - Filters: &swarmapi.ListTasksRequest_Filters{ - Names: []string{input}, - }, - }) - if err != nil || len(rl.Tasks) == 0 { - // If any error or 0 result, ListTasks to match via ID prefix. - rl, err = c.ListTasks(ctx, &swarmapi.ListTasksRequest{ - Filters: &swarmapi.ListTasksRequest_Filters{ - IDPrefixes: []string{input}, - }, - }) - } - if err != nil { - return nil, err - } - - if len(rl.Tasks) == 0 { - err := fmt.Errorf("task %s not found", input) - return nil, errdefs.NotFound(err) - } - - if l := len(rl.Tasks); l > 1 { - return nil, errdefs.InvalidParameter(fmt.Errorf("task %s is ambiguous (%d matches found)", input, l)) - } - - return rl.Tasks[0], nil -} - -func getSecret(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Secret, error) { - // attempt to lookup secret by full ID - if rg, err := c.GetSecret(ctx, &swarmapi.GetSecretRequest{SecretID: input}); err == nil { - return rg.Secret, nil - } - - // If any error (including NotFound), ListSecrets to match via full name. 
- rl, err := c.ListSecrets(ctx, &swarmapi.ListSecretsRequest{ - Filters: &swarmapi.ListSecretsRequest_Filters{ - Names: []string{input}, - }, - }) - if err != nil || len(rl.Secrets) == 0 { - // If any error or 0 result, ListSecrets to match via ID prefix. - rl, err = c.ListSecrets(ctx, &swarmapi.ListSecretsRequest{ - Filters: &swarmapi.ListSecretsRequest_Filters{ - IDPrefixes: []string{input}, - }, - }) - } - if err != nil { - return nil, err - } - - if len(rl.Secrets) == 0 { - err := fmt.Errorf("secret %s not found", input) - return nil, errdefs.NotFound(err) - } - - if l := len(rl.Secrets); l > 1 { - return nil, errdefs.InvalidParameter(fmt.Errorf("secret %s is ambiguous (%d matches found)", input, l)) - } - - return rl.Secrets[0], nil -} - -func getConfig(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Config, error) { - // attempt to lookup config by full ID - if rg, err := c.GetConfig(ctx, &swarmapi.GetConfigRequest{ConfigID: input}); err == nil { - return rg.Config, nil - } - - // If any error (including NotFound), ListConfigs to match via full name. - rl, err := c.ListConfigs(ctx, &swarmapi.ListConfigsRequest{ - Filters: &swarmapi.ListConfigsRequest_Filters{ - Names: []string{input}, - }, - }) - if err != nil || len(rl.Configs) == 0 { - // If any error or 0 result, ListConfigs to match via ID prefix. - rl, err = c.ListConfigs(ctx, &swarmapi.ListConfigsRequest{ - Filters: &swarmapi.ListConfigsRequest_Filters{ - IDPrefixes: []string{input}, - }, - }) - } - if err != nil { - return nil, err - } - - if len(rl.Configs) == 0 { - err := fmt.Errorf("config %s not found", input) - return nil, errdefs.NotFound(err) - } - - if l := len(rl.Configs); l > 1 { - return nil, errdefs.InvalidParameter(fmt.Errorf("config %s is ambiguous (%d matches found)", input, l)) - } - - return rl.Configs[0], nil -} - -func getNetwork(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Network, error) { - // GetNetwork to match via full ID. - if rg, err := c.GetNetwork(ctx, &swarmapi.GetNetworkRequest{NetworkID: input}); err == nil { - return rg.Network, nil - } - - // If any error (including NotFound), ListNetworks to match via ID prefix and full name. 
- rl, err := c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{ - Filters: &swarmapi.ListNetworksRequest_Filters{ - Names: []string{input}, - }, - }) - if err != nil || len(rl.Networks) == 0 { - rl, err = c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{ - Filters: &swarmapi.ListNetworksRequest_Filters{ - IDPrefixes: []string{input}, - }, - }) - } - if err != nil { - return nil, err - } - - if len(rl.Networks) == 0 { - return nil, fmt.Errorf("network %s not found", input) - } - - if l := len(rl.Networks); l > 1 { - return nil, errdefs.InvalidParameter(fmt.Errorf("network %s is ambiguous (%d matches found)", input, l)) - } - - return rl.Networks[0], nil -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/listen_addr.go b/vendor/github.com/docker/docker/daemon/cluster/listen_addr.go deleted file mode 100644 index e1ebfec8d..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/listen_addr.go +++ /dev/null @@ -1,301 +0,0 @@ -package cluster // import "github.com/docker/docker/daemon/cluster" - -import ( - "fmt" - "net" -) - -const ( - errNoSuchInterface configError = "no such interface" - errNoIP configError = "could not find the system's IP address" - errMustSpecifyListenAddr configError = "must specify a listening address because the address to advertise is not recognized as a system address, and a system's IP address to use could not be uniquely identified" - errBadNetworkIdentifier configError = "must specify a valid IP address or interface name" - errBadListenAddr configError = "listen address must be an IP address or network interface (with optional port number)" - errBadAdvertiseAddr configError = "advertise address must be a non-zero IP address or network interface (with optional port number)" - errBadDataPathAddr configError = "data path address must be a non-zero IP address or network interface (without a port number)" - errBadDefaultAdvertiseAddr configError = "default advertise address must be a non-zero IP address or network interface (without a port number)" -) - -func resolveListenAddr(specifiedAddr string) (string, string, error) { - specifiedHost, specifiedPort, err := net.SplitHostPort(specifiedAddr) - if err != nil { - return "", "", fmt.Errorf("could not parse listen address %s", specifiedAddr) - } - // Does the host component match any of the interface names on the - // system? If so, use the address from that interface. - specifiedIP, err := resolveInputIPAddr(specifiedHost, true) - if err != nil { - if err == errBadNetworkIdentifier { - err = errBadListenAddr - } - return "", "", err - } - - return specifiedIP.String(), specifiedPort, nil -} - -func (c *Cluster) resolveAdvertiseAddr(advertiseAddr, listenAddrPort string) (string, string, error) { - // Approach: - // - If an advertise address is specified, use that. Resolve the - // interface's address if an interface was specified in - // advertiseAddr. Fill in the port from listenAddrPort if necessary. - // - If DefaultAdvertiseAddr is not empty, use that with the port from - // listenAddrPort. Resolve the interface's address from - // if an interface name was specified in DefaultAdvertiseAddr. - // - Otherwise, try to autodetect the system's address. Use the port in - // listenAddrPort with this address if autodetection succeeds. 
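// To make the precedence above concrete (all addresses here are
// illustrative values, not taken from this file), assume listenAddrPort
// is "2377":
//
//   advertiseAddr "192.0.2.5:4567"             -> "192.0.2.5", "4567"
//   advertiseAddr "eth0" (an interface name)   -> eth0's address, "2377"
//   advertiseAddr "" with DefaultAdvertiseAddr
//     "198.51.100.7"                           -> "198.51.100.7", "2377"
//   both empty                                 -> autodetected address, "2377"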
- - if advertiseAddr != "" { - advertiseHost, advertisePort, err := net.SplitHostPort(advertiseAddr) - if err != nil { - // Not a host:port specification - advertiseHost = advertiseAddr - advertisePort = listenAddrPort - } - // Does the host component match any of the interface names on the - // system? If so, use the address from that interface. - advertiseIP, err := resolveInputIPAddr(advertiseHost, false) - if err != nil { - if err == errBadNetworkIdentifier { - err = errBadAdvertiseAddr - } - return "", "", err - } - - return advertiseIP.String(), advertisePort, nil - } - - if c.config.DefaultAdvertiseAddr != "" { - // Does the default advertise address component match any of the - // interface names on the system? If so, use the address from - // that interface. - defaultAdvertiseIP, err := resolveInputIPAddr(c.config.DefaultAdvertiseAddr, false) - if err != nil { - if err == errBadNetworkIdentifier { - err = errBadDefaultAdvertiseAddr - } - return "", "", err - } - - return defaultAdvertiseIP.String(), listenAddrPort, nil - } - - systemAddr, err := c.resolveSystemAddr() - if err != nil { - return "", "", err - } - return systemAddr.String(), listenAddrPort, nil -} - -func resolveDataPathAddr(dataPathAddr string) (string, error) { - if dataPathAddr == "" { - // dataPathAddr is not defined - return "", nil - } - // If a data path flag is specified try to resolve the IP address. - dataPathIP, err := resolveInputIPAddr(dataPathAddr, false) - if err != nil { - if err == errBadNetworkIdentifier { - err = errBadDataPathAddr - } - return "", err - } - return dataPathIP.String(), nil -} - -func resolveInterfaceAddr(specifiedInterface string) (net.IP, error) { - // Use a specific interface's IP address. - intf, err := net.InterfaceByName(specifiedInterface) - if err != nil { - return nil, errNoSuchInterface - } - - addrs, err := intf.Addrs() - if err != nil { - return nil, err - } - - var interfaceAddr4, interfaceAddr6 net.IP - - for _, addr := range addrs { - ipAddr, ok := addr.(*net.IPNet) - - if ok { - if ipAddr.IP.To4() != nil { - // IPv4 - if interfaceAddr4 != nil { - return nil, configError(fmt.Sprintf("interface %s has more than one IPv4 address (%s and %s)", specifiedInterface, interfaceAddr4, ipAddr.IP)) - } - interfaceAddr4 = ipAddr.IP - } else { - // IPv6 - if interfaceAddr6 != nil { - return nil, configError(fmt.Sprintf("interface %s has more than one IPv6 address (%s and %s)", specifiedInterface, interfaceAddr6, ipAddr.IP)) - } - interfaceAddr6 = ipAddr.IP - } - } - } - - if interfaceAddr4 == nil && interfaceAddr6 == nil { - return nil, configError(fmt.Sprintf("interface %s has no usable IPv4 or IPv6 address", specifiedInterface)) - } - - // In the case that there's exactly one IPv4 address - // and exactly one IPv6 address, favor IPv4 over IPv6. 
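// For example (illustrative addresses): an interface carrying both
// 192.0.2.10/24 and 2001:db8::1/64 resolves to 192.0.2.10, while an
// interface carrying only 2001:db8::1/64 resolves to 2001:db8::1.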
- if interfaceAddr4 != nil { - return interfaceAddr4, nil - } - return interfaceAddr6, nil -} - -// resolveInputIPAddr tries to resolve the IP address from the string passed as input -// - tries to match the string as an interface name, if so returns the IP address associated with it -// - on failure of previous step tries to parse the string as an IP address itself -// if succeeds returns the IP address -func resolveInputIPAddr(input string, isUnspecifiedValid bool) (net.IP, error) { - // Try to see if it is an interface name - interfaceAddr, err := resolveInterfaceAddr(input) - if err == nil { - return interfaceAddr, nil - } - // String matched interface but there is a potential ambiguity to be resolved - if err != errNoSuchInterface { - return nil, err - } - - // String is not an interface check if it is a valid IP - if ip := net.ParseIP(input); ip != nil && (isUnspecifiedValid || !ip.IsUnspecified()) { - return ip, nil - } - - // Not valid IP found - return nil, errBadNetworkIdentifier -} - -func (c *Cluster) resolveSystemAddrViaSubnetCheck() (net.IP, error) { - // Use the system's only IP address, or fail if there are - // multiple addresses to choose from. Skip interfaces which - // are managed by docker via subnet check. - interfaces, err := net.Interfaces() - if err != nil { - return nil, err - } - - var systemAddr net.IP - var systemInterface string - - // List Docker-managed subnets - v4Subnets, v6Subnets := c.config.NetworkSubnetsProvider.Subnets() - -ifaceLoop: - for _, intf := range interfaces { - // Skip inactive interfaces and loopback interfaces - if (intf.Flags&net.FlagUp == 0) || (intf.Flags&net.FlagLoopback) != 0 { - continue - } - - addrs, err := intf.Addrs() - if err != nil { - continue - } - - var interfaceAddr4, interfaceAddr6 net.IP - - for _, addr := range addrs { - ipAddr, ok := addr.(*net.IPNet) - - // Skip loopback and link-local addresses - if !ok || !ipAddr.IP.IsGlobalUnicast() { - continue - } - - if ipAddr.IP.To4() != nil { - // IPv4 - - // Ignore addresses in subnets that are managed by Docker. - for _, subnet := range v4Subnets { - if subnet.Contains(ipAddr.IP) { - continue ifaceLoop - } - } - - if interfaceAddr4 != nil { - return nil, errMultipleIPs(intf.Name, intf.Name, interfaceAddr4, ipAddr.IP) - } - - interfaceAddr4 = ipAddr.IP - } else { - // IPv6 - - // Ignore addresses in subnets that are managed by Docker. - for _, subnet := range v6Subnets { - if subnet.Contains(ipAddr.IP) { - continue ifaceLoop - } - } - - if interfaceAddr6 != nil { - return nil, errMultipleIPs(intf.Name, intf.Name, interfaceAddr6, ipAddr.IP) - } - - interfaceAddr6 = ipAddr.IP - } - } - - // In the case that this interface has exactly one IPv4 address - // and exactly one IPv6 address, favor IPv4 over IPv6. 
- if interfaceAddr4 != nil { - if systemAddr != nil { - return nil, errMultipleIPs(systemInterface, intf.Name, systemAddr, interfaceAddr4) - } - systemAddr = interfaceAddr4 - systemInterface = intf.Name - } else if interfaceAddr6 != nil { - if systemAddr != nil { - return nil, errMultipleIPs(systemInterface, intf.Name, systemAddr, interfaceAddr6) - } - systemAddr = interfaceAddr6 - systemInterface = intf.Name - } - } - - if systemAddr == nil { - return nil, errNoIP - } - - return systemAddr, nil -} - -func listSystemIPs() []net.IP { - interfaces, err := net.Interfaces() - if err != nil { - return nil - } - - var systemAddrs []net.IP - - for _, intf := range interfaces { - addrs, err := intf.Addrs() - if err != nil { - continue - } - - for _, addr := range addrs { - ipAddr, ok := addr.(*net.IPNet) - - if ok { - systemAddrs = append(systemAddrs, ipAddr.IP) - } - } - } - - return systemAddrs -} - -func errMultipleIPs(interfaceA, interfaceB string, addrA, addrB net.IP) error { - if interfaceA == interfaceB { - return configError(fmt.Sprintf("could not choose an IP address to advertise since this system has multiple addresses on interface %s (%s and %s)", interfaceA, addrA, addrB)) - } - return configError(fmt.Sprintf("could not choose an IP address to advertise since this system has multiple addresses on different interfaces (%s on %s and %s on %s)", addrA, interfaceA, addrB, interfaceB)) -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/listen_addr_linux.go b/vendor/github.com/docker/docker/daemon/cluster/listen_addr_linux.go deleted file mode 100644 index 62e4f61a6..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/listen_addr_linux.go +++ /dev/null @@ -1,89 +0,0 @@ -package cluster // import "github.com/docker/docker/daemon/cluster" - -import ( - "net" - - "github.com/vishvananda/netlink" -) - -func (c *Cluster) resolveSystemAddr() (net.IP, error) { - // Use the system's only device IP address, or fail if there are - // multiple addresses to choose from. - interfaces, err := netlink.LinkList() - if err != nil { - return nil, err - } - - var ( - systemAddr net.IP - systemInterface string - deviceFound bool - ) - - for _, intf := range interfaces { - // Skip non device or inactive interfaces - if intf.Type() != "device" || intf.Attrs().Flags&net.FlagUp == 0 { - continue - } - - addrs, err := netlink.AddrList(intf, netlink.FAMILY_ALL) - if err != nil { - continue - } - - var interfaceAddr4, interfaceAddr6 net.IP - - for _, addr := range addrs { - ipAddr := addr.IPNet.IP - - // Skip loopback and link-local addresses - if !ipAddr.IsGlobalUnicast() { - continue - } - - // At least one non-loopback device is found and it is administratively up - deviceFound = true - - if ipAddr.To4() != nil { - if interfaceAddr4 != nil { - return nil, errMultipleIPs(intf.Attrs().Name, intf.Attrs().Name, interfaceAddr4, ipAddr) - } - interfaceAddr4 = ipAddr - } else { - if interfaceAddr6 != nil { - return nil, errMultipleIPs(intf.Attrs().Name, intf.Attrs().Name, interfaceAddr6, ipAddr) - } - interfaceAddr6 = ipAddr - } - } - - // In the case that this interface has exactly one IPv4 address - // and exactly one IPv6 address, favor IPv4 over IPv6. 
- if interfaceAddr4 != nil { - if systemAddr != nil { - return nil, errMultipleIPs(systemInterface, intf.Attrs().Name, systemAddr, interfaceAddr4) - } - systemAddr = interfaceAddr4 - systemInterface = intf.Attrs().Name - } else if interfaceAddr6 != nil { - if systemAddr != nil { - return nil, errMultipleIPs(systemInterface, intf.Attrs().Name, systemAddr, interfaceAddr6) - } - systemAddr = interfaceAddr6 - systemInterface = intf.Attrs().Name - } - } - - if systemAddr == nil { - if !deviceFound { - // If no non-loopback device type interface is found, - // fall back to the regular auto-detection mechanism. - // This is to cover the case where docker is running - // inside a container (eths are in fact veths). - return c.resolveSystemAddrViaSubnetCheck() - } - return nil, errNoIP - } - - return systemAddr, nil -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/listen_addr_others.go b/vendor/github.com/docker/docker/daemon/cluster/listen_addr_others.go deleted file mode 100644 index fe75848e5..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/listen_addr_others.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !linux - -package cluster // import "github.com/docker/docker/daemon/cluster" - -import "net" - -func (c *Cluster) resolveSystemAddr() (net.IP, error) { - return c.resolveSystemAddrViaSubnetCheck() -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/networks.go b/vendor/github.com/docker/docker/daemon/cluster/networks.go deleted file mode 100644 index b8e31baa1..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/networks.go +++ /dev/null @@ -1,316 +0,0 @@ -package cluster // import "github.com/docker/docker/daemon/cluster" - -import ( - "context" - "fmt" - - apitypes "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/network" - types "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/daemon/cluster/convert" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/runconfig" - swarmapi "github.com/docker/swarmkit/api" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// GetNetworks returns all current cluster managed networks. -func (c *Cluster) GetNetworks() ([]apitypes.NetworkResource, error) { - list, err := c.getNetworks(nil) - if err != nil { - return nil, err - } - removePredefinedNetworks(&list) - return list, nil -} - -func removePredefinedNetworks(networks *[]apitypes.NetworkResource) { - if networks == nil { - return - } - var idxs []int - for i, n := range *networks { - if v, ok := n.Labels["com.docker.swarm.predefined"]; ok && v == "true" { - idxs = append(idxs, i) - } - } - for i, idx := range idxs { - idx -= i - *networks = append((*networks)[:idx], (*networks)[idx+1:]...) - } -} - -func (c *Cluster) getNetworks(filters *swarmapi.ListNetworksRequest_Filters) ([]apitypes.NetworkResource, error) { - c.mu.RLock() - defer c.mu.RUnlock() - - state := c.currentNodeState() - if !state.IsActiveManager() { - return nil, c.errNoManager(state) - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - r, err := state.controlClient.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: filters}) - if err != nil { - return nil, err - } - - networks := make([]apitypes.NetworkResource, 0, len(r.Networks)) - - for _, network := range r.Networks { - networks = append(networks, convert.BasicNetworkFromGRPC(*network)) - } - - return networks, nil -} - -// GetNetwork returns a cluster network by an ID. 
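// Like the other lookup helpers in this package, getNetwork (used below)
// tries a full-ID match first, then a name match, then an ID-prefix
// match, and rejects ambiguous prefixes; e.g. (hypothetical values)
// "ingress" would resolve by name and "3qu" by ID prefix.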
-func (c *Cluster) GetNetwork(input string) (apitypes.NetworkResource, error) { - var network *swarmapi.Network - - if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { - n, err := getNetwork(ctx, state.controlClient, input) - if err != nil { - return err - } - network = n - return nil - }); err != nil { - return apitypes.NetworkResource{}, err - } - return convert.BasicNetworkFromGRPC(*network), nil -} - -// GetNetworksByName returns cluster managed networks by name. -// It is ok to have multiple networks here. #18864 -func (c *Cluster) GetNetworksByName(name string) ([]apitypes.NetworkResource, error) { - // Note that swarmapi.GetNetworkRequest.Name is not functional. - // So we cannot just use that with c.GetNetwork. - return c.getNetworks(&swarmapi.ListNetworksRequest_Filters{ - Names: []string{name}, - }) -} - -func attacherKey(target, containerID string) string { - return containerID + ":" + target -} - -// UpdateAttachment signals the attachment config to the attachment -// waiter who is trying to start or attach the container to the -// network. -func (c *Cluster) UpdateAttachment(target, containerID string, config *network.NetworkingConfig) error { - c.mu.Lock() - attacher, ok := c.attachers[attacherKey(target, containerID)] - if !ok || attacher == nil { - c.mu.Unlock() - return fmt.Errorf("could not find attacher for container %s to network %s", containerID, target) - } - if attacher.inProgress { - logrus.Debugf("Discarding redundant notice of resource allocation on network %s for task id %s", target, attacher.taskID) - c.mu.Unlock() - return nil - } - attacher.inProgress = true - c.mu.Unlock() - - attacher.attachWaitCh <- config - - return nil -} - -// WaitForDetachment waits for the container to stop or detach from -// the network. -func (c *Cluster) WaitForDetachment(ctx context.Context, networkName, networkID, taskID, containerID string) error { - c.mu.RLock() - attacher, ok := c.attachers[attacherKey(networkName, containerID)] - if !ok { - attacher, ok = c.attachers[attacherKey(networkID, containerID)] - } - state := c.currentNodeState() - if state.swarmNode == nil || state.swarmNode.Agent() == nil { - c.mu.RUnlock() - return errors.New("invalid cluster node while waiting for detachment") - } - - c.mu.RUnlock() - agent := state.swarmNode.Agent() - if ok && attacher != nil && - attacher.detachWaitCh != nil && - attacher.attachCompleteCh != nil { - // Attachment may be in progress still so wait for - // attachment to complete. - select { - case <-attacher.attachCompleteCh: - case <-ctx.Done(): - return ctx.Err() - } - - if attacher.taskID == taskID { - select { - case <-attacher.detachWaitCh: - case <-ctx.Done(): - return ctx.Err() - } - } - } - - return agent.ResourceAllocator().DetachNetwork(ctx, taskID) -} - -// AttachNetwork generates an attachment request towards the manager. 
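// A rough sketch of the attach/detach handshake implemented below and in
// UpdateAttachment/WaitForDetachment above, keyed by
// attacherKey(target, containerID), i.e. "containerID:target":
//
//   AttachNetwork     registers the attacher and blocks on attachWaitCh
//   UpdateAttachment  delivers the *network.NetworkingConfig on attachWaitCh
//   WaitForDetachment waits on attachCompleteCh, then on detachWaitCh
//   DetachNetwork     closes detachWaitCh, unblocking the waiter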
-func (c *Cluster) AttachNetwork(target string, containerID string, addresses []string) (*network.NetworkingConfig, error) { - aKey := attacherKey(target, containerID) - c.mu.Lock() - state := c.currentNodeState() - if state.swarmNode == nil || state.swarmNode.Agent() == nil { - c.mu.Unlock() - return nil, errors.New("invalid cluster node while attaching to network") - } - if attacher, ok := c.attachers[aKey]; ok { - c.mu.Unlock() - return attacher.config, nil - } - - agent := state.swarmNode.Agent() - attachWaitCh := make(chan *network.NetworkingConfig) - detachWaitCh := make(chan struct{}) - attachCompleteCh := make(chan struct{}) - c.attachers[aKey] = &attacher{ - attachWaitCh: attachWaitCh, - attachCompleteCh: attachCompleteCh, - detachWaitCh: detachWaitCh, - } - c.mu.Unlock() - - ctx, cancel := c.getRequestContext() - defer cancel() - - taskID, err := agent.ResourceAllocator().AttachNetwork(ctx, containerID, target, addresses) - if err != nil { - c.mu.Lock() - delete(c.attachers, aKey) - c.mu.Unlock() - return nil, fmt.Errorf("Could not attach to network %s: %v", target, err) - } - - c.mu.Lock() - c.attachers[aKey].taskID = taskID - close(attachCompleteCh) - c.mu.Unlock() - - logrus.Debugf("Successfully attached to network %s with task id %s", target, taskID) - - release := func() { - ctx, cancel := c.getRequestContext() - defer cancel() - if err := agent.ResourceAllocator().DetachNetwork(ctx, taskID); err != nil { - logrus.Errorf("Failed remove network attachment %s to network %s on allocation failure: %v", - taskID, target, err) - } - } - - var config *network.NetworkingConfig - select { - case config = <-attachWaitCh: - case <-ctx.Done(): - release() - return nil, fmt.Errorf("attaching to network failed, make sure your network options are correct and check manager logs: %v", ctx.Err()) - } - - c.mu.Lock() - c.attachers[aKey].config = config - c.mu.Unlock() - - logrus.Debugf("Successfully allocated resources on network %s for task id %s", target, taskID) - - return config, nil -} - -// DetachNetwork unblocks the waiters waiting on WaitForDetachment so -// that a request to detach can be generated towards the manager. -func (c *Cluster) DetachNetwork(target string, containerID string) error { - aKey := attacherKey(target, containerID) - - c.mu.Lock() - attacher, ok := c.attachers[aKey] - delete(c.attachers, aKey) - c.mu.Unlock() - - if !ok { - return fmt.Errorf("could not find network attachment for container %s to network %s", containerID, target) - } - - close(attacher.detachWaitCh) - return nil -} - -// CreateNetwork creates a new cluster managed network. -func (c *Cluster) CreateNetwork(s apitypes.NetworkCreateRequest) (string, error) { - if runconfig.IsPreDefinedNetwork(s.Name) { - err := notAllowedError(fmt.Sprintf("%s is a pre-defined network and cannot be created", s.Name)) - return "", errors.WithStack(err) - } - - var resp *swarmapi.CreateNetworkResponse - if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { - networkSpec := convert.BasicNetworkCreateToGRPC(s) - r, err := state.controlClient.CreateNetwork(ctx, &swarmapi.CreateNetworkRequest{Spec: &networkSpec}) - if err != nil { - return err - } - resp = r - return nil - }); err != nil { - return "", err - } - - return resp.Network.ID, nil -} - -// RemoveNetwork removes a cluster network. 
-func (c *Cluster) RemoveNetwork(input string) error { - return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { - network, err := getNetwork(ctx, state.controlClient, input) - if err != nil { - return err - } - - _, err = state.controlClient.RemoveNetwork(ctx, &swarmapi.RemoveNetworkRequest{NetworkID: network.ID}) - return err - }) -} - -func (c *Cluster) populateNetworkID(ctx context.Context, client swarmapi.ControlClient, s *types.ServiceSpec) error { - // Always prefer NetworkAttachmentConfigs from TaskTemplate - // but fallback to service spec for backward compatibility - networks := s.TaskTemplate.Networks - if len(networks) == 0 { - networks = s.Networks - } - for i, n := range networks { - apiNetwork, err := getNetwork(ctx, client, n.Target) - if err != nil { - ln, _ := c.config.Backend.FindNetwork(n.Target) - if ln != nil && runconfig.IsPreDefinedNetwork(ln.Name()) { - // Need to retrieve the corresponding predefined swarm network - // and use its id for the request. - apiNetwork, err = getNetwork(ctx, client, ln.Name()) - if err != nil { - return errors.Wrap(errdefs.NotFound(err), "could not find the corresponding predefined swarm network") - } - goto setid - } - if ln != nil && !ln.Info().Dynamic() { - errMsg := fmt.Sprintf("The network %s cannot be used with services. Only networks scoped to the swarm can be used, such as those created with the overlay driver.", ln.Name()) - return errors.WithStack(notAllowedError(errMsg)) - } - return err - } - setid: - networks[i].Target = apiNetwork.ID - } - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/noderunner.go b/vendor/github.com/docker/docker/daemon/cluster/noderunner.go deleted file mode 100644 index 87e65aaea..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/noderunner.go +++ /dev/null @@ -1,388 +0,0 @@ -package cluster // import "github.com/docker/docker/daemon/cluster" - -import ( - "context" - "fmt" - "path/filepath" - "runtime" - "strings" - "sync" - "time" - - types "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/daemon/cluster/executor/container" - lncluster "github.com/docker/libnetwork/cluster" - swarmapi "github.com/docker/swarmkit/api" - swarmnode "github.com/docker/swarmkit/node" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// nodeRunner implements a manager for continuously running swarmkit node, restarting them with backoff delays if needed. -type nodeRunner struct { - nodeState - mu sync.RWMutex - done chan struct{} // closed when swarmNode exits - ready chan struct{} // closed when swarmNode becomes active - reconnectDelay time.Duration - config nodeStartConfig - - repeatedRun bool - cancelReconnect func() - stopping bool - cluster *Cluster // only for accessing config helpers, never call any methods. TODO: change to config struct -} - -// nodeStartConfig holds configuration needed to start a new node. Exported -// fields of this structure are saved to disk in json. Unexported fields -// contain data that shouldn't be persisted between daemon reloads. -type nodeStartConfig struct { - // LocalAddr is this machine's local IP or hostname, if specified. - LocalAddr string - // RemoteAddr is the address that was given to "swarm join". It is used - // to find LocalAddr if necessary. - RemoteAddr string - // ListenAddr is the address we bind to, including a port. 
- ListenAddr string - // AdvertiseAddr is the address other nodes should connect to, - // including a port. - AdvertiseAddr string - // DataPathAddr is the address that has to be used for the data path - DataPathAddr string - // JoinInProgress is set to true if a join operation has started, but - // not completed yet. - JoinInProgress bool - - joinAddr string - forceNewCluster bool - joinToken string - lockKey []byte - autolock bool - availability types.NodeAvailability -} - -func (n *nodeRunner) Ready() chan error { - c := make(chan error, 1) - n.mu.RLock() - ready, done := n.ready, n.done - n.mu.RUnlock() - go func() { - select { - case <-ready: - case <-done: - } - select { - case <-ready: - default: - n.mu.RLock() - c <- n.err - n.mu.RUnlock() - } - close(c) - }() - return c -} - -func (n *nodeRunner) Start(conf nodeStartConfig) error { - n.mu.Lock() - defer n.mu.Unlock() - - n.reconnectDelay = initialReconnectDelay - - return n.start(conf) -} - -func (n *nodeRunner) start(conf nodeStartConfig) error { - var control string - if runtime.GOOS == "windows" { - control = `\\.\pipe\` + controlSocket - } else { - control = filepath.Join(n.cluster.runtimeRoot, controlSocket) - } - - joinAddr := conf.joinAddr - if joinAddr == "" && conf.JoinInProgress { - // We must have been restarted while trying to join a cluster. - // Continue trying to join instead of forming our own cluster. - joinAddr = conf.RemoteAddr - } - - // Hostname is not set here. Instead, it is obtained from - // the node description that is reported periodically - swarmnodeConfig := swarmnode.Config{ - ForceNewCluster: conf.forceNewCluster, - ListenControlAPI: control, - ListenRemoteAPI: conf.ListenAddr, - AdvertiseRemoteAPI: conf.AdvertiseAddr, - JoinAddr: joinAddr, - StateDir: n.cluster.root, - JoinToken: conf.joinToken, - Executor: container.NewExecutor( - n.cluster.config.Backend, - n.cluster.config.PluginBackend, - n.cluster.config.ImageBackend, - n.cluster.config.VolumeBackend, - ), - HeartbeatTick: n.cluster.config.RaftHeartbeatTick, - // Recommended value in etcd/raft is 10 x (HeartbeatTick). - // Lower values were seen to have caused instability because of - // frequent leader elections when running on flakey networks. 
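// For a sense of scale (illustrative numbers, not values from this
// file): with RaftHeartbeatTick = 1, the 10x recommendation above gives
// RaftElectionTick = 10; both are counted in raft ticks.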
- ElectionTick: n.cluster.config.RaftElectionTick, - UnlockKey: conf.lockKey, - AutoLockManagers: conf.autolock, - PluginGetter: n.cluster.config.Backend.PluginGetter(), - } - if conf.availability != "" { - avail, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(conf.availability))] - if !ok { - return fmt.Errorf("invalid Availability: %q", conf.availability) - } - swarmnodeConfig.Availability = swarmapi.NodeSpec_Availability(avail) - } - node, err := swarmnode.New(&swarmnodeConfig) - if err != nil { - return err - } - if err := node.Start(context.Background()); err != nil { - return err - } - - n.done = make(chan struct{}) - n.ready = make(chan struct{}) - n.swarmNode = node - if conf.joinAddr != "" { - conf.JoinInProgress = true - } - n.config = conf - savePersistentState(n.cluster.root, conf) - - ctx, cancel := context.WithCancel(context.Background()) - - go func() { - n.handleNodeExit(node) - cancel() - }() - - go n.handleReadyEvent(ctx, node, n.ready) - go n.handleControlSocketChange(ctx, node) - - return nil -} - -func (n *nodeRunner) handleControlSocketChange(ctx context.Context, node *swarmnode.Node) { - for conn := range node.ListenControlSocket(ctx) { - n.mu.Lock() - if n.grpcConn != conn { - if conn == nil { - n.controlClient = nil - n.logsClient = nil - } else { - n.controlClient = swarmapi.NewControlClient(conn) - n.logsClient = swarmapi.NewLogsClient(conn) - // push store changes to daemon - go n.watchClusterEvents(ctx, conn) - } - } - n.grpcConn = conn - n.mu.Unlock() - n.cluster.SendClusterEvent(lncluster.EventSocketChange) - } -} - -func (n *nodeRunner) watchClusterEvents(ctx context.Context, conn *grpc.ClientConn) { - client := swarmapi.NewWatchClient(conn) - watch, err := client.Watch(ctx, &swarmapi.WatchRequest{ - Entries: []*swarmapi.WatchRequest_WatchEntry{ - { - Kind: "node", - Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove, - }, - { - Kind: "service", - Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove, - }, - { - Kind: "network", - Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove, - }, - { - Kind: "secret", - Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove, - }, - { - Kind: "config", - Action: swarmapi.WatchActionKindCreate | swarmapi.WatchActionKindUpdate | swarmapi.WatchActionKindRemove, - }, - }, - IncludeOldObject: true, - }) - if err != nil { - logrus.WithError(err).Error("failed to watch cluster store") - return - } - for { - msg, err := watch.Recv() - if err != nil { - // store watch is broken - errStatus, ok := status.FromError(err) - if !ok || errStatus.Code() != codes.Canceled { - logrus.WithError(err).Error("failed to receive changes from store watch API") - } - return - } - select { - case <-ctx.Done(): - return - case n.cluster.watchStream <- msg: - } - } -} - -func (n *nodeRunner) handleReadyEvent(ctx context.Context, node *swarmnode.Node, ready chan struct{}) { - select { - case <-node.Ready(): - n.mu.Lock() - n.err = nil - if n.config.JoinInProgress { - n.config.JoinInProgress = false - savePersistentState(n.cluster.root, n.config) - } - n.mu.Unlock() - close(ready) - case <-ctx.Done(): - } - n.cluster.SendClusterEvent(lncluster.EventNodeReady) -} - -func (n *nodeRunner) handleNodeExit(node *swarmnode.Node) { - err := detectLockedError(node.Err(context.Background())) - if err != nil { - logrus.Errorf("cluster 
exited with error: %v", err) - } - n.mu.Lock() - n.swarmNode = nil - n.err = err - close(n.done) - select { - case <-n.ready: - n.enableReconnectWatcher() - default: - if n.repeatedRun { - n.enableReconnectWatcher() - } - } - n.repeatedRun = true - n.mu.Unlock() -} - -// Stop stops the current swarm node if it is running. -func (n *nodeRunner) Stop() error { - n.mu.Lock() - if n.cancelReconnect != nil { // between restarts - n.cancelReconnect() - n.cancelReconnect = nil - } - if n.swarmNode == nil { - n.mu.Unlock() - return nil - } - n.stopping = true - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) - defer cancel() - n.mu.Unlock() - if err := n.swarmNode.Stop(ctx); err != nil && !strings.Contains(err.Error(), "context canceled") { - return err - } - n.cluster.SendClusterEvent(lncluster.EventNodeLeave) - <-n.done - return nil -} - -func (n *nodeRunner) State() nodeState { - if n == nil { - return nodeState{status: types.LocalNodeStateInactive} - } - n.mu.RLock() - defer n.mu.RUnlock() - - ns := n.nodeState - - if ns.err != nil || n.cancelReconnect != nil { - if errors.Cause(ns.err) == errSwarmLocked { - ns.status = types.LocalNodeStateLocked - } else { - ns.status = types.LocalNodeStateError - } - } else { - select { - case <-n.ready: - ns.status = types.LocalNodeStateActive - default: - ns.status = types.LocalNodeStatePending - } - } - - return ns -} - -func (n *nodeRunner) enableReconnectWatcher() { - if n.stopping { - return - } - n.reconnectDelay *= 2 - if n.reconnectDelay > maxReconnectDelay { - n.reconnectDelay = maxReconnectDelay - } - logrus.Warnf("Restarting swarm in %.2f seconds", n.reconnectDelay.Seconds()) - delayCtx, cancel := context.WithTimeout(context.Background(), n.reconnectDelay) - n.cancelReconnect = cancel - - go func() { - <-delayCtx.Done() - if delayCtx.Err() != context.DeadlineExceeded { - return - } - n.mu.Lock() - defer n.mu.Unlock() - if n.stopping { - return - } - - if err := n.start(n.config); err != nil { - n.err = err - } - }() -} - -// nodeState represents information about the current state of the cluster and -// provides access to the grpc clients. -type nodeState struct { - swarmNode *swarmnode.Node - grpcConn *grpc.ClientConn - controlClient swarmapi.ControlClient - logsClient swarmapi.LogsClient - status types.LocalNodeState - actualLocalAddr string - err error -} - -// IsActiveManager returns true if node is a manager ready to accept control requests. It is safe to access the client properties if this returns true. -func (ns nodeState) IsActiveManager() bool { - return ns.controlClient != nil -} - -// IsManager returns true if node is a manager. -func (ns nodeState) IsManager() bool { - return ns.swarmNode != nil && ns.swarmNode.Manager() != nil -} - -// NodeID returns node's ID or empty string if node is inactive. 
-func (ns nodeState) NodeID() string { - if ns.swarmNode != nil { - return ns.swarmNode.NodeID() - } - return "" -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/nodes.go b/vendor/github.com/docker/docker/daemon/cluster/nodes.go deleted file mode 100644 index 3c073b0ba..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/nodes.go +++ /dev/null @@ -1,105 +0,0 @@ -package cluster // import "github.com/docker/docker/daemon/cluster" - -import ( - "context" - - apitypes "github.com/docker/docker/api/types" - types "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/daemon/cluster/convert" - "github.com/docker/docker/errdefs" - swarmapi "github.com/docker/swarmkit/api" -) - -// GetNodes returns a list of all nodes known to a cluster. -func (c *Cluster) GetNodes(options apitypes.NodeListOptions) ([]types.Node, error) { - c.mu.RLock() - defer c.mu.RUnlock() - - state := c.currentNodeState() - if !state.IsActiveManager() { - return nil, c.errNoManager(state) - } - - filters, err := newListNodesFilters(options.Filters) - if err != nil { - return nil, err - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - r, err := state.controlClient.ListNodes( - ctx, - &swarmapi.ListNodesRequest{Filters: filters}) - if err != nil { - return nil, err - } - - nodes := make([]types.Node, 0, len(r.Nodes)) - - for _, node := range r.Nodes { - nodes = append(nodes, convert.NodeFromGRPC(*node)) - } - return nodes, nil -} - -// GetNode returns a node based on an ID. -func (c *Cluster) GetNode(input string) (types.Node, error) { - var node *swarmapi.Node - - if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { - n, err := getNode(ctx, state.controlClient, input) - if err != nil { - return err - } - node = n - return nil - }); err != nil { - return types.Node{}, err - } - - return convert.NodeFromGRPC(*node), nil -} - -// UpdateNode updates existing nodes properties. -func (c *Cluster) UpdateNode(input string, version uint64, spec types.NodeSpec) error { - return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { - nodeSpec, err := convert.NodeSpecToGRPC(spec) - if err != nil { - return errdefs.InvalidParameter(err) - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - currentNode, err := getNode(ctx, state.controlClient, input) - if err != nil { - return err - } - - _, err = state.controlClient.UpdateNode( - ctx, - &swarmapi.UpdateNodeRequest{ - NodeID: currentNode.ID, - Spec: &nodeSpec, - NodeVersion: &swarmapi.Version{ - Index: version, - }, - }, - ) - return err - }) -} - -// RemoveNode removes a node from a cluster -func (c *Cluster) RemoveNode(input string, force bool) error { - return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { - node, err := getNode(ctx, state.controlClient, input) - if err != nil { - return err - } - - _, err = state.controlClient.RemoveNode(ctx, &swarmapi.RemoveNodeRequest{NodeID: node.ID, Force: force}) - return err - }) -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/provider/network.go b/vendor/github.com/docker/docker/daemon/cluster/provider/network.go deleted file mode 100644 index 533baa0e1..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/provider/network.go +++ /dev/null @@ -1,37 +0,0 @@ -package provider // import "github.com/docker/docker/daemon/cluster/provider" - -import "github.com/docker/docker/api/types" - -// NetworkCreateRequest is a request when creating a network. 
-type NetworkCreateRequest struct { - ID string - types.NetworkCreateRequest -} - -// NetworkCreateResponse is a response when creating a network. -type NetworkCreateResponse struct { - ID string `json:"Id"` -} - -// VirtualAddress represents a virtual address. -type VirtualAddress struct { - IPv4 string - IPv6 string -} - -// PortConfig represents a port configuration. -type PortConfig struct { - Name string - Protocol int32 - TargetPort uint32 - PublishedPort uint32 -} - -// ServiceConfig represents a service configuration. -type ServiceConfig struct { - ID string - Name string - Aliases map[string][]string - VirtualAddresses map[string]*VirtualAddress - ExposedPorts []*PortConfig -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/secrets.go b/vendor/github.com/docker/docker/daemon/cluster/secrets.go deleted file mode 100644 index c6fd84208..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/secrets.go +++ /dev/null @@ -1,118 +0,0 @@ -package cluster // import "github.com/docker/docker/daemon/cluster" - -import ( - "context" - - apitypes "github.com/docker/docker/api/types" - types "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/daemon/cluster/convert" - swarmapi "github.com/docker/swarmkit/api" -) - -// GetSecret returns a secret from a managed swarm cluster -func (c *Cluster) GetSecret(input string) (types.Secret, error) { - var secret *swarmapi.Secret - - if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { - s, err := getSecret(ctx, state.controlClient, input) - if err != nil { - return err - } - secret = s - return nil - }); err != nil { - return types.Secret{}, err - } - return convert.SecretFromGRPC(secret), nil -} - -// GetSecrets returns all secrets of a managed swarm cluster. -func (c *Cluster) GetSecrets(options apitypes.SecretListOptions) ([]types.Secret, error) { - c.mu.RLock() - defer c.mu.RUnlock() - - state := c.currentNodeState() - if !state.IsActiveManager() { - return nil, c.errNoManager(state) - } - - filters, err := newListSecretsFilters(options.Filters) - if err != nil { - return nil, err - } - ctx, cancel := c.getRequestContext() - defer cancel() - - r, err := state.controlClient.ListSecrets(ctx, - &swarmapi.ListSecretsRequest{Filters: filters}) - if err != nil { - return nil, err - } - - secrets := make([]types.Secret, 0, len(r.Secrets)) - - for _, secret := range r.Secrets { - secrets = append(secrets, convert.SecretFromGRPC(secret)) - } - - return secrets, nil -} - -// CreateSecret creates a new secret in a managed swarm cluster. -func (c *Cluster) CreateSecret(s types.SecretSpec) (string, error) { - var resp *swarmapi.CreateSecretResponse - if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { - secretSpec := convert.SecretSpecToGRPC(s) - - r, err := state.controlClient.CreateSecret(ctx, - &swarmapi.CreateSecretRequest{Spec: &secretSpec}) - if err != nil { - return err - } - resp = r - return nil - }); err != nil { - return "", err - } - return resp.Secret.ID, nil -} - -// RemoveSecret removes a secret from a managed swarm cluster. 
-func (c *Cluster) RemoveSecret(input string) error { - return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { - secret, err := getSecret(ctx, state.controlClient, input) - if err != nil { - return err - } - - req := &swarmapi.RemoveSecretRequest{ - SecretID: secret.ID, - } - - _, err = state.controlClient.RemoveSecret(ctx, req) - return err - }) -} - -// UpdateSecret updates a secret in a managed swarm cluster. -// Note: this is not exposed to the CLI but is available from the API only -func (c *Cluster) UpdateSecret(input string, version uint64, spec types.SecretSpec) error { - return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { - secret, err := getSecret(ctx, state.controlClient, input) - if err != nil { - return err - } - - secretSpec := convert.SecretSpecToGRPC(spec) - - _, err = state.controlClient.UpdateSecret(ctx, - &swarmapi.UpdateSecretRequest{ - SecretID: secret.ID, - SecretVersion: &swarmapi.Version{ - Index: version, - }, - Spec: &secretSpec, - }) - return err - }) -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/services.go b/vendor/github.com/docker/docker/daemon/cluster/services.go deleted file mode 100644 index c14037645..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/services.go +++ /dev/null @@ -1,602 +0,0 @@ -package cluster // import "github.com/docker/docker/daemon/cluster" - -import ( - "context" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "os" - "strconv" - "strings" - "time" - - "github.com/docker/distribution/reference" - apitypes "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/backend" - types "github.com/docker/docker/api/types/swarm" - timetypes "github.com/docker/docker/api/types/time" - "github.com/docker/docker/daemon/cluster/convert" - "github.com/docker/docker/errdefs" - runconfigopts "github.com/docker/docker/runconfig/opts" - swarmapi "github.com/docker/swarmkit/api" - gogotypes "github.com/gogo/protobuf/types" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// GetServices returns all services of a managed swarm cluster. -func (c *Cluster) GetServices(options apitypes.ServiceListOptions) ([]types.Service, error) { - c.mu.RLock() - defer c.mu.RUnlock() - - state := c.currentNodeState() - if !state.IsActiveManager() { - return nil, c.errNoManager(state) - } - - // We move the accepted filter check here as "mode" filter - // is processed in the daemon, not in SwarmKit. So it might - // be good to have accepted file check in the same file as - // the filter processing (in the for loop below). 
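// For example, a request corresponding to
//
//	docker service ls --filter mode=replicated
//
// passes the Validate call below, but "mode" is not forwarded to
// SwarmKit; it is applied in the result loop further down via
// ExactMatch("mode", ...).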
- accepted := map[string]bool{ - "name": true, - "id": true, - "label": true, - "mode": true, - "runtime": true, - } - if err := options.Filters.Validate(accepted); err != nil { - return nil, err - } - - if len(options.Filters.Get("runtime")) == 0 { - // Default to using the container runtime filter - options.Filters.Add("runtime", string(types.RuntimeContainer)) - } - - filters := &swarmapi.ListServicesRequest_Filters{ - NamePrefixes: options.Filters.Get("name"), - IDPrefixes: options.Filters.Get("id"), - Labels: runconfigopts.ConvertKVStringsToMap(options.Filters.Get("label")), - Runtimes: options.Filters.Get("runtime"), - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - r, err := state.controlClient.ListServices( - ctx, - &swarmapi.ListServicesRequest{Filters: filters}) - if err != nil { - return nil, err - } - - services := make([]types.Service, 0, len(r.Services)) - - for _, service := range r.Services { - if options.Filters.Contains("mode") { - var mode string - switch service.Spec.GetMode().(type) { - case *swarmapi.ServiceSpec_Global: - mode = "global" - case *swarmapi.ServiceSpec_Replicated: - mode = "replicated" - } - - if !options.Filters.ExactMatch("mode", mode) { - continue - } - } - svcs, err := convert.ServiceFromGRPC(*service) - if err != nil { - return nil, err - } - services = append(services, svcs) - } - - return services, nil -} - -// GetService returns a service based on an ID or name. -func (c *Cluster) GetService(input string, insertDefaults bool) (types.Service, error) { - var service *swarmapi.Service - if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { - s, err := getService(ctx, state.controlClient, input, insertDefaults) - if err != nil { - return err - } - service = s - return nil - }); err != nil { - return types.Service{}, err - } - svc, err := convert.ServiceFromGRPC(*service) - if err != nil { - return types.Service{}, err - } - return svc, nil -} - -// CreateService creates a new service in a managed swarm cluster. 
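// CreateService below decodes encodedAuth as base64url-encoded JSON (see
// the base64.NewDecoder(base64.URLEncoding, ...) call in its body). A
// minimal sketch of producing such a value, assuming the
// apitypes.AuthConfig shape used there and example credentials only:
//
//	buf, err := json.Marshal(apitypes.AuthConfig{
//		Username:      "user", // hypothetical values
//		Password:      "secret",
//		ServerAddress: "registry.example.com",
//	})
//	if err != nil {
//		return err
//	}
//	encodedAuth := base64.URLEncoding.EncodeToString(buf)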
-func (c *Cluster) CreateService(s types.ServiceSpec, encodedAuth string, queryRegistry bool) (*apitypes.ServiceCreateResponse, error) { - var resp *apitypes.ServiceCreateResponse - err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { - err := c.populateNetworkID(ctx, state.controlClient, &s) - if err != nil { - return err - } - - serviceSpec, err := convert.ServiceSpecToGRPC(s) - if err != nil { - return errdefs.InvalidParameter(err) - } - - resp = &apitypes.ServiceCreateResponse{} - - switch serviceSpec.Task.Runtime.(type) { - case *swarmapi.TaskSpec_Attachment: - return fmt.Errorf("invalid task spec: spec type %q not supported", types.RuntimeNetworkAttachment) - // handle other runtimes here - case *swarmapi.TaskSpec_Generic: - switch serviceSpec.Task.GetGeneric().Kind { - case string(types.RuntimePlugin): - info, _ := c.config.Backend.SystemInfo() - if !info.ExperimentalBuild { - return fmt.Errorf("runtime type %q only supported in experimental", types.RuntimePlugin) - } - if s.TaskTemplate.PluginSpec == nil { - return errors.New("plugin spec must be set") - } - - default: - return fmt.Errorf("unsupported runtime type: %q", serviceSpec.Task.GetGeneric().Kind) - } - - r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec}) - if err != nil { - return err - } - - resp.ID = r.Service.ID - case *swarmapi.TaskSpec_Container: - ctnr := serviceSpec.Task.GetContainer() - if ctnr == nil { - return errors.New("service does not use container tasks") - } - if encodedAuth != "" { - ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth} - } - - // retrieve auth config from encoded auth - authConfig := &apitypes.AuthConfig{} - if encodedAuth != "" { - authReader := strings.NewReader(encodedAuth) - dec := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, authReader)) - if err := dec.Decode(authConfig); err != nil { - logrus.Warnf("invalid authconfig: %v", err) - } - } - - // pin image by digest for API versions < 1.30 - // TODO(nishanttotla): The check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE" - // should be removed in the future. Since integration tests only use the - // latest API version, so this is no longer required. - if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry { - digestImage, err := c.imageWithDigestString(ctx, ctnr.Image, authConfig) - if err != nil { - logrus.Warnf("unable to pin image %s to digest: %s", ctnr.Image, err.Error()) - // warning in the client response should be concise - resp.Warnings = append(resp.Warnings, digestWarning(ctnr.Image)) - - } else if ctnr.Image != digestImage { - logrus.Debugf("pinning image %s by digest: %s", ctnr.Image, digestImage) - ctnr.Image = digestImage - - } else { - logrus.Debugf("creating service using supplied digest reference %s", ctnr.Image) - - } - - // Replace the context with a fresh one. - // If we timed out while communicating with the - // registry, then "ctx" will already be expired, which - // would cause UpdateService below to fail. Reusing - // "ctx" could make it impossible to create a service - // if the registry is slow or unresponsive. - var cancel func() - ctx, cancel = c.getRequestContext() - defer cancel() - } - - r, err := state.controlClient.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec}) - if err != nil { - return err - } - - resp.ID = r.Service.ID - } - return nil - }) - - return resp, err -} - -// UpdateService updates existing service to match new properties. 
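// For reference, flags.Rollback is interpreted further down as:
//
//	"" or "none" -> swarmapi.UpdateServiceRequest_NONE
//	"previous"   -> swarmapi.UpdateServiceRequest_PREVIOUS
//
// and any other value is rejected as unrecognized.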
-func (c *Cluster) UpdateService(serviceIDOrName string, version uint64, spec types.ServiceSpec, flags apitypes.ServiceUpdateOptions, queryRegistry bool) (*apitypes.ServiceUpdateResponse, error) { - var resp *apitypes.ServiceUpdateResponse - - err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { - - err := c.populateNetworkID(ctx, state.controlClient, &spec) - if err != nil { - return err - } - - serviceSpec, err := convert.ServiceSpecToGRPC(spec) - if err != nil { - return errdefs.InvalidParameter(err) - } - - currentService, err := getService(ctx, state.controlClient, serviceIDOrName, false) - if err != nil { - return err - } - - resp = &apitypes.ServiceUpdateResponse{} - - switch serviceSpec.Task.Runtime.(type) { - case *swarmapi.TaskSpec_Attachment: - return fmt.Errorf("invalid task spec: spec type %q not supported", types.RuntimeNetworkAttachment) - case *swarmapi.TaskSpec_Generic: - switch serviceSpec.Task.GetGeneric().Kind { - case string(types.RuntimePlugin): - if spec.TaskTemplate.PluginSpec == nil { - return errors.New("plugin spec must be set") - } - } - case *swarmapi.TaskSpec_Container: - newCtnr := serviceSpec.Task.GetContainer() - if newCtnr == nil { - return errors.New("service does not use container tasks") - } - - encodedAuth := flags.EncodedRegistryAuth - if encodedAuth != "" { - newCtnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth} - } else { - // this is needed because if the encodedAuth isn't being updated then we - // shouldn't lose it, and continue to use the one that was already present - var ctnr *swarmapi.ContainerSpec - switch flags.RegistryAuthFrom { - case apitypes.RegistryAuthFromSpec, "": - ctnr = currentService.Spec.Task.GetContainer() - case apitypes.RegistryAuthFromPreviousSpec: - if currentService.PreviousSpec == nil { - return errors.New("service does not have a previous spec") - } - ctnr = currentService.PreviousSpec.Task.GetContainer() - default: - return errors.New("unsupported registryAuthFrom value") - } - if ctnr == nil { - return errors.New("service does not use container tasks") - } - newCtnr.PullOptions = ctnr.PullOptions - // update encodedAuth so it can be used to pin image by digest - if ctnr.PullOptions != nil { - encodedAuth = ctnr.PullOptions.RegistryAuth - } - } - - // retrieve auth config from encoded auth - authConfig := &apitypes.AuthConfig{} - if encodedAuth != "" { - if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil { - logrus.Warnf("invalid authconfig: %v", err) - } - } - - // pin image by digest for API versions < 1.30 - // TODO(nishanttotla): The check on "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE" - // should be removed in the future. Since integration tests only use the - // latest API version, so this is no longer required. - if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" && queryRegistry { - digestImage, err := c.imageWithDigestString(ctx, newCtnr.Image, authConfig) - if err != nil { - logrus.Warnf("unable to pin image %s to digest: %s", newCtnr.Image, err.Error()) - // warning in the client response should be concise - resp.Warnings = append(resp.Warnings, digestWarning(newCtnr.Image)) - } else if newCtnr.Image != digestImage { - logrus.Debugf("pinning image %s by digest: %s", newCtnr.Image, digestImage) - newCtnr.Image = digestImage - } else { - logrus.Debugf("updating service using supplied digest reference %s", newCtnr.Image) - } - - // Replace the context with a fresh one. 
- // If we timed out while communicating with the - // registry, then "ctx" will already be expired, which - // would cause UpdateService below to fail. Reusing - // "ctx" could make it impossible to update a service - // if the registry is slow or unresponsive. - var cancel func() - ctx, cancel = c.getRequestContext() - defer cancel() - } - } - - var rollback swarmapi.UpdateServiceRequest_Rollback - switch flags.Rollback { - case "", "none": - rollback = swarmapi.UpdateServiceRequest_NONE - case "previous": - rollback = swarmapi.UpdateServiceRequest_PREVIOUS - default: - return fmt.Errorf("unrecognized rollback option %s", flags.Rollback) - } - - _, err = state.controlClient.UpdateService( - ctx, - &swarmapi.UpdateServiceRequest{ - ServiceID: currentService.ID, - Spec: &serviceSpec, - ServiceVersion: &swarmapi.Version{ - Index: version, - }, - Rollback: rollback, - }, - ) - return err - }) - return resp, err -} - -// RemoveService removes a service from a managed swarm cluster. -func (c *Cluster) RemoveService(input string) error { - return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { - service, err := getService(ctx, state.controlClient, input, false) - if err != nil { - return err - } - - _, err = state.controlClient.RemoveService(ctx, &swarmapi.RemoveServiceRequest{ServiceID: service.ID}) - return err - }) -} - -// ServiceLogs collects service logs and writes them back to `config.OutStream` -func (c *Cluster) ServiceLogs(ctx context.Context, selector *backend.LogSelector, config *apitypes.ContainerLogsOptions) (<-chan *backend.LogMessage, error) { - c.mu.RLock() - defer c.mu.RUnlock() - - state := c.currentNodeState() - if !state.IsActiveManager() { - return nil, c.errNoManager(state) - } - - swarmSelector, err := convertSelector(ctx, state.controlClient, selector) - if err != nil { - return nil, errors.Wrap(err, "error making log selector") - } - - // set the streams we'll use - stdStreams := []swarmapi.LogStream{} - if config.ShowStdout { - stdStreams = append(stdStreams, swarmapi.LogStreamStdout) - } - if config.ShowStderr { - stdStreams = append(stdStreams, swarmapi.LogStreamStderr) - } - - // Get tail value squared away - the number of previous log lines we look at - var tail int64 - // in ContainerLogs, if the tail value is ANYTHING non-integer, we just set - // it to -1 (all). i don't agree with that, but i also think no tail value - // should be legitimate. if you don't pass tail, we assume you want "all" - if config.Tail == "all" || config.Tail == "" { - // tail of 0 means send all logs on the swarmkit side - tail = 0 - } else { - t, err := strconv.Atoi(config.Tail) - if err != nil { - return nil, errors.New("tail value must be a positive integer or \"all\"") - } - if t < 0 { - return nil, errors.New("negative tail values not supported") - } - // we actually use negative tail in swarmkit to represent messages - // backwards starting from the beginning. also, -1 means no logs. so, - // basically, for api compat with docker container logs, add one and - // flip the sign. 
we error above if you try to negative tail, which - // isn't supported by docker (and would error deeper in the stack - // anyway) - // - // See the logs protobuf for more information - tail = int64(-(t + 1)) - } - - // get the since value - the time in the past we're looking at logs starting from - var sinceProto *gogotypes.Timestamp - if config.Since != "" { - s, n, err := timetypes.ParseTimestamps(config.Since, 0) - if err != nil { - return nil, errors.Wrap(err, "could not parse since timestamp") - } - since := time.Unix(s, n) - sinceProto, err = gogotypes.TimestampProto(since) - if err != nil { - return nil, errors.Wrap(err, "could not parse timestamp to proto") - } - } - - stream, err := state.logsClient.SubscribeLogs(ctx, &swarmapi.SubscribeLogsRequest{ - Selector: swarmSelector, - Options: &swarmapi.LogSubscriptionOptions{ - Follow: config.Follow, - Streams: stdStreams, - Tail: tail, - Since: sinceProto, - }, - }) - if err != nil { - return nil, err - } - - messageChan := make(chan *backend.LogMessage, 1) - go func() { - defer close(messageChan) - for { - // Check the context before doing anything. - select { - case <-ctx.Done(): - return - default: - } - subscribeMsg, err := stream.Recv() - if err == io.EOF { - return - } - // if we're not io.EOF, push the message in and return - if err != nil { - select { - case <-ctx.Done(): - case messageChan <- &backend.LogMessage{Err: err}: - } - return - } - - for _, msg := range subscribeMsg.Messages { - // make a new message - m := new(backend.LogMessage) - m.Attrs = make([]backend.LogAttr, 0, len(msg.Attrs)+3) - // add the timestamp, adding the error if it fails - m.Timestamp, err = gogotypes.TimestampFromProto(msg.Timestamp) - if err != nil { - m.Err = err - } - - nodeKey := contextPrefix + ".node.id" - serviceKey := contextPrefix + ".service.id" - taskKey := contextPrefix + ".task.id" - - // copy over all of the details - for _, d := range msg.Attrs { - switch d.Key { - case nodeKey, serviceKey, taskKey: - // we have the final say over context details (in case there - // is a conflict (if the user added a detail with a context's - // key for some reason)) - default: - m.Attrs = append(m.Attrs, backend.LogAttr{Key: d.Key, Value: d.Value}) - } - } - m.Attrs = append(m.Attrs, - backend.LogAttr{Key: nodeKey, Value: msg.Context.NodeID}, - backend.LogAttr{Key: serviceKey, Value: msg.Context.ServiceID}, - backend.LogAttr{Key: taskKey, Value: msg.Context.TaskID}, - ) - - switch msg.Stream { - case swarmapi.LogStreamStdout: - m.Source = "stdout" - case swarmapi.LogStreamStderr: - m.Source = "stderr" - } - m.Line = msg.Data - - // there could be a case where the reader stops accepting - // messages and the context is canceled. we need to check that - // here, or otherwise we risk blocking forever on the message - // send. - select { - case <-ctx.Done(): - return - case messageChan <- m: - } - } - } - }() - return messageChan, nil -} - -// convertSelector takes a backend.LogSelector, which contains raw names that -// may or may not be valid, and converts them to an api.LogSelector proto. 
It -// returns an error if something fails -func convertSelector(ctx context.Context, cc swarmapi.ControlClient, selector *backend.LogSelector) (*swarmapi.LogSelector, error) { - // don't rely on swarmkit to resolve IDs, do it ourselves - swarmSelector := &swarmapi.LogSelector{} - for _, s := range selector.Services { - service, err := getService(ctx, cc, s, false) - if err != nil { - return nil, err - } - c := service.Spec.Task.GetContainer() - if c == nil { - return nil, errors.New("logs only supported on container tasks") - } - swarmSelector.ServiceIDs = append(swarmSelector.ServiceIDs, service.ID) - } - for _, t := range selector.Tasks { - task, err := getTask(ctx, cc, t) - if err != nil { - return nil, err - } - c := task.Spec.GetContainer() - if c == nil { - return nil, errors.New("logs only supported on container tasks") - } - swarmSelector.TaskIDs = append(swarmSelector.TaskIDs, task.ID) - } - return swarmSelector, nil -} - -// imageWithDigestString takes an image such as name or name:tag -// and returns the image pinned to a digest, such as name@sha256:34234 -func (c *Cluster) imageWithDigestString(ctx context.Context, image string, authConfig *apitypes.AuthConfig) (string, error) { - ref, err := reference.ParseAnyReference(image) - if err != nil { - return "", err - } - namedRef, ok := ref.(reference.Named) - if !ok { - if _, ok := ref.(reference.Digested); ok { - return image, nil - } - return "", errors.Errorf("unknown image reference format: %s", image) - } - // only query registry if not a canonical reference (i.e. with digest) - if _, ok := namedRef.(reference.Canonical); !ok { - namedRef = reference.TagNameOnly(namedRef) - - taggedRef, ok := namedRef.(reference.NamedTagged) - if !ok { - return "", errors.Errorf("image reference not tagged: %s", image) - } - - repo, _, err := c.config.ImageBackend.GetRepository(ctx, taggedRef, authConfig) - if err != nil { - return "", err - } - dscrptr, err := repo.Tags(ctx).Get(ctx, taggedRef.Tag()) - if err != nil { - return "", err - } - - namedDigestedRef, err := reference.WithDigest(taggedRef, dscrptr.Digest) - if err != nil { - return "", err - } - // return familiar form until interface updated to return type - return reference.FamiliarString(namedDigestedRef), nil - } - // reference already contains a digest, so just return it - return reference.FamiliarString(ref), nil -} - -// digestWarning constructs a formatted warning string -// using the image name that could not be pinned by digest. The -// formatting is hardcoded, but could be made smarter in the future -func digestWarning(image string) string { - return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest.
Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image) -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/swarm.go b/vendor/github.com/docker/docker/daemon/cluster/swarm.go deleted file mode 100644 index 2f498ce26..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/swarm.go +++ /dev/null @@ -1,569 +0,0 @@ -package cluster // import "github.com/docker/docker/daemon/cluster" - -import ( - "context" - "fmt" - "net" - "strings" - "time" - - apitypes "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - types "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/daemon/cluster/convert" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/signal" - swarmapi "github.com/docker/swarmkit/api" - "github.com/docker/swarmkit/manager/encryption" - swarmnode "github.com/docker/swarmkit/node" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// Init initializes new cluster from user provided request. -func (c *Cluster) Init(req types.InitRequest) (string, error) { - c.controlMutex.Lock() - defer c.controlMutex.Unlock() - if c.nr != nil { - if req.ForceNewCluster { - - // Take c.mu temporarily to wait for presently running - // API handlers to finish before shutting down the node. - c.mu.Lock() - if !c.nr.nodeState.IsManager() { - return "", errSwarmNotManager - } - c.mu.Unlock() - - if err := c.nr.Stop(); err != nil { - return "", err - } - } else { - return "", errSwarmExists - } - } - - if err := validateAndSanitizeInitRequest(&req); err != nil { - return "", errdefs.InvalidParameter(err) - } - - listenHost, listenPort, err := resolveListenAddr(req.ListenAddr) - if err != nil { - return "", err - } - - advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort) - if err != nil { - return "", err - } - - dataPathAddr, err := resolveDataPathAddr(req.DataPathAddr) - if err != nil { - return "", err - } - - localAddr := listenHost - - // If the local address is undetermined, the advertise address - // will be used as local address, if it belongs to this system. - // If the advertise address is not local, then we try to find - // a system address to use as local address. If this fails, - // we give up and ask the user to pass the listen address. 
- if net.ParseIP(localAddr).IsUnspecified() { - advertiseIP := net.ParseIP(advertiseHost) - - found := false - for _, systemIP := range listSystemIPs() { - if systemIP.Equal(advertiseIP) { - localAddr = advertiseIP.String() - found = true - break - } - } - - if !found { - ip, err := c.resolveSystemAddr() - if err != nil { - logrus.Warnf("Could not find a local address: %v", err) - return "", errMustSpecifyListenAddr - } - localAddr = ip.String() - } - } - - nr, err := c.newNodeRunner(nodeStartConfig{ - forceNewCluster: req.ForceNewCluster, - autolock: req.AutoLockManagers, - LocalAddr: localAddr, - ListenAddr: net.JoinHostPort(listenHost, listenPort), - AdvertiseAddr: net.JoinHostPort(advertiseHost, advertisePort), - DataPathAddr: dataPathAddr, - availability: req.Availability, - }) - if err != nil { - return "", err - } - c.mu.Lock() - c.nr = nr - c.mu.Unlock() - - if err := <-nr.Ready(); err != nil { - c.mu.Lock() - c.nr = nil - c.mu.Unlock() - if !req.ForceNewCluster { // if failure on first attempt don't keep state - if err := clearPersistentState(c.root); err != nil { - return "", err - } - } - return "", err - } - state := nr.State() - if state.swarmNode == nil { // should never happen but protect from panic - return "", errors.New("invalid cluster state for spec initialization") - } - if err := initClusterSpec(state.swarmNode, req.Spec); err != nil { - return "", err - } - return state.NodeID(), nil -} - -// Join makes current Cluster part of an existing swarm cluster. -func (c *Cluster) Join(req types.JoinRequest) error { - c.controlMutex.Lock() - defer c.controlMutex.Unlock() - c.mu.Lock() - if c.nr != nil { - c.mu.Unlock() - return errors.WithStack(errSwarmExists) - } - c.mu.Unlock() - - if err := validateAndSanitizeJoinRequest(&req); err != nil { - return errdefs.InvalidParameter(err) - } - - listenHost, listenPort, err := resolveListenAddr(req.ListenAddr) - if err != nil { - return err - } - - var advertiseAddr string - if req.AdvertiseAddr != "" { - advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort) - // For joining, we don't need to provide an advertise address, - // since the remote side can detect it. - if err == nil { - advertiseAddr = net.JoinHostPort(advertiseHost, advertisePort) - } - } - - dataPathAddr, err := resolveDataPathAddr(req.DataPathAddr) - if err != nil { - return err - } - - nr, err := c.newNodeRunner(nodeStartConfig{ - RemoteAddr: req.RemoteAddrs[0], - ListenAddr: net.JoinHostPort(listenHost, listenPort), - AdvertiseAddr: advertiseAddr, - DataPathAddr: dataPathAddr, - joinAddr: req.RemoteAddrs[0], - joinToken: req.JoinToken, - availability: req.Availability, - }) - if err != nil { - return err - } - - c.mu.Lock() - c.nr = nr - c.mu.Unlock() - - select { - case <-time.After(swarmConnectTimeout): - return errSwarmJoinTimeoutReached - case err := <-nr.Ready(): - if err != nil { - c.mu.Lock() - c.nr = nil - c.mu.Unlock() - if err := clearPersistentState(c.root); err != nil { - return err - } - } - return err - } -} - -// Inspect retrieves the configuration properties of a managed swarm cluster. 
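Join above waits on nr.Ready() raced against swarmConnectTimeout. A minimal sketch of that select pattern, with a stub timeout value in place of the package constant:

package main

import (
	"errors"
	"fmt"
	"time"
)

var errJoinTimeout = errors.New("timeout reached before node joined")

// waitReady returns the first of: the readiness result, or a timeout error.
func waitReady(ready <-chan error, timeout time.Duration) error {
	select {
	case <-time.After(timeout):
		return errJoinTimeout
	case err := <-ready:
		return err
	}
}

func main() {
	ready := make(chan error, 1)
	ready <- nil // simulate a node that becomes ready immediately
	fmt.Println(waitReady(ready, 100*time.Millisecond)) // <nil>
}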
-func (c *Cluster) Inspect() (types.Swarm, error) { - var swarm types.Swarm - if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { - s, err := c.inspect(ctx, state) - if err != nil { - return err - } - swarm = s - return nil - }); err != nil { - return types.Swarm{}, err - } - return swarm, nil -} - -func (c *Cluster) inspect(ctx context.Context, state nodeState) (types.Swarm, error) { - s, err := getSwarm(ctx, state.controlClient) - if err != nil { - return types.Swarm{}, err - } - return convert.SwarmFromGRPC(*s), nil -} - -// Update updates configuration of a managed swarm cluster. -func (c *Cluster) Update(version uint64, spec types.Spec, flags types.UpdateFlags) error { - return c.lockedManagerAction(func(ctx context.Context, state nodeState) error { - swarm, err := getSwarm(ctx, state.controlClient) - if err != nil { - return err - } - - // Validate spec name. - if spec.Annotations.Name == "" { - spec.Annotations.Name = "default" - } else if spec.Annotations.Name != "default" { - return errdefs.InvalidParameter(errors.New(`swarm spec must be named "default"`)) - } - - // In update, the client should provide the complete spec of the swarm, including - // Name and Labels. If a field is specified as 0 or nil, the swarmkit default - // value will be used. - clusterSpec, err := convert.SwarmSpecToGRPC(spec) - if err != nil { - return errdefs.InvalidParameter(err) - } - - _, err = state.controlClient.UpdateCluster( - ctx, - &swarmapi.UpdateClusterRequest{ - ClusterID: swarm.ID, - Spec: &clusterSpec, - ClusterVersion: &swarmapi.Version{ - Index: version, - }, - Rotation: swarmapi.KeyRotation{ - WorkerJoinToken: flags.RotateWorkerToken, - ManagerJoinToken: flags.RotateManagerToken, - ManagerUnlockKey: flags.RotateManagerUnlockKey, - }, - }, - ) - return err - }) -} - -// GetUnlockKey returns the unlock key for the swarm. -func (c *Cluster) GetUnlockKey() (string, error) { - var resp *swarmapi.GetUnlockKeyResponse - if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { - client := swarmapi.NewCAClient(state.grpcConn) - - r, err := client.GetUnlockKey(ctx, &swarmapi.GetUnlockKeyRequest{}) - if err != nil { - return err - } - resp = r - return nil - }); err != nil { - return "", err - } - if len(resp.UnlockKey) == 0 { - // no key - return "", nil - } - return encryption.HumanReadableKey(resp.UnlockKey), nil -} - -// UnlockSwarm provides a key to decrypt data that is encrypted at rest. -func (c *Cluster) UnlockSwarm(req types.UnlockRequest) error { - c.controlMutex.Lock() - defer c.controlMutex.Unlock() - - c.mu.RLock() - state := c.currentNodeState() - - if !state.IsActiveManager() { - // when the manager is not active, return the error - // unless the swarm is locked.
- if err := c.errNoManager(state); err != errSwarmLocked { - c.mu.RUnlock() - return err - } - } else { - // when the manager is active, return a "not locked" error - c.mu.RUnlock() - return notLockedError{} - } - - // execution only reaches this point when the swarm is locked - nr := c.nr - c.mu.RUnlock() - - key, err := encryption.ParseHumanReadableKey(req.UnlockKey) - if err != nil { - return errdefs.InvalidParameter(err) - } - - config := nr.config - config.lockKey = key - if err := nr.Stop(); err != nil { - return err - } - nr, err = c.newNodeRunner(config) - if err != nil { - return err - } - - c.mu.Lock() - c.nr = nr - c.mu.Unlock() - - if err := <-nr.Ready(); err != nil { - if errors.Cause(err) == errSwarmLocked { - return invalidUnlockKey{} - } - return errors.Errorf("swarm component could not be started: %v", err) - } - return nil -} - -// Leave shuts down Cluster and removes current state. -func (c *Cluster) Leave(force bool) error { - c.controlMutex.Lock() - defer c.controlMutex.Unlock() - - c.mu.Lock() - nr := c.nr - if nr == nil { - c.mu.Unlock() - return errors.WithStack(errNoSwarm) - } - - state := c.currentNodeState() - - c.mu.Unlock() - - if errors.Cause(state.err) == errSwarmLocked && !force { - // leaving a locked swarm without --force is not allowed - return errors.WithStack(notAvailableError("Swarm is encrypted and locked. Please unlock it first or use `--force` to ignore this message.")) - } - - if state.IsManager() && !force { - msg := "You are attempting to leave the swarm on a node that is participating as a manager. " - if state.IsActiveManager() { - active, reachable, unreachable, err := managerStats(state.controlClient, state.NodeID()) - if err == nil { - if active && removingManagerCausesLossOfQuorum(reachable, unreachable) { - if isLastManager(reachable, unreachable) { - msg += "Removing the last manager erases all current state of the swarm. Use `--force` to ignore this message. " - return errors.WithStack(notAvailableError(msg)) - } - msg += fmt.Sprintf("Removing this node leaves %v managers out of %v. Without a Raft quorum your swarm will be inaccessible. ", reachable-1, reachable+unreachable) - } - } - } else { - msg += "Doing so may lose the consensus of your cluster. " - } - - msg += "The only way to restore a swarm that has lost consensus is to reinitialize it with `--force-new-cluster`. Use `--force` to suppress this message." - return errors.WithStack(notAvailableError(msg)) - } - // release readers here - if err := nr.Stop(); err != nil { - logrus.Errorf("failed to shut down cluster node: %v", err) - signal.DumpStacks("") - return err - } - - c.mu.Lock() - c.nr = nil - c.mu.Unlock() - - if nodeID := state.NodeID(); nodeID != "" { - nodeContainers, err := c.listContainerForNode(nodeID) - if err != nil { - return err - } - for _, id := range nodeContainers { - if err := c.config.Backend.ContainerRm(id, &apitypes.ContainerRmConfig{ForceRemove: true}); err != nil { - logrus.Errorf("error removing %v: %v", id, err) - } - } - } - - // todo: cleanup optional? - if err := clearPersistentState(c.root); err != nil { - return err - } - c.config.Backend.DaemonLeavesCluster() - return nil -} - -// Info returns information about the current cluster state.
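Leave's quorum warning above hinges on removingManagerCausesLossOfQuorum (defined in utils.go, removed later in this patch). The arithmetic: quorum is judged against the full manager set, which still counts the leaving node, so loss occurs once reachable-1 is no longer a strict majority of reachable+unreachable, which simplifies to reachable-2 <= unreachable. A quick check:

package main

import "fmt"

// same formula as utils.go: loss when (reachable-1) <= (reachable+unreachable)/2
func removingManagerCausesLossOfQuorum(reachable, unreachable int) bool {
	return reachable-2 <= unreachable
}

func main() {
	// 3 reachable, 0 unreachable: 2 of 3 remain a majority -> quorum holds
	fmt.Println(removingManagerCausesLossOfQuorum(3, 0)) // false
	// 2 reachable, 1 unreachable: 1 of 3 is not a majority -> quorum lost
	fmt.Println(removingManagerCausesLossOfQuorum(2, 1)) // true
}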
-func (c *Cluster) Info() types.Info { - info := types.Info{ - NodeAddr: c.GetAdvertiseAddress(), - } - c.mu.RLock() - defer c.mu.RUnlock() - - state := c.currentNodeState() - info.LocalNodeState = state.status - if state.err != nil { - info.Error = state.err.Error() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - if state.IsActiveManager() { - info.ControlAvailable = true - swarm, err := c.inspect(ctx, state) - if err != nil { - info.Error = err.Error() - } - - info.Cluster = &swarm.ClusterInfo - - if r, err := state.controlClient.ListNodes(ctx, &swarmapi.ListNodesRequest{}); err != nil { - info.Error = err.Error() - } else { - info.Nodes = len(r.Nodes) - for _, n := range r.Nodes { - if n.ManagerStatus != nil { - info.Managers = info.Managers + 1 - } - } - } - } - - if state.swarmNode != nil { - for _, r := range state.swarmNode.Remotes() { - info.RemoteManagers = append(info.RemoteManagers, types.Peer{NodeID: r.NodeID, Addr: r.Addr}) - } - info.NodeID = state.swarmNode.NodeID() - } - - return info -} - -func validateAndSanitizeInitRequest(req *types.InitRequest) error { - var err error - req.ListenAddr, err = validateAddr(req.ListenAddr) - if err != nil { - return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err) - } - - if req.Spec.Annotations.Name == "" { - req.Spec.Annotations.Name = "default" - } else if req.Spec.Annotations.Name != "default" { - return errors.New(`swarm spec must be named "default"`) - } - - return nil -} - -func validateAndSanitizeJoinRequest(req *types.JoinRequest) error { - var err error - req.ListenAddr, err = validateAddr(req.ListenAddr) - if err != nil { - return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err) - } - if len(req.RemoteAddrs) == 0 { - return errors.New("at least 1 RemoteAddr is required to join") - } - for i := range req.RemoteAddrs { - req.RemoteAddrs[i], err = validateAddr(req.RemoteAddrs[i]) - if err != nil { - return fmt.Errorf("invalid remoteAddr %q: %v", req.RemoteAddrs[i], err) - } - } - return nil -} - -func validateAddr(addr string) (string, error) { - if addr == "" { - return addr, errors.New("invalid empty address") - } - newaddr, err := opts.ParseTCPAddr(addr, defaultAddr) - if err != nil { - return addr, nil - } - return strings.TrimPrefix(newaddr, "tcp://"), nil -} - -func initClusterSpec(node *swarmnode.Node, spec types.Spec) error { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - for conn := range node.ListenControlSocket(ctx) { - if ctx.Err() != nil { - return ctx.Err() - } - if conn != nil { - client := swarmapi.NewControlClient(conn) - var cluster *swarmapi.Cluster - for i := 0; ; i++ { - lcr, err := client.ListClusters(ctx, &swarmapi.ListClustersRequest{}) - if err != nil { - return fmt.Errorf("error on listing clusters: %v", err) - } - if len(lcr.Clusters) == 0 { - if i < 10 { - time.Sleep(200 * time.Millisecond) - continue - } - return errors.New("empty list of clusters was returned") - } - cluster = lcr.Clusters[0] - break - } - // In init, we take the initial default values from swarmkit, and merge - // any non nil or 0 value from spec to GRPC spec. This will leave the - // default value alone. 
- // Note that this is different from Update(), as in Update() we expect the - // user to specify the complete spec of the cluster (as they already know - // the existing one and which fields to update) - clusterSpec, err := convert.MergeSwarmSpecToGRPC(spec, cluster.Spec) - if err != nil { - return fmt.Errorf("error updating cluster settings: %v", err) - } - _, err = client.UpdateCluster(ctx, &swarmapi.UpdateClusterRequest{ - ClusterID: cluster.ID, - ClusterVersion: &cluster.Meta.Version, - Spec: &clusterSpec, - }) - if err != nil { - return fmt.Errorf("error updating cluster settings: %v", err) - } - return nil - } - } - return ctx.Err() -} - -func (c *Cluster) listContainerForNode(nodeID string) ([]string, error) { - var ids []string - filters := filters.NewArgs() - filters.Add("label", fmt.Sprintf("com.docker.swarm.node.id=%s", nodeID)) - containers, err := c.config.Backend.Containers(&apitypes.ContainerListOptions{ - Filters: filters, - }) - if err != nil { - return []string{}, err - } - for _, c := range containers { - ids = append(ids, c.ID) - } - return ids, nil -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/tasks.go b/vendor/github.com/docker/docker/daemon/cluster/tasks.go deleted file mode 100644 index de1240dfe..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/tasks.go +++ /dev/null @@ -1,87 +0,0 @@ -package cluster // import "github.com/docker/docker/daemon/cluster" - -import ( - "context" - - apitypes "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - types "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/daemon/cluster/convert" - swarmapi "github.com/docker/swarmkit/api" -) - -// GetTasks returns a list of tasks matching the filter options. -func (c *Cluster) GetTasks(options apitypes.TaskListOptions) ([]types.Task, error) { - var r *swarmapi.ListTasksResponse - - if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { - filterTransform := func(filter filters.Args) error { - if filter.Contains("service") { - serviceFilters := filter.Get("service") - for _, serviceFilter := range serviceFilters { - service, err := getService(ctx, state.controlClient, serviceFilter, false) - if err != nil { - return err - } - filter.Del("service", serviceFilter) - filter.Add("service", service.ID) - } - } - if filter.Contains("node") { - nodeFilters := filter.Get("node") - for _, nodeFilter := range nodeFilters { - node, err := getNode(ctx, state.controlClient, nodeFilter) - if err != nil { - return err - } - filter.Del("node", nodeFilter) - filter.Add("node", node.ID) - } - } - if !filter.Contains("runtime") { - // default to only showing container tasks - filter.Add("runtime", "container") - filter.Add("runtime", "") - } - return nil - } - - filters, err := newListTasksFilters(options.Filters, filterTransform) - if err != nil { - return err - } - - r, err = state.controlClient.ListTasks( - ctx, - &swarmapi.ListTasksRequest{Filters: filters}) - return err - }); err != nil { - return nil, err - } - - tasks := make([]types.Task, 0, len(r.Tasks)) - for _, task := range r.Tasks { - t, err := convert.TaskFromGRPC(*task) - if err != nil { - return nil, err - } - tasks = append(tasks, t) - } - return tasks, nil -} - -// GetTask returns a task by an ID.
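GetTasks above and GetTask below both funnel their work through lockedManagerAction, capturing results via a closure. A reduced sketch of that pattern with illustrative stand-in types (the real method also takes the cluster lock and verifies the node is an active manager):

package main

import (
	"context"
	"errors"
	"fmt"
)

type nodeState struct{ id string }

// lockedManagerAction is a stand-in: the vendored version locks c.mu and
// rejects the call when the node is not an active manager.
func lockedManagerAction(fn func(ctx context.Context, state nodeState) error) error {
	return fn(context.Background(), nodeState{id: "node-1"})
}

func getNodeID() (string, error) {
	var id string
	if err := lockedManagerAction(func(ctx context.Context, state nodeState) error {
		if state.id == "" {
			return errors.New("no node")
		}
		id = state.id // the closure smuggles the result out
		return nil
	}); err != nil {
		return "", err
	}
	return id, nil
}

func main() {
	id, _ := getNodeID()
	fmt.Println(id) // node-1
}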
-func (c *Cluster) GetTask(input string) (types.Task, error) { - var task *swarmapi.Task - if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error { - t, err := getTask(ctx, state.controlClient, input) - if err != nil { - return err - } - task = t - return nil - }); err != nil { - return types.Task{}, err - } - return convert.TaskFromGRPC(*task) -} diff --git a/vendor/github.com/docker/docker/daemon/cluster/utils.go b/vendor/github.com/docker/docker/daemon/cluster/utils.go deleted file mode 100644 index d55e0012b..000000000 --- a/vendor/github.com/docker/docker/daemon/cluster/utils.go +++ /dev/null @@ -1,63 +0,0 @@ -package cluster // import "github.com/docker/docker/daemon/cluster" - -import ( - "encoding/json" - "io/ioutil" - "os" - "path/filepath" - - "github.com/docker/docker/pkg/ioutils" -) - -func loadPersistentState(root string) (*nodeStartConfig, error) { - dt, err := ioutil.ReadFile(filepath.Join(root, stateFile)) - if err != nil { - return nil, err - } - // missing certificate means no actual state to restore from - if _, err := os.Stat(filepath.Join(root, "certificates/swarm-node.crt")); err != nil { - if os.IsNotExist(err) { - clearPersistentState(root) - } - return nil, err - } - var st nodeStartConfig - if err := json.Unmarshal(dt, &st); err != nil { - return nil, err - } - return &st, nil -} - -func savePersistentState(root string, config nodeStartConfig) error { - dt, err := json.Marshal(config) - if err != nil { - return err - } - return ioutils.AtomicWriteFile(filepath.Join(root, stateFile), dt, 0600) -} - -func clearPersistentState(root string) error { - // todo: backup this data instead of removing? - // rather than delete the entire swarm directory, delete the contents in order to preserve the inode - // (for example, allowing it to be bind-mounted) - files, err := ioutil.ReadDir(root) - if err != nil { - return err - } - - for _, f := range files { - if err := os.RemoveAll(filepath.Join(root, f.Name())); err != nil { - return err - } - } - - return nil -} - -func removingManagerCausesLossOfQuorum(reachable, unreachable int) bool { - return reachable-2 <= unreachable -} - -func isLastManager(reachable, unreachable int) bool { - return reachable == 1 && unreachable == 0 -} diff --git a/vendor/github.com/docker/docker/daemon/commit.go b/vendor/github.com/docker/docker/daemon/commit.go deleted file mode 100644 index 0f6f44051..000000000 --- a/vendor/github.com/docker/docker/daemon/commit.go +++ /dev/null @@ -1,186 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "fmt" - "runtime" - "strings" - "time" - - "github.com/docker/docker/api/types/backend" - containertypes "github.com/docker/docker/api/types/container" - "github.com/docker/docker/builder/dockerfile" - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" -) - -// merge merges two Config, the image container configuration (defaults values), -// and the user container configuration, either passed by the API or generated -// by the cli. -// It will mutate the specified user configuration (userConf) with the image -// configuration where the user configuration is incomplete. 
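Before the full implementation below, a reduced sketch of its environment-merge rule: an image variable is copied only when no user variable shares its key, with keys compared case-insensitively on Windows. Helper names here are hypothetical, and the vendored code compares pairwise rather than building a set:

package main

import (
	"fmt"
	"runtime"
	"strings"
)

// envKey extracts the variable name from a KEY=value entry, folding case
// on Windows, where environment variables are case-insensitive.
func envKey(env string) string {
	key := strings.Split(env, "=")[0]
	if runtime.GOOS == "windows" {
		key = strings.ToUpper(key)
	}
	return key
}

func mergeEnv(userEnv, imageEnv []string) []string {
	seen := map[string]bool{}
	for _, e := range userEnv {
		seen[envKey(e)] = true
	}
	merged := userEnv
	for _, e := range imageEnv {
		if !seen[envKey(e)] { // the user value wins on a key collision
			merged = append(merged, e)
		}
	}
	return merged
}

func main() {
	fmt.Println(mergeEnv([]string{"PATH=/custom"}, []string{"PATH=/usr/bin", "LANG=C"}))
	// [PATH=/custom LANG=C]
}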
-func merge(userConf, imageConf *containertypes.Config) error { - if userConf.User == "" { - userConf.User = imageConf.User - } - if len(userConf.ExposedPorts) == 0 { - userConf.ExposedPorts = imageConf.ExposedPorts - } else if imageConf.ExposedPorts != nil { - for port := range imageConf.ExposedPorts { - if _, exists := userConf.ExposedPorts[port]; !exists { - userConf.ExposedPorts[port] = struct{}{} - } - } - } - - if len(userConf.Env) == 0 { - userConf.Env = imageConf.Env - } else { - for _, imageEnv := range imageConf.Env { - found := false - imageEnvKey := strings.Split(imageEnv, "=")[0] - for _, userEnv := range userConf.Env { - userEnvKey := strings.Split(userEnv, "=")[0] - if runtime.GOOS == "windows" { - // Case insensitive environment variables on Windows - imageEnvKey = strings.ToUpper(imageEnvKey) - userEnvKey = strings.ToUpper(userEnvKey) - } - if imageEnvKey == userEnvKey { - found = true - break - } - } - if !found { - userConf.Env = append(userConf.Env, imageEnv) - } - } - } - - if userConf.Labels == nil { - userConf.Labels = map[string]string{} - } - for l, v := range imageConf.Labels { - if _, ok := userConf.Labels[l]; !ok { - userConf.Labels[l] = v - } - } - - if len(userConf.Entrypoint) == 0 { - if len(userConf.Cmd) == 0 { - userConf.Cmd = imageConf.Cmd - userConf.ArgsEscaped = imageConf.ArgsEscaped - } - - if userConf.Entrypoint == nil { - userConf.Entrypoint = imageConf.Entrypoint - } - } - if imageConf.Healthcheck != nil { - if userConf.Healthcheck == nil { - userConf.Healthcheck = imageConf.Healthcheck - } else { - if len(userConf.Healthcheck.Test) == 0 { - userConf.Healthcheck.Test = imageConf.Healthcheck.Test - } - if userConf.Healthcheck.Interval == 0 { - userConf.Healthcheck.Interval = imageConf.Healthcheck.Interval - } - if userConf.Healthcheck.Timeout == 0 { - userConf.Healthcheck.Timeout = imageConf.Healthcheck.Timeout - } - if userConf.Healthcheck.StartPeriod == 0 { - userConf.Healthcheck.StartPeriod = imageConf.Healthcheck.StartPeriod - } - if userConf.Healthcheck.Retries == 0 { - userConf.Healthcheck.Retries = imageConf.Healthcheck.Retries - } - } - } - - if userConf.WorkingDir == "" { - userConf.WorkingDir = imageConf.WorkingDir - } - if len(userConf.Volumes) == 0 { - userConf.Volumes = imageConf.Volumes - } else { - for k, v := range imageConf.Volumes { - userConf.Volumes[k] = v - } - } - - if userConf.StopSignal == "" { - userConf.StopSignal = imageConf.StopSignal - } - return nil -} - -// CreateImageFromContainer creates a new image from a container. The container -// config will be updated by applying the change set to the custom config, then -// applying that config over the existing container config. 
-func (daemon *Daemon) CreateImageFromContainer(name string, c *backend.CreateImageConfig) (string, error) { - start := time.Now() - container, err := daemon.GetContainer(name) - if err != nil { - return "", err - } - - // It is not possible to commit a running container on Windows - if (runtime.GOOS == "windows") && container.IsRunning() { - return "", errors.Errorf("%+v does not support commit of a running container", runtime.GOOS) - } - - if container.IsDead() { - err := fmt.Errorf("You cannot commit container %s which is Dead", container.ID) - return "", errdefs.Conflict(err) - } - - if container.IsRemovalInProgress() { - err := fmt.Errorf("You cannot commit container %s which is being removed", container.ID) - return "", errdefs.Conflict(err) - } - - if c.Pause && !container.IsPaused() { - daemon.containerPause(container) - defer daemon.containerUnpause(container) - } - - if c.Config == nil { - c.Config = container.Config - } - newConfig, err := dockerfile.BuildFromConfig(c.Config, c.Changes, container.OS) - if err != nil { - return "", err - } - if err := merge(newConfig, container.Config); err != nil { - return "", err - } - - id, err := daemon.imageService.CommitImage(backend.CommitConfig{ - Author: c.Author, - Comment: c.Comment, - Config: newConfig, - ContainerConfig: container.Config, - ContainerID: container.ID, - ContainerMountLabel: container.MountLabel, - ContainerOS: container.OS, - ParentImageID: string(container.ImageID), - }) - if err != nil { - return "", err - } - - var imageRef string - if c.Repo != "" { - imageRef, err = daemon.imageService.TagImage(string(id), c.Repo, c.Tag) - if err != nil { - return "", err - } - } - daemon.LogContainerEventWithAttributes(container, "commit", map[string]string{ - "comment": c.Comment, - "imageID": id.String(), - "imageRef": imageRef, - }) - containerActions.WithValues("commit").UpdateSince(start) - return id.String(), nil -} diff --git a/vendor/github.com/docker/docker/daemon/config/config.go b/vendor/github.com/docker/docker/daemon/config/config.go deleted file mode 100644 index 6cda223a1..000000000 --- a/vendor/github.com/docker/docker/daemon/config/config.go +++ /dev/null @@ -1,567 +0,0 @@ -package config // import "github.com/docker/docker/daemon/config" - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "reflect" - "runtime" - "strings" - "sync" - - daemondiscovery "github.com/docker/docker/daemon/discovery" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/authorization" - "github.com/docker/docker/pkg/discovery" - "github.com/docker/docker/registry" - "github.com/imdario/mergo" - "github.com/sirupsen/logrus" - "github.com/spf13/pflag" -) - -const ( - // DefaultMaxConcurrentDownloads is the default value for - // maximum number of downloads that - // may take place at a time for each pull. - DefaultMaxConcurrentDownloads = 3 - // DefaultMaxConcurrentUploads is the default value for - // maximum number of uploads that - // may take place at a time for each push. - DefaultMaxConcurrentUploads = 5 - // StockRuntimeName is the reserved name/alias used to represent the - // OCI runtime being shipped with the docker daemon package. 
- StockRuntimeName = "runc" - // DefaultShmSize is the default value for container's shm size - DefaultShmSize = int64(67108864) - // DefaultNetworkMtu is the default value for network MTU - DefaultNetworkMtu = 1500 - // DisableNetworkBridge is the default value of the option to disable network bridge - DisableNetworkBridge = "none" - // DefaultInitBinary is the name of the default init binary - DefaultInitBinary = "docker-init" -) - -// flatOptions contains configuration keys -// that MUST NOT be parsed as deep structures. -// Use this to differentiate these options -// with others like the ones in CommonTLSOptions. -var flatOptions = map[string]bool{ - "cluster-store-opts": true, - "log-opts": true, - "runtimes": true, - "default-ulimits": true, -} - -// LogConfig represents the default log configuration. -// It includes json tags to deserialize configuration from a file -// using the same names that the flags in the command line use. -type LogConfig struct { - Type string `json:"log-driver,omitempty"` - Config map[string]string `json:"log-opts,omitempty"` -} - -// commonBridgeConfig stores all the platform-common bridge driver specific -// configuration. -type commonBridgeConfig struct { - Iface string `json:"bridge,omitempty"` - FixedCIDR string `json:"fixed-cidr,omitempty"` -} - -// NetworkConfig stores the daemon-wide networking configurations -type NetworkConfig struct { - // Default address pools for docker networks - DefaultAddressPools opts.PoolsOpt `json:"default-address-pools,omitempty"` -} - -// CommonTLSOptions defines TLS configuration for the daemon server. -// It includes json tags to deserialize configuration from a file -// using the same names that the flags in the command line use. -type CommonTLSOptions struct { - CAFile string `json:"tlscacert,omitempty"` - CertFile string `json:"tlscert,omitempty"` - KeyFile string `json:"tlskey,omitempty"` -} - -// CommonConfig defines the configuration of a docker daemon which is -// common across platforms. -// It includes json tags to deserialize configuration from a file -// using the same names that the flags in the command line use. -type CommonConfig struct { - AuthzMiddleware *authorization.Middleware `json:"-"` - AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins - AutoRestart bool `json:"-"` - Context map[string][]string `json:"-"` - DisableBridge bool `json:"-"` - DNS []string `json:"dns,omitempty"` - DNSOptions []string `json:"dns-opts,omitempty"` - DNSSearch []string `json:"dns-search,omitempty"` - ExecOptions []string `json:"exec-opts,omitempty"` - GraphDriver string `json:"storage-driver,omitempty"` - GraphOptions []string `json:"storage-opts,omitempty"` - Labels []string `json:"labels,omitempty"` - Mtu int `json:"mtu,omitempty"` - NetworkDiagnosticPort int `json:"network-diagnostic-port,omitempty"` - Pidfile string `json:"pidfile,omitempty"` - RawLogs bool `json:"raw-logs,omitempty"` - RootDeprecated string `json:"graph,omitempty"` - Root string `json:"data-root,omitempty"` - ExecRoot string `json:"exec-root,omitempty"` - SocketGroup string `json:"group,omitempty"` - CorsHeaders string `json:"api-cors-header,omitempty"` - - // TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests - // when pushing to a registry which does not support schema 2. 
This field is marked as - // deprecated because schema 1 manifests are deprecated in favor of schema 2 and the - // daemon ID will use a dedicated identifier not shared with exported signatures. - TrustKeyPath string `json:"deprecated-key-path,omitempty"` - - // LiveRestoreEnabled determines whether we should keep containers - // alive upon daemon shutdown/start - LiveRestoreEnabled bool `json:"live-restore,omitempty"` - - // ClusterStore is the storage backend used for the cluster information. It is used by both - // multihost networking (to store networks and endpoints information) and by the node discovery - // mechanism. - ClusterStore string `json:"cluster-store,omitempty"` - - // ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such - // as TLS configuration settings. - ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"` - - // ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node - // discovery. This should be a 'host:port' combination on which that daemon instance is - // reachable by other hosts. - ClusterAdvertise string `json:"cluster-advertise,omitempty"` - - // MaxConcurrentDownloads is the maximum number of downloads that - // may take place at a time for each pull. - MaxConcurrentDownloads *int `json:"max-concurrent-downloads,omitempty"` - - // MaxConcurrentUploads is the maximum number of uploads that - // may take place at a time for each push. - MaxConcurrentUploads *int `json:"max-concurrent-uploads,omitempty"` - - // ShutdownTimeout is the timeout value (in seconds) the daemon will wait for the container - // to stop when daemon is being shutdown - ShutdownTimeout int `json:"shutdown-timeout,omitempty"` - - Debug bool `json:"debug,omitempty"` - Hosts []string `json:"hosts,omitempty"` - LogLevel string `json:"log-level,omitempty"` - TLS bool `json:"tls,omitempty"` - TLSVerify bool `json:"tlsverify,omitempty"` - - // Embedded structs that allow config - // deserialization without the full struct. - CommonTLSOptions - - // SwarmDefaultAdvertiseAddr is the default host/IP or network interface - // to use if a wildcard address is specified in the ListenAddr value - // given to the /swarm/init endpoint and no advertise address is - // specified. - SwarmDefaultAdvertiseAddr string `json:"swarm-default-advertise-addr"` - - // SwarmRaftHeartbeatTick is the number of ticks in time for swarm mode raft quorum heartbeat - // Typical value is 1 - SwarmRaftHeartbeatTick uint32 `json:"swarm-raft-heartbeat-tick"` - - // SwarmRaftElectionTick is the number of ticks to elapse before followers in the quorum can propose - // a new round of leader election. Default, recommended value is at least 10X that of Heartbeat tick. - // Higher values can make the quorum less sensitive to transient faults in the environment, but this also - // means it takes longer for the managers to detect a down leader. - SwarmRaftElectionTick uint32 `json:"swarm-raft-election-tick"` - - MetricsAddress string `json:"metrics-addr"` - - LogConfig - BridgeConfig // bridgeConfig holds bridge network specific configuration. - NetworkConfig - registry.ServiceOptions - - sync.Mutex - // FIXME(vdemeester) This part is not that clear and is mainly dependent on cli flags - // It should probably be handled outside this package. 
- ValuesSet map[string]interface{} `json:"-"` - - Experimental bool `json:"experimental"` // Experimental indicates whether experimental features should be exposed or not - - // Exposed node Generic Resources - // e.g: ["orange=red", "orange=green", "orange=blue", "apple=3"] - NodeGenericResources []string `json:"node-generic-resources,omitempty"` - // NetworkControlPlaneMTU allows specifying the control plane MTU; this makes it possible to optimize network use in some components - NetworkControlPlaneMTU int `json:"network-control-plane-mtu,omitempty"` - - // ContainerAddr is the address used to connect to containerd if we're - // not starting it ourselves - ContainerdAddr string `json:"containerd,omitempty"` -} - -// IsValueSet returns true if a configuration value -// was explicitly set in the configuration file. -func (conf *Config) IsValueSet(name string) bool { - if conf.ValuesSet == nil { - return false - } - _, ok := conf.ValuesSet[name] - return ok -} - -// New returns a new fully initialized Config struct -func New() *Config { - config := Config{} - config.LogConfig.Config = make(map[string]string) - config.ClusterOpts = make(map[string]string) - - if runtime.GOOS != "linux" { - config.V2Only = true - } - return &config -} - -// ParseClusterAdvertiseSettings parses the specified advertise settings -func ParseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) { - if clusterAdvertise == "" { - return "", daemondiscovery.ErrDiscoveryDisabled - } - if clusterStore == "" { - return "", errors.New("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration") - } - - advertise, err := discovery.ParseAdvertise(clusterAdvertise) - if err != nil { - return "", fmt.Errorf("discovery advertise parsing failed (%v)", err) - } - return advertise, nil -} - -// GetConflictFreeLabels validates labels for conflicts. -// In swarm, duplicate labels are removed, -// so only non-conflicting values are accepted here. -// If a key-value pair is repeated exactly, only the last label is kept -func GetConflictFreeLabels(labels []string) ([]string, error) { - labelMap := map[string]string{} - for _, label := range labels { - stringSlice := strings.SplitN(label, "=", 2) - if len(stringSlice) > 1 { - // If there is a conflict we will return an error - if v, ok := labelMap[stringSlice[0]]; ok && v != stringSlice[1] { - return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", stringSlice[0], stringSlice[1], stringSlice[0], v) - } - labelMap[stringSlice[0]] = stringSlice[1] - } - } - - newLabels := []string{} - for k, v := range labelMap { - newLabels = append(newLabels, fmt.Sprintf("%s=%s", k, v)) - } - return newLabels, nil -} - -// ValidateReservedNamespaceLabels errors if the reserved namespaces com.docker.*, -// io.docker.*, org.dockerproject.* are used in a configured engine label. -// -// TODO: This is a separate function because we need to warn users first of the -// deprecation. When we return an error, this logic can be added to Validate -// or GetConflictFreeLabels instead of being here.
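To make GetConflictFreeLabels's dedupe-versus-conflict rule above concrete, a standalone sketch with the same semantics (a reimplementation for illustration, not the vendored function):

package main

import (
	"fmt"
	"strings"
)

// conflictFreeLabels: exact duplicates collapse; the same key with two
// different values is an error.
func conflictFreeLabels(labels []string) ([]string, error) {
	m := map[string]string{}
	for _, l := range labels {
		kv := strings.SplitN(l, "=", 2)
		if len(kv) > 1 {
			if v, ok := m[kv[0]]; ok && v != kv[1] {
				return nil, fmt.Errorf("conflict labels for %s=%s and %s=%s", kv[0], kv[1], kv[0], v)
			}
			m[kv[0]] = kv[1]
		}
	}
	out := []string{}
	for k, v := range m {
		out = append(out, k+"="+v)
	}
	return out, nil
}

func main() {
	fmt.Println(conflictFreeLabels([]string{"env=prod", "env=prod"})) // [env=prod] <nil>
	fmt.Println(conflictFreeLabels([]string{"env=prod", "env=dev"}))  // conflict error
}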
-func ValidateReservedNamespaceLabels(labels []string) error { - for _, label := range labels { - lowered := strings.ToLower(label) - if strings.HasPrefix(lowered, "com.docker.") || strings.HasPrefix(lowered, "io.docker.") || - strings.HasPrefix(lowered, "org.dockerproject.") { - return fmt.Errorf( - "label %s not allowed: the namespaces com.docker.*, io.docker.*, and org.dockerproject.* are reserved for Docker's internal use", - label) - } - } - return nil -} - -// Reload reads the configuration in the host and reloads the daemon and server. -func Reload(configFile string, flags *pflag.FlagSet, reload func(*Config)) error { - logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile) - newConfig, err := getConflictFreeConfiguration(configFile, flags) - if err != nil { - if flags.Changed("config-file") || !os.IsNotExist(err) { - return fmt.Errorf("unable to configure the Docker daemon with file %s: %v", configFile, err) - } - newConfig = New() - } - - if err := Validate(newConfig); err != nil { - return fmt.Errorf("file configuration validation failed (%v)", err) - } - - // Check if duplicate label-keys with different values are found - newLabels, err := GetConflictFreeLabels(newConfig.Labels) - if err != nil { - return err - } - newConfig.Labels = newLabels - - reload(newConfig) - return nil -} - -// boolValue is an interface that boolean value flags implement -// to tell the command line how to make -name equivalent to -name=true. -type boolValue interface { - IsBoolFlag() bool -} - -// MergeDaemonConfigurations reads a configuration file, -// loads the file configuration in an isolated structure, -// and merges the configuration provided from flags on top -// if there are no conflicts. -func MergeDaemonConfigurations(flagsConfig *Config, flags *pflag.FlagSet, configFile string) (*Config, error) { - fileConfig, err := getConflictFreeConfiguration(configFile, flags) - if err != nil { - return nil, err - } - - if err := Validate(fileConfig); err != nil { - return nil, fmt.Errorf("configuration validation from file failed (%v)", err) - } - - // merge flags configuration on top of the file configuration - if err := mergo.Merge(fileConfig, flagsConfig); err != nil { - return nil, err - } - - // We need to validate again once both fileConfig and flagsConfig - // have been merged - if err := Validate(fileConfig); err != nil { - return nil, fmt.Errorf("merged configuration validation from file and command line flags failed (%v)", err) - } - - return fileConfig, nil -} - -// getConflictFreeConfiguration loads the configuration from a JSON file. -// It compares that configuration with the one provided by the flags, -// and returns an error if there are conflicts. -func getConflictFreeConfiguration(configFile string, flags *pflag.FlagSet) (*Config, error) { - b, err := ioutil.ReadFile(configFile) - if err != nil { - return nil, err - } - - var config Config - var reader io.Reader - if flags != nil { - var jsonConfig map[string]interface{} - reader = bytes.NewReader(b) - if err := json.NewDecoder(reader).Decode(&jsonConfig); err != nil { - return nil, err - } - - configSet := configValuesSet(jsonConfig) - - if err := findConfigurationConflicts(configSet, flags); err != nil { - return nil, err - } - - // Override flag values to make sure the values set in the config file with nullable values, like `false`, - // are not overridden by default truthy values from the flags that were not explicitly set. - // See https://github.com/docker/docker/issues/20289 for an example. 
- // - // TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers. - namedOptions := make(map[string]interface{}) - for key, value := range configSet { - f := flags.Lookup(key) - if f == nil { // ignore named flags that don't match - namedOptions[key] = value - continue - } - - if _, ok := f.Value.(boolValue); ok { - f.Value.Set(fmt.Sprintf("%v", value)) - } - } - if len(namedOptions) > 0 { - // set also default for mergeVal flags that are boolValue at the same time. - flags.VisitAll(func(f *pflag.Flag) { - if opt, named := f.Value.(opts.NamedOption); named { - v, set := namedOptions[opt.Name()] - _, boolean := f.Value.(boolValue) - if set && boolean { - f.Value.Set(fmt.Sprintf("%v", v)) - } - } - }) - } - - config.ValuesSet = configSet - } - - reader = bytes.NewReader(b) - if err := json.NewDecoder(reader).Decode(&config); err != nil { - return nil, err - } - - if config.RootDeprecated != "" { - logrus.Warn(`The "graph" config file option is deprecated. Please use "data-root" instead.`) - - if config.Root != "" { - return nil, fmt.Errorf(`cannot specify both "graph" and "data-root" config file options`) - } - - config.Root = config.RootDeprecated - } - - return &config, nil -} - -// configValuesSet returns the configuration values explicitly set in the file. -func configValuesSet(config map[string]interface{}) map[string]interface{} { - flatten := make(map[string]interface{}) - for k, v := range config { - if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] { - for km, vm := range m { - flatten[km] = vm - } - continue - } - - flatten[k] = v - } - return flatten -} - -// findConfigurationConflicts iterates over the provided flags searching for -// duplicated configurations and unknown keys. It returns an error with all the conflicts if -// it finds any. -func findConfigurationConflicts(config map[string]interface{}, flags *pflag.FlagSet) error { - // 1. Search keys from the file that we don't recognize as flags. - unknownKeys := make(map[string]interface{}) - for key, value := range config { - if flag := flags.Lookup(key); flag == nil { - unknownKeys[key] = value - } - } - - // 2. Discard values that implement NamedOption. - // Their configuration name differs from their flag name, like `labels` and `label`. - if len(unknownKeys) > 0 { - unknownNamedConflicts := func(f *pflag.Flag) { - if namedOption, ok := f.Value.(opts.NamedOption); ok { - if _, valid := unknownKeys[namedOption.Name()]; valid { - delete(unknownKeys, namedOption.Name()) - } - } - } - flags.VisitAll(unknownNamedConflicts) - } - - if len(unknownKeys) > 0 { - var unknown []string - for key := range unknownKeys { - unknown = append(unknown, key) - } - return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", ")) - } - - var conflicts []string - printConflict := func(name string, flagValue, fileValue interface{}) string { - return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue) - } - - // 3. Search keys that are present as a flag and as a file option. 
- duplicatedConflicts := func(f *pflag.Flag) { - // search option name in the json configuration payload if the value is a named option - if namedOption, ok := f.Value.(opts.NamedOption); ok { - if optsValue, ok := config[namedOption.Name()]; ok { - conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue)) - } - } else { - // search flag name in the json configuration payload - for _, name := range []string{f.Name, f.Shorthand} { - if value, ok := config[name]; ok { - conflicts = append(conflicts, printConflict(name, f.Value.String(), value)) - break - } - } - } - } - - flags.Visit(duplicatedConflicts) - - if len(conflicts) > 0 { - return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", ")) - } - return nil -} - -// Validate validates some specific configs. -// such as config.DNS, config.Labels, config.DNSSearch, -// as well as config.MaxConcurrentDownloads, config.MaxConcurrentUploads. -func Validate(config *Config) error { - // validate DNS - for _, dns := range config.DNS { - if _, err := opts.ValidateIPAddress(dns); err != nil { - return err - } - } - - // validate DNSSearch - for _, dnsSearch := range config.DNSSearch { - if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil { - return err - } - } - - // validate Labels - for _, label := range config.Labels { - if _, err := opts.ValidateLabel(label); err != nil { - return err - } - } - // validate MaxConcurrentDownloads - if config.MaxConcurrentDownloads != nil && *config.MaxConcurrentDownloads < 0 { - return fmt.Errorf("invalid max concurrent downloads: %d", *config.MaxConcurrentDownloads) - } - // validate MaxConcurrentUploads - if config.MaxConcurrentUploads != nil && *config.MaxConcurrentUploads < 0 { - return fmt.Errorf("invalid max concurrent uploads: %d", *config.MaxConcurrentUploads) - } - - // validate that "default" runtime is not reset - if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 { - if _, ok := runtimes[StockRuntimeName]; ok { - return fmt.Errorf("runtime name '%s' is reserved", StockRuntimeName) - } - } - - if _, err := ParseGenericResources(config.NodeGenericResources); err != nil { - return err - } - - if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" && defaultRuntime != StockRuntimeName { - runtimes := config.GetAllRuntimes() - if _, ok := runtimes[defaultRuntime]; !ok { - return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime) - } - } - - // validate platform-specific settings - return config.ValidatePlatformConfig() -} - -// ModifiedDiscoverySettings returns whether the discovery configuration has been modified or not. 
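configValuesSet above folds one level of nesting into the top-level key set, except for keys registered in flatOptions. A quick sketch of that behavior; the "tls-opts" key is hypothetical and only illustrates a non-flat nested group:

package main

import "fmt"

var flatOptions = map[string]bool{"log-opts": true}

// flatten mirrors configValuesSet: nested maps are folded into the top
// level unless their key is a registered flat option.
func flatten(config map[string]interface{}) map[string]interface{} {
	out := make(map[string]interface{})
	for k, v := range config {
		if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] {
			for km, vm := range m {
				out[km] = vm
			}
			continue
		}
		out[k] = v
	}
	return out
}

func main() {
	cfg := map[string]interface{}{
		"log-opts": map[string]interface{}{"max-size": "10m"},     // flat: kept whole
		"tls-opts": map[string]interface{}{"tlscacert": "ca.pem"}, // folded to "tlscacert"
	}
	fmt.Println(flatten(cfg)) // map iteration order is randomized
}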
-func ModifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool { - if config.ClusterStore != backendType || config.ClusterAdvertise != advertise { - return true - } - - if (config.ClusterOpts == nil && clusterOpts == nil) || - (config.ClusterOpts == nil && len(clusterOpts) == 0) || - (len(config.ClusterOpts) == 0 && clusterOpts == nil) { - return false - } - - return !reflect.DeepEqual(config.ClusterOpts, clusterOpts) -} diff --git a/vendor/github.com/docker/docker/daemon/config/config_common_unix.go b/vendor/github.com/docker/docker/daemon/config/config_common_unix.go deleted file mode 100644 index 4bdf75886..000000000 --- a/vendor/github.com/docker/docker/daemon/config/config_common_unix.go +++ /dev/null @@ -1,71 +0,0 @@ -// +build linux freebsd - -package config // import "github.com/docker/docker/daemon/config" - -import ( - "net" - - "github.com/docker/docker/api/types" -) - -// CommonUnixConfig defines configuration of a docker daemon that is -// common across Unix platforms. -type CommonUnixConfig struct { - Runtimes map[string]types.Runtime `json:"runtimes,omitempty"` - DefaultRuntime string `json:"default-runtime,omitempty"` - DefaultInitBinary string `json:"default-init,omitempty"` -} - -type commonUnixBridgeConfig struct { - DefaultIP net.IP `json:"ip,omitempty"` - IP string `json:"bip,omitempty"` - DefaultGatewayIPv4 net.IP `json:"default-gateway,omitempty"` - DefaultGatewayIPv6 net.IP `json:"default-gateway-v6,omitempty"` - InterContainerCommunication bool `json:"icc,omitempty"` -} - -// GetRuntime returns the runtime path and arguments for a given -// runtime name -func (conf *Config) GetRuntime(name string) *types.Runtime { - conf.Lock() - defer conf.Unlock() - if rt, ok := conf.Runtimes[name]; ok { - return &rt - } - return nil -} - -// GetDefaultRuntimeName returns the current default runtime -func (conf *Config) GetDefaultRuntimeName() string { - conf.Lock() - rt := conf.DefaultRuntime - conf.Unlock() - - return rt -} - -// GetAllRuntimes returns a copy of the runtimes map -func (conf *Config) GetAllRuntimes() map[string]types.Runtime { - conf.Lock() - rts := conf.Runtimes - conf.Unlock() - return rts -} - -// GetExecRoot returns the user configured Exec-root -func (conf *Config) GetExecRoot() string { - return conf.ExecRoot -} - -// GetInitPath returns the configured docker-init path -func (conf *Config) GetInitPath() string { - conf.Lock() - defer conf.Unlock() - if conf.InitPath != "" { - return conf.InitPath - } - if conf.DefaultInitBinary != "" { - return conf.DefaultInitBinary - } - return DefaultInitBinary -} diff --git a/vendor/github.com/docker/docker/daemon/config/config_unix.go b/vendor/github.com/docker/docker/daemon/config/config_unix.go deleted file mode 100644 index 1970928f9..000000000 --- a/vendor/github.com/docker/docker/daemon/config/config_unix.go +++ /dev/null @@ -1,87 +0,0 @@ -// +build linux freebsd - -package config // import "github.com/docker/docker/daemon/config" - -import ( - "fmt" - - containertypes "github.com/docker/docker/api/types/container" - "github.com/docker/docker/opts" - "github.com/docker/go-units" -) - -const ( - // DefaultIpcMode is default for container's IpcMode, if not set otherwise - DefaultIpcMode = "shareable" // TODO: change to private -) - -// Config defines the configuration of a docker daemon. -// It includes json tags to deserialize configuration from a file -// using the same names that the flags in the command line use.
-type Config struct { - CommonConfig - - // These fields are common to all unix platforms. - CommonUnixConfig - // Fields below here are platform specific. - CgroupParent string `json:"cgroup-parent,omitempty"` - EnableSelinuxSupport bool `json:"selinux-enabled,omitempty"` - RemappedRoot string `json:"userns-remap,omitempty"` - Ulimits map[string]*units.Ulimit `json:"default-ulimits,omitempty"` - CPURealtimePeriod int64 `json:"cpu-rt-period,omitempty"` - CPURealtimeRuntime int64 `json:"cpu-rt-runtime,omitempty"` - OOMScoreAdjust int `json:"oom-score-adjust,omitempty"` - Init bool `json:"init,omitempty"` - InitPath string `json:"init-path,omitempty"` - SeccompProfile string `json:"seccomp-profile,omitempty"` - ShmSize opts.MemBytes `json:"default-shm-size,omitempty"` - NoNewPrivileges bool `json:"no-new-privileges,omitempty"` - IpcMode string `json:"default-ipc-mode,omitempty"` -} - -// BridgeConfig stores all the bridge driver specific -// configuration. -type BridgeConfig struct { - commonBridgeConfig - - // These fields are common to all unix platforms. - commonUnixBridgeConfig - - // Fields below here are platform specific. - EnableIPv6 bool `json:"ipv6,omitempty"` - EnableIPTables bool `json:"iptables,omitempty"` - EnableIPForward bool `json:"ip-forward,omitempty"` - EnableIPMasq bool `json:"ip-masq,omitempty"` - EnableUserlandProxy bool `json:"userland-proxy,omitempty"` - UserlandProxyPath string `json:"userland-proxy-path,omitempty"` - FixedCIDRv6 string `json:"fixed-cidr-v6,omitempty"` -} - -// IsSwarmCompatible defines if swarm mode can be enabled in this config -func (conf *Config) IsSwarmCompatible() error { - if conf.ClusterStore != "" || conf.ClusterAdvertise != "" { - return fmt.Errorf("--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode") - } - if conf.LiveRestoreEnabled { - return fmt.Errorf("--live-restore daemon configuration is incompatible with swarm mode") - } - return nil -} - -func verifyDefaultIpcMode(mode string) error { - const hint = "Use \"shareable\" or \"private\"." - - dm := containertypes.IpcMode(mode) - if !dm.Valid() { - return fmt.Errorf("Default IPC mode setting (%v) is invalid. "+hint, dm) - } - if dm != "" && !dm.IsPrivate() && !dm.IsShareable() { - return fmt.Errorf("IPC mode \"%v\" is not supported as default value. "+hint, dm) - } - return nil -} - -// ValidatePlatformConfig checks if any platform-specific configuration settings are invalid. -func (conf *Config) ValidatePlatformConfig() error { - return verifyDefaultIpcMode(conf.IpcMode) -} diff --git a/vendor/github.com/docker/docker/daemon/config/config_windows.go b/vendor/github.com/docker/docker/daemon/config/config_windows.go deleted file mode 100644 index 0aa7d54bf..000000000 --- a/vendor/github.com/docker/docker/daemon/config/config_windows.go +++ /dev/null @@ -1,57 +0,0 @@ -package config // import "github.com/docker/docker/daemon/config" - -import ( - "github.com/docker/docker/api/types" -) - -// BridgeConfig stores all the bridge driver specific -// configuration. -type BridgeConfig struct { - commonBridgeConfig -} - -// Config defines the configuration of a docker daemon. -// These are the configuration settings that you pass -// to the docker daemon when you launch it with say: `dockerd -e windows` -type Config struct { - CommonConfig - - // Fields below here are platform specific. (There are none presently - // for the Windows daemon.) 
-} - -// GetRuntime returns the runtime path and arguments for a given -// runtime name -func (conf *Config) GetRuntime(name string) *types.Runtime { - return nil -} - -// GetInitPath returns the configured docker-init path -func (conf *Config) GetInitPath() string { - return "" -} - -// GetDefaultRuntimeName returns the current default runtime -func (conf *Config) GetDefaultRuntimeName() string { - return StockRuntimeName -} - -// GetAllRuntimes returns a copy of the runtimes map -func (conf *Config) GetAllRuntimes() map[string]types.Runtime { - return map[string]types.Runtime{} -} - -// GetExecRoot returns the user configured Exec-root -func (conf *Config) GetExecRoot() string { - return "" -} - -// IsSwarmCompatible defines if swarm mode can be enabled in this config -func (conf *Config) IsSwarmCompatible() error { - return nil -} - -// ValidatePlatformConfig checks if any platform-specific configuration settings are invalid. -func (conf *Config) ValidatePlatformConfig() error { - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/config/opts.go b/vendor/github.com/docker/docker/daemon/config/opts.go deleted file mode 100644 index 8b114929f..000000000 --- a/vendor/github.com/docker/docker/daemon/config/opts.go +++ /dev/null @@ -1,22 +0,0 @@ -package config // import "github.com/docker/docker/daemon/config" - -import ( - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/daemon/cluster/convert" - "github.com/docker/swarmkit/api/genericresource" -) - -// ParseGenericResources parses and validates the specified strings as a list of GenericResource -func ParseGenericResources(value []string) ([]swarm.GenericResource, error) { - if len(value) == 0 { - return nil, nil - } - - resources, err := genericresource.Parse(value) - if err != nil { - return nil, err - } - - obj := convert.GenericResourcesFromGRPC(resources) - return obj, nil -} diff --git a/vendor/github.com/docker/docker/daemon/configs.go b/vendor/github.com/docker/docker/daemon/configs.go deleted file mode 100644 index 4fd0d2272..000000000 --- a/vendor/github.com/docker/docker/daemon/configs.go +++ /dev/null @@ -1,21 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - swarmtypes "github.com/docker/docker/api/types/swarm" - "github.com/sirupsen/logrus" -) - -// SetContainerConfigReferences sets the container config references needed -func (daemon *Daemon) SetContainerConfigReferences(name string, refs []*swarmtypes.ConfigReference) error { - if !configsSupported() && len(refs) > 0 { - logrus.Warn("configs are not supported on this platform") - return nil - } - - c, err := daemon.GetContainer(name) - if err != nil { - return err - } - c.ConfigReferences = append(c.ConfigReferences, refs...)
- return nil -} diff --git a/vendor/github.com/docker/docker/daemon/configs_linux.go b/vendor/github.com/docker/docker/daemon/configs_linux.go deleted file mode 100644 index ceb666337..000000000 --- a/vendor/github.com/docker/docker/daemon/configs_linux.go +++ /dev/null @@ -1,5 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -func configsSupported() bool { - return true -} diff --git a/vendor/github.com/docker/docker/daemon/configs_unsupported.go b/vendor/github.com/docker/docker/daemon/configs_unsupported.go deleted file mode 100644 index ae6f14f54..000000000 --- a/vendor/github.com/docker/docker/daemon/configs_unsupported.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !linux,!windows - -package daemon // import "github.com/docker/docker/daemon" - -func configsSupported() bool { - return false -} diff --git a/vendor/github.com/docker/docker/daemon/configs_windows.go b/vendor/github.com/docker/docker/daemon/configs_windows.go deleted file mode 100644 index ceb666337..000000000 --- a/vendor/github.com/docker/docker/daemon/configs_windows.go +++ /dev/null @@ -1,5 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -func configsSupported() bool { - return true -} diff --git a/vendor/github.com/docker/docker/daemon/container.go b/vendor/github.com/docker/docker/daemon/container.go deleted file mode 100644 index c8e205397..000000000 --- a/vendor/github.com/docker/docker/daemon/container.go +++ /dev/null @@ -1,358 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "fmt" - "os" - "path" - "path/filepath" - "runtime" - "strings" - "time" - - containertypes "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/strslice" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/network" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/image" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/signal" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/pkg/truncindex" - "github.com/docker/docker/runconfig" - volumemounts "github.com/docker/docker/volume/mounts" - "github.com/docker/go-connections/nat" - "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" -) - -// GetContainer looks for a container using the provided information, which could be -// one of the following inputs from the caller: -// - A full container ID, which will exact match a container in daemon's list -// - A container name, which will only exact match via the GetByName() function -// - A partial container ID prefix (e.g. 
short ID) of any length that is -// unique enough to only return a single container object -// If none of these searches succeed, an error is returned -func (daemon *Daemon) GetContainer(prefixOrName string) (*container.Container, error) { - if len(prefixOrName) == 0 { - return nil, errors.WithStack(invalidIdentifier(prefixOrName)) - } - - if containerByID := daemon.containers.Get(prefixOrName); containerByID != nil { - // prefix is an exact match to a full container ID - return containerByID, nil - } - - // GetByName will match only an exact name provided; we ignore errors - if containerByName, _ := daemon.GetByName(prefixOrName); containerByName != nil { - // prefix is an exact match to a full container Name - return containerByName, nil - } - - containerID, indexError := daemon.idIndex.Get(prefixOrName) - if indexError != nil { - // When truncindex defines an error type, use that instead - if indexError == truncindex.ErrNotExist { - return nil, containerNotFound(prefixOrName) - } - return nil, errdefs.System(indexError) - } - return daemon.containers.Get(containerID), nil -} - -// checkContainer makes sure the specified container satisfies the specified conditions -func (daemon *Daemon) checkContainer(container *container.Container, conditions ...func(*container.Container) error) error { - for _, condition := range conditions { - if err := condition(container); err != nil { - return err - } - } - return nil -} - -// Exists returns true if a container of the specified ID or name exists, -// false otherwise. -func (daemon *Daemon) Exists(id string) bool { - c, _ := daemon.GetContainer(id) - return c != nil -} - -// IsPaused returns a bool indicating if the specified container is paused. -func (daemon *Daemon) IsPaused(id string) bool { - c, _ := daemon.GetContainer(id) - return c.State.IsPaused() -} - -func (daemon *Daemon) containerRoot(id string) string { - return filepath.Join(daemon.repository, id) -} - -// Load reads the contents of a container from disk. -// This is typically done at startup.
-func (daemon *Daemon) load(id string) (*container.Container, error) { - container := daemon.newBaseContainer(id) - - if err := container.FromDisk(); err != nil { - return nil, err - } - if err := label.ReserveLabel(container.ProcessLabel); err != nil { - return nil, err - } - - if container.ID != id { - return container, fmt.Errorf("Container %s is stored at %s", container.ID, id) - } - - return container, nil -} - -// Register makes a container object usable by the daemon as <container.ID> -func (daemon *Daemon) Register(c *container.Container) error { - // Attach to stdout and stderr - if c.Config.OpenStdin { - c.StreamConfig.NewInputPipes() - } else { - c.StreamConfig.NewNopInputPipe() - } - - // once in the memory store it is visible to other goroutines - // grab a Lock until it has been checkpointed to avoid races - c.Lock() - defer c.Unlock() - - daemon.containers.Add(c.ID, c) - daemon.idIndex.Add(c.ID) - return c.CheckpointTo(daemon.containersReplica) -} - -func (daemon *Daemon) newContainer(name string, operatingSystem string, config *containertypes.Config, hostConfig *containertypes.HostConfig, imgID image.ID, managed bool) (*container.Container, error) { - var ( - id string - err error - noExplicitName = name == "" - ) - id, name, err = daemon.generateIDAndName(name) - if err != nil { - return nil, err - } - - if hostConfig.NetworkMode.IsHost() { - if config.Hostname == "" { - config.Hostname, err = os.Hostname() - if err != nil { - return nil, errdefs.System(err) - } - } - } else { - daemon.generateHostname(id, config) - } - entrypoint, args := daemon.getEntrypointAndArgs(config.Entrypoint, config.Cmd) - - base := daemon.newBaseContainer(id) - base.Created = time.Now().UTC() - base.Managed = managed - base.Path = entrypoint - base.Args = args //FIXME: de-duplicate from config - base.Config = config - base.HostConfig = &containertypes.HostConfig{} - base.ImageID = imgID - base.NetworkSettings = &network.Settings{IsAnonymousEndpoint: noExplicitName} - base.Name = name - base.Driver = daemon.imageService.GraphDriverForOS(operatingSystem) - base.OS = operatingSystem - return base, err -} - -// GetByName returns a container given a name. -func (daemon *Daemon) GetByName(name string) (*container.Container, error) { - if len(name) == 0 { - return nil, fmt.Errorf("No container name supplied") - } - fullName := name - if name[0] != '/' { - fullName = "/" + name - } - id, err := daemon.containersReplica.Snapshot().GetID(fullName) - if err != nil { - return nil, fmt.Errorf("Could not find entity for %s", name) - } - e := daemon.containers.Get(id) - if e == nil { - return nil, fmt.Errorf("Could not find container for entity id %s", id) - } - return e, nil -} - -// newBaseContainer creates a new container with its initial -// configuration based on the root storage from the daemon. -func (daemon *Daemon) newBaseContainer(id string) *container.Container { - return container.NewBaseContainer(id, daemon.containerRoot(id)) -} - -func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint strslice.StrSlice, configCmd strslice.StrSlice) (string, []string) { - if len(configEntrypoint) != 0 { - return configEntrypoint[0], append(configEntrypoint[1:], configCmd...)
- } - return configCmd[0], configCmd[1:] -} - -func (daemon *Daemon) generateHostname(id string, config *containertypes.Config) { - // Generate default hostname - if config.Hostname == "" { - config.Hostname = id[:12] - } -} - -func (daemon *Daemon) setSecurityOptions(container *container.Container, hostConfig *containertypes.HostConfig) error { - container.Lock() - defer container.Unlock() - return daemon.parseSecurityOpt(container, hostConfig) -} - -func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *containertypes.HostConfig) error { - // Do not lock while creating volumes since this could be calling out to external plugins - // Don't want to block other actions, like `docker ps` because we're waiting on an external plugin - if err := daemon.registerMountPoints(container, hostConfig); err != nil { - return err - } - - container.Lock() - defer container.Unlock() - - // Register any links from the host config before starting the container - if err := daemon.registerLinks(container, hostConfig); err != nil { - return err - } - - runconfig.SetDefaultNetModeIfBlank(hostConfig) - container.HostConfig = hostConfig - return container.CheckpointTo(daemon.containersReplica) -} - -// verifyContainerSettings performs validation of the hostconfig and config -// structures. -func (daemon *Daemon) verifyContainerSettings(platform string, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { - // First perform verification of settings common across all platforms. - if config != nil { - if config.WorkingDir != "" { - wdInvalid := false - if runtime.GOOS == platform { - config.WorkingDir = filepath.FromSlash(config.WorkingDir) // Ensure in platform semantics - if !system.IsAbs(config.WorkingDir) { - wdInvalid = true - } - } else { - // LCOW. 
Force Unix semantics - config.WorkingDir = strings.Replace(config.WorkingDir, string(os.PathSeparator), "/", -1) - if !path.IsAbs(config.WorkingDir) { - wdInvalid = true - } - } - if wdInvalid { - return nil, fmt.Errorf("the working directory '%s' is invalid, it needs to be an absolute path", config.WorkingDir) - } - } - - if len(config.StopSignal) > 0 { - _, err := signal.ParseSignal(config.StopSignal) - if err != nil { - return nil, err - } - } - - // Validate if Env contains empty variable or not (e.g., ``, `=foo`) - for _, env := range config.Env { - if _, err := opts.ValidateEnv(env); err != nil { - return nil, err - } - } - - // Validate the healthcheck params of Config - if config.Healthcheck != nil { - if config.Healthcheck.Interval != 0 && config.Healthcheck.Interval < containertypes.MinimumDuration { - return nil, errors.Errorf("Interval in Healthcheck cannot be less than %s", containertypes.MinimumDuration) - } - - if config.Healthcheck.Timeout != 0 && config.Healthcheck.Timeout < containertypes.MinimumDuration { - return nil, errors.Errorf("Timeout in Healthcheck cannot be less than %s", containertypes.MinimumDuration) - } - - if config.Healthcheck.Retries < 0 { - return nil, errors.Errorf("Retries in Healthcheck cannot be negative") - } - - if config.Healthcheck.StartPeriod != 0 && config.Healthcheck.StartPeriod < containertypes.MinimumDuration { - return nil, errors.Errorf("StartPeriod in Healthcheck cannot be less than %s", containertypes.MinimumDuration) - } - } - } - - if hostConfig == nil { - return nil, nil - } - - if hostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() { - return nil, errors.Errorf("can't create 'AutoRemove' container with restart policy") - } - - // Validate mounts; check if host directories still exist - parser := volumemounts.NewParser(platform) - for _, cfg := range hostConfig.Mounts { - if err := parser.ValidateMountConfig(&cfg); err != nil { - return nil, err - } - } - - for _, extraHost := range hostConfig.ExtraHosts { - if _, err := opts.ValidateExtraHost(extraHost); err != nil { - return nil, err - } - } - - for port := range hostConfig.PortBindings { - _, portStr := nat.SplitProtoPort(string(port)) - if _, err := nat.ParsePort(portStr); err != nil { - return nil, errors.Errorf("invalid port specification: %q", portStr) - } - for _, pb := range hostConfig.PortBindings[port] { - _, err := nat.NewPort(nat.SplitProtoPort(pb.HostPort)) - if err != nil { - return nil, errors.Errorf("invalid port specification: %q", pb.HostPort) - } - } - } - - p := hostConfig.RestartPolicy - - switch p.Name { - case "always", "unless-stopped", "no": - if p.MaximumRetryCount != 0 { - return nil, errors.Errorf("maximum retry count cannot be used with restart policy '%s'", p.Name) - } - case "on-failure": - if p.MaximumRetryCount < 0 { - return nil, errors.Errorf("maximum retry count cannot be negative") - } - case "": - // do nothing - default: - return nil, errors.Errorf("invalid restart policy '%s'", p.Name) - } - - if !hostConfig.Isolation.IsValid() { - return nil, errors.Errorf("invalid isolation '%s' on %s", hostConfig.Isolation, runtime.GOOS) - } - - var ( - err error - warnings []string - ) - // Now do platform-specific verification - if warnings, err = verifyPlatformContainerSettings(daemon, hostConfig, config, update); err != nil { - return warnings, err - } - if hostConfig.NetworkMode.IsHost() && len(hostConfig.PortBindings) > 0 { - warnings = append(warnings, "Published ports are discarded when using host network mode") - } - return warnings, err -} 
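
As a reading aid for the deleted hunk above: verifyContainerSettings enforces a compact rule for restart policies (a retry count is only meaningful with on-failure, and must be non-negative there). A minimal, self-contained Go sketch of just that rule follows; it is not part of the vendored code, and RestartPolicy here is a simplified stand-in for the real containertypes type.

package main

import (
	"errors"
	"fmt"
)

// RestartPolicy is a reduced stand-in for containertypes.RestartPolicy,
// kept only to make the validation rule below runnable on its own.
type RestartPolicy struct {
	Name              string
	MaximumRetryCount int
}

// validateRestartPolicy mirrors the switch in verifyContainerSettings:
// fixed policies reject a retry count, on-failure requires it non-negative,
// an empty policy is accepted, anything else is invalid.
func validateRestartPolicy(p RestartPolicy) error {
	switch p.Name {
	case "always", "unless-stopped", "no":
		if p.MaximumRetryCount != 0 {
			return fmt.Errorf("maximum retry count cannot be used with restart policy '%s'", p.Name)
		}
	case "on-failure":
		if p.MaximumRetryCount < 0 {
			return errors.New("maximum retry count cannot be negative")
		}
	case "":
		// an empty policy name is treated as "no policy set"
	default:
		return fmt.Errorf("invalid restart policy '%s'", p.Name)
	}
	return nil
}

func main() {
	fmt.Println(validateRestartPolicy(RestartPolicy{Name: "on-failure", MaximumRetryCount: 5})) // <nil>
	fmt.Println(validateRestartPolicy(RestartPolicy{Name: "always", MaximumRetryCount: 3}))     // error
}
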
diff --git a/vendor/github.com/docker/docker/daemon/container_linux.go b/vendor/github.com/docker/docker/daemon/container_linux.go deleted file mode 100644 index e6f5bf2cc..000000000 --- a/vendor/github.com/docker/docker/daemon/container_linux.go +++ /dev/null @@ -1,30 +0,0 @@ -//+build !windows - -package daemon // import "github.com/docker/docker/daemon" - -import ( - "github.com/docker/docker/container" - "github.com/docker/docker/errdefs" -) - -func (daemon *Daemon) saveApparmorConfig(container *container.Container) error { - container.AppArmorProfile = "" //we don't care about the previous value. - - if !daemon.apparmorEnabled { - return nil // if apparmor is disabled there is nothing to do here. - } - - if err := parseSecurityOpt(container, container.HostConfig); err != nil { - return errdefs.InvalidParameter(err) - } - - if !container.HostConfig.Privileged { - if container.AppArmorProfile == "" { - container.AppArmorProfile = defaultApparmorProfile - } - - } else { - container.AppArmorProfile = "unconfined" - } - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/container_operations.go b/vendor/github.com/docker/docker/daemon/container_operations.go deleted file mode 100644 index df84f88f3..000000000 --- a/vendor/github.com/docker/docker/daemon/container_operations.go +++ /dev/null @@ -1,1150 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "errors" - "fmt" - "net" - "os" - "path" - "runtime" - "strings" - "time" - - containertypes "github.com/docker/docker/api/types/container" - networktypes "github.com/docker/docker/api/types/network" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/network" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/runconfig" - "github.com/docker/go-connections/nat" - "github.com/docker/libnetwork" - netconst "github.com/docker/libnetwork/datastore" - "github.com/docker/libnetwork/netlabel" - "github.com/docker/libnetwork/options" - "github.com/docker/libnetwork/types" - "github.com/sirupsen/logrus" -) - -var ( - // ErrRootFSReadOnly is returned when a container - // rootfs is marked readonly. 
- ErrRootFSReadOnly = errors.New("container rootfs is marked read-only") - getPortMapInfo = getSandboxPortMapInfo -) - -func (daemon *Daemon) getDNSSearchSettings(container *container.Container) []string { - if len(container.HostConfig.DNSSearch) > 0 { - return container.HostConfig.DNSSearch - } - - if len(daemon.configStore.DNSSearch) > 0 { - return daemon.configStore.DNSSearch - } - - return nil -} - -func (daemon *Daemon) buildSandboxOptions(container *container.Container) ([]libnetwork.SandboxOption, error) { - var ( - sboxOptions []libnetwork.SandboxOption - err error - dns []string - dnsOptions []string - bindings = make(nat.PortMap) - pbList []types.PortBinding - exposeList []types.TransportPort - ) - - defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName() - sboxOptions = append(sboxOptions, libnetwork.OptionHostname(container.Config.Hostname), - libnetwork.OptionDomainname(container.Config.Domainname)) - - if container.HostConfig.NetworkMode.IsHost() { - sboxOptions = append(sboxOptions, libnetwork.OptionUseDefaultSandbox()) - if len(container.HostConfig.ExtraHosts) == 0 { - sboxOptions = append(sboxOptions, libnetwork.OptionOriginHostsPath("/etc/hosts")) - } - if len(container.HostConfig.DNS) == 0 && len(daemon.configStore.DNS) == 0 && - len(container.HostConfig.DNSSearch) == 0 && len(daemon.configStore.DNSSearch) == 0 && - len(container.HostConfig.DNSOptions) == 0 && len(daemon.configStore.DNSOptions) == 0 { - sboxOptions = append(sboxOptions, libnetwork.OptionOriginResolvConfPath("/etc/resolv.conf")) - } - } else { - // OptionUseExternalKey is mandatory for userns support. - // But optional for non-userns support - sboxOptions = append(sboxOptions, libnetwork.OptionUseExternalKey()) - } - - if err = setupPathsAndSandboxOptions(container, &sboxOptions); err != nil { - return nil, err - } - - if len(container.HostConfig.DNS) > 0 { - dns = container.HostConfig.DNS - } else if len(daemon.configStore.DNS) > 0 { - dns = daemon.configStore.DNS - } - - for _, d := range dns { - sboxOptions = append(sboxOptions, libnetwork.OptionDNS(d)) - } - - dnsSearch := daemon.getDNSSearchSettings(container) - - for _, ds := range dnsSearch { - sboxOptions = append(sboxOptions, libnetwork.OptionDNSSearch(ds)) - } - - if len(container.HostConfig.DNSOptions) > 0 { - dnsOptions = container.HostConfig.DNSOptions - } else if len(daemon.configStore.DNSOptions) > 0 { - dnsOptions = daemon.configStore.DNSOptions - } - - for _, ds := range dnsOptions { - sboxOptions = append(sboxOptions, libnetwork.OptionDNSOptions(ds)) - } - - if container.NetworkSettings.SecondaryIPAddresses != nil { - name := container.Config.Hostname - if container.Config.Domainname != "" { - name = name + "." 
+ container.Config.Domainname - } - - for _, a := range container.NetworkSettings.SecondaryIPAddresses { - sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(name, a.Addr)) - } - } - - for _, extraHost := range container.HostConfig.ExtraHosts { - // allow IPv6 addresses in extra hosts; only split on first ":" - if _, err := opts.ValidateExtraHost(extraHost); err != nil { - return nil, err - } - parts := strings.SplitN(extraHost, ":", 2) - sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(parts[0], parts[1])) - } - - if container.HostConfig.PortBindings != nil { - for p, b := range container.HostConfig.PortBindings { - bindings[p] = []nat.PortBinding{} - for _, bb := range b { - bindings[p] = append(bindings[p], nat.PortBinding{ - HostIP: bb.HostIP, - HostPort: bb.HostPort, - }) - } - } - } - - portSpecs := container.Config.ExposedPorts - ports := make([]nat.Port, len(portSpecs)) - var i int - for p := range portSpecs { - ports[i] = p - i++ - } - nat.SortPortMap(ports, bindings) - for _, port := range ports { - expose := types.TransportPort{} - expose.Proto = types.ParseProtocol(port.Proto()) - expose.Port = uint16(port.Int()) - exposeList = append(exposeList, expose) - - pb := types.PortBinding{Port: expose.Port, Proto: expose.Proto} - binding := bindings[port] - for i := 0; i < len(binding); i++ { - pbCopy := pb.GetCopy() - newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort)) - var portStart, portEnd int - if err == nil { - portStart, portEnd, err = newP.Range() - } - if err != nil { - return nil, fmt.Errorf("Error parsing HostPort value(%s):%v", binding[i].HostPort, err) - } - pbCopy.HostPort = uint16(portStart) - pbCopy.HostPortEnd = uint16(portEnd) - pbCopy.HostIP = net.ParseIP(binding[i].HostIP) - pbList = append(pbList, pbCopy) - } - - if container.HostConfig.PublishAllPorts && len(binding) == 0 { - pbList = append(pbList, pb) - } - } - - sboxOptions = append(sboxOptions, - libnetwork.OptionPortMapping(pbList), - libnetwork.OptionExposedPorts(exposeList)) - - // Legacy Link feature is supported only for the default bridge network. 
- // return if this call to build join options is not for default bridge network - // Legacy Link is only supported by docker run --link - bridgeSettings, ok := container.NetworkSettings.Networks[defaultNetName] - if !ok || bridgeSettings.EndpointSettings == nil { - return sboxOptions, nil - } - - if bridgeSettings.EndpointID == "" { - return sboxOptions, nil - } - - var ( - childEndpoints, parentEndpoints []string - cEndpointID string - ) - - children := daemon.children(container) - for linkAlias, child := range children { - if !isLinkable(child) { - return nil, fmt.Errorf("Cannot link to %s, as it does not belong to the default network", child.Name) - } - _, alias := path.Split(linkAlias) - // allow access to the linked container via the alias, real name, and container hostname - aliasList := alias + " " + child.Config.Hostname - // only add the name if alias isn't equal to the name - if alias != child.Name[1:] { - aliasList = aliasList + " " + child.Name[1:] - } - sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(aliasList, child.NetworkSettings.Networks[defaultNetName].IPAddress)) - cEndpointID = child.NetworkSettings.Networks[defaultNetName].EndpointID - if cEndpointID != "" { - childEndpoints = append(childEndpoints, cEndpointID) - } - } - - for alias, parent := range daemon.parents(container) { - if daemon.configStore.DisableBridge || !container.HostConfig.NetworkMode.IsPrivate() { - continue - } - - _, alias = path.Split(alias) - logrus.Debugf("Update /etc/hosts of %s for alias %s with ip %s", parent.ID, alias, bridgeSettings.IPAddress) - sboxOptions = append(sboxOptions, libnetwork.OptionParentUpdate( - parent.ID, - alias, - bridgeSettings.IPAddress, - )) - if cEndpointID != "" { - parentEndpoints = append(parentEndpoints, cEndpointID) - } - } - - linkOptions := options.Generic{ - netlabel.GenericData: options.Generic{ - "ParentEndpoints": parentEndpoints, - "ChildEndpoints": childEndpoints, - }, - } - - sboxOptions = append(sboxOptions, libnetwork.OptionGeneric(linkOptions)) - return sboxOptions, nil -} - -func (daemon *Daemon) updateNetworkSettings(container *container.Container, n libnetwork.Network, endpointConfig *networktypes.EndpointSettings) error { - if container.NetworkSettings == nil { - container.NetworkSettings = &network.Settings{Networks: make(map[string]*network.EndpointSettings)} - } - - if !container.HostConfig.NetworkMode.IsHost() && containertypes.NetworkMode(n.Type()).IsHost() { - return runconfig.ErrConflictHostNetwork - } - - for s, v := range container.NetworkSettings.Networks { - sn, err := daemon.FindNetwork(getNetworkID(s, v.EndpointSettings)) - if err != nil { - continue - } - - if sn.Name() == n.Name() { - // If the network scope is swarm, then this - // is an attachable network, which may not - // be locally available previously. - // So always update. 
- if n.Info().Scope() == netconst.SwarmScope { - continue - } - // Avoid duplicate config - return nil - } - if !containertypes.NetworkMode(sn.Type()).IsPrivate() || - !containertypes.NetworkMode(n.Type()).IsPrivate() { - return runconfig.ErrConflictSharedNetwork - } - if containertypes.NetworkMode(sn.Name()).IsNone() || - containertypes.NetworkMode(n.Name()).IsNone() { - return runconfig.ErrConflictNoNetwork - } - } - - container.NetworkSettings.Networks[n.Name()] = &network.EndpointSettings{ - EndpointSettings: endpointConfig, - } - - return nil -} - -func (daemon *Daemon) updateEndpointNetworkSettings(container *container.Container, n libnetwork.Network, ep libnetwork.Endpoint) error { - if err := buildEndpointInfo(container.NetworkSettings, n, ep); err != nil { - return err - } - - if container.HostConfig.NetworkMode == runconfig.DefaultDaemonNetworkMode() { - container.NetworkSettings.Bridge = daemon.configStore.BridgeConfig.Iface - } - - return nil -} - -// UpdateNetwork is used to update the container's network (e.g. when linked containers -// get removed/unlinked). -func (daemon *Daemon) updateNetwork(container *container.Container) error { - var ( - start = time.Now() - ctrl = daemon.netController - sid = container.NetworkSettings.SandboxID - ) - - sb, err := ctrl.SandboxByID(sid) - if err != nil { - return fmt.Errorf("error locating sandbox id %s: %v", sid, err) - } - - // Find if container is connected to the default bridge network - var n libnetwork.Network - for name, v := range container.NetworkSettings.Networks { - sn, err := daemon.FindNetwork(getNetworkID(name, v.EndpointSettings)) - if err != nil { - continue - } - if sn.Name() == runconfig.DefaultDaemonNetworkMode().NetworkName() { - n = sn - break - } - } - - if n == nil { - // Not connected to the default bridge network; Nothing to do - return nil - } - - options, err := daemon.buildSandboxOptions(container) - if err != nil { - return fmt.Errorf("Update network failed: %v", err) - } - - if err := sb.Refresh(options...); err != nil { - return fmt.Errorf("Update network failed: Failure in refresh sandbox %s: %v", sid, err) - } - - networkActions.WithValues("update").UpdateSince(start) - - return nil -} - -func (daemon *Daemon) findAndAttachNetwork(container *container.Container, idOrName string, epConfig *networktypes.EndpointSettings) (libnetwork.Network, *networktypes.NetworkingConfig, error) { - id := getNetworkID(idOrName, epConfig) - - n, err := daemon.FindNetwork(id) - if err != nil { - // We should always be able to find the network for a - // managed container. - if container.Managed { - return nil, nil, err - } - } - - // If we found a network and if it is not dynamically created - // we should never attempt to attach to that network here. - if n != nil { - if container.Managed || !n.Info().Dynamic() { - return n, nil, nil - } - } - - var addresses []string - if epConfig != nil && epConfig.IPAMConfig != nil { - if epConfig.IPAMConfig.IPv4Address != "" { - addresses = append(addresses, epConfig.IPAMConfig.IPv4Address) - } - - if epConfig.IPAMConfig.IPv6Address != "" { - addresses = append(addresses, epConfig.IPAMConfig.IPv6Address) - } - } - - var ( - config *networktypes.NetworkingConfig - retryCount int - ) - - if n == nil && daemon.attachableNetworkLock != nil { - daemon.attachableNetworkLock.Lock(id) - defer daemon.attachableNetworkLock.Unlock(id) - } - - for { - // In all other cases, attempt to attach to the network to - // trigger attachment in the swarm cluster manager. 
- if daemon.clusterProvider != nil { - var err error - config, err = daemon.clusterProvider.AttachNetwork(id, container.ID, addresses) - if err != nil { - return nil, nil, err - } - } - - n, err = daemon.FindNetwork(id) - if err != nil { - if daemon.clusterProvider != nil { - if err := daemon.clusterProvider.DetachNetwork(id, container.ID); err != nil { - logrus.Warnf("Could not rollback attachment for container %s to network %s: %v", container.ID, idOrName, err) - } - } - - // Retry network attach again if we failed to - // find the network after successful - // attachment because the only reason that - // would happen is if some other container - // attached to the swarm scope network went down - // and removed the network while we were in - // the process of attaching. - if config != nil { - if _, ok := err.(libnetwork.ErrNoSuchNetwork); ok { - if retryCount >= 5 { - return nil, nil, fmt.Errorf("could not find network %s after successful attachment", idOrName) - } - retryCount++ - continue - } - } - - return nil, nil, err - } - - break - } - - // This container has attachment to a swarm scope - // network. Update the container network settings accordingly. - container.NetworkSettings.HasSwarmEndpoint = true - return n, config, nil -} - -// updateContainerNetworkSettings updates the network settings -func (daemon *Daemon) updateContainerNetworkSettings(container *container.Container, endpointsConfig map[string]*networktypes.EndpointSettings) { - var n libnetwork.Network - - mode := container.HostConfig.NetworkMode - if container.Config.NetworkDisabled || mode.IsContainer() { - return - } - - networkName := mode.NetworkName() - if mode.IsDefault() { - networkName = daemon.netController.Config().Daemon.DefaultNetwork - } - - if mode.IsUserDefined() { - var err error - - n, err = daemon.FindNetwork(networkName) - if err == nil { - networkName = n.Name() - } - } - - if container.NetworkSettings == nil { - container.NetworkSettings = &network.Settings{} - } - - if len(endpointsConfig) > 0 { - if container.NetworkSettings.Networks == nil { - container.NetworkSettings.Networks = make(map[string]*network.EndpointSettings) - } - - for name, epConfig := range endpointsConfig { - container.NetworkSettings.Networks[name] = &network.EndpointSettings{ - EndpointSettings: epConfig, - } - } - } - - if container.NetworkSettings.Networks == nil { - container.NetworkSettings.Networks = make(map[string]*network.EndpointSettings) - container.NetworkSettings.Networks[networkName] = &network.EndpointSettings{ - EndpointSettings: &networktypes.EndpointSettings{}, - } - } - - // Convert any settings added by client in default name to - // engine's default network name key - if mode.IsDefault() { - if nConf, ok := container.NetworkSettings.Networks[mode.NetworkName()]; ok { - container.NetworkSettings.Networks[networkName] = nConf - delete(container.NetworkSettings.Networks, mode.NetworkName()) - } - } - - if !mode.IsUserDefined() { - return - } - // Make sure to internally store the per network endpoint config by network name - if _, ok := container.NetworkSettings.Networks[networkName]; ok { - return - } - - if n != nil { - if nwConfig, ok := container.NetworkSettings.Networks[n.ID()]; ok { - container.NetworkSettings.Networks[networkName] = nwConfig - delete(container.NetworkSettings.Networks, n.ID()) - return - } - } -} - -func (daemon *Daemon) allocateNetwork(container *container.Container) error { - start := time.Now() - controller := daemon.netController - - if daemon.netController == nil { - return nil - 
} - - // Clean up any stale sandbox left over due to ungraceful daemon shutdown - if err := controller.SandboxDestroy(container.ID); err != nil { - logrus.Errorf("failed to clean up stale network sandbox for container %s", container.ID) - } - - if container.Config.NetworkDisabled || container.HostConfig.NetworkMode.IsContainer() { - return nil - } - - updateSettings := false - - if len(container.NetworkSettings.Networks) == 0 { - daemon.updateContainerNetworkSettings(container, nil) - updateSettings = true - } - - // always connect the default network first, since only the default - // network mode supports links and some link setup must happen when the - // sandbox is initialized; the sandbox is only initialized on the - // first network connection. - defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName() - if nConf, ok := container.NetworkSettings.Networks[defaultNetName]; ok { - cleanOperationalData(nConf) - if err := daemon.connectToNetwork(container, defaultNetName, nConf.EndpointSettings, updateSettings); err != nil { - return err - } - - } - - // the intermediate map is necessary because "connectToNetwork" modifies "container.NetworkSettings.Networks" - networks := make(map[string]*network.EndpointSettings) - for n, epConf := range container.NetworkSettings.Networks { - if n == defaultNetName { - continue - } - - networks[n] = epConf - } - - for netName, epConf := range networks { - cleanOperationalData(epConf) - if err := daemon.connectToNetwork(container, netName, epConf.EndpointSettings, updateSettings); err != nil { - return err - } - } - - // If the container is not to be connected to any network, - // create its network sandbox now if not present - if len(networks) == 0 { - if nil == daemon.getNetworkSandbox(container) { - options, err := daemon.buildSandboxOptions(container) - if err != nil { - return err - } - sb, err := daemon.netController.NewSandbox(container.ID, options...) - if err != nil { - return err - } - updateSandboxNetworkSettings(container, sb) - defer func() { - if err != nil { - sb.Delete() - } - }() - } - - } - - if _, err := container.WriteHostConfig(); err != nil { - return err - } - networkActions.WithValues("allocate").UpdateSince(start) - return nil -} - -func (daemon *Daemon) getNetworkSandbox(container *container.Container) libnetwork.Sandbox { - var sb libnetwork.Sandbox - daemon.netController.WalkSandboxes(func(s libnetwork.Sandbox) bool { - if s.ContainerID() == container.ID { - sb = s - return true - } - return false - }) - return sb -} - -// hasUserDefinedIPAddress returns whether the passed endpoint configuration contains IP address configuration -func hasUserDefinedIPAddress(epConfig *networktypes.EndpointSettings) bool { - return epConfig != nil && epConfig.IPAMConfig != nil && (len(epConfig.IPAMConfig.IPv4Address) > 0 || len(epConfig.IPAMConfig.IPv6Address) > 0) -} - -// User specified ip address is acceptable only for networks with user specified subnets.
-func validateNetworkingConfig(n libnetwork.Network, epConfig *networktypes.EndpointSettings) error { - if n == nil || epConfig == nil { - return nil - } - if !hasUserDefinedIPAddress(epConfig) { - return nil - } - _, _, nwIPv4Configs, nwIPv6Configs := n.Info().IpamConfig() - for _, s := range []struct { - ipConfigured bool - subnetConfigs []*libnetwork.IpamConf - }{ - { - ipConfigured: len(epConfig.IPAMConfig.IPv4Address) > 0, - subnetConfigs: nwIPv4Configs, - }, - { - ipConfigured: len(epConfig.IPAMConfig.IPv6Address) > 0, - subnetConfigs: nwIPv6Configs, - }, - } { - if s.ipConfigured { - foundSubnet := false - for _, cfg := range s.subnetConfigs { - if len(cfg.PreferredPool) > 0 { - foundSubnet = true - break - } - } - if !foundSubnet { - return runconfig.ErrUnsupportedNetworkNoSubnetAndIP - } - } - } - - return nil -} - -// cleanOperationalData resets the operational data from the passed endpoint settings -func cleanOperationalData(es *network.EndpointSettings) { - es.EndpointID = "" - es.Gateway = "" - es.IPAddress = "" - es.IPPrefixLen = 0 - es.IPv6Gateway = "" - es.GlobalIPv6Address = "" - es.GlobalIPv6PrefixLen = 0 - es.MacAddress = "" - if es.IPAMOperational { - es.IPAMConfig = nil - } -} - -func (daemon *Daemon) updateNetworkConfig(container *container.Container, n libnetwork.Network, endpointConfig *networktypes.EndpointSettings, updateSettings bool) error { - - if !containertypes.NetworkMode(n.Name()).IsUserDefined() { - if hasUserDefinedIPAddress(endpointConfig) && !enableIPOnPredefinedNetwork() { - return runconfig.ErrUnsupportedNetworkAndIP - } - if endpointConfig != nil && len(endpointConfig.Aliases) > 0 && !container.EnableServiceDiscoveryOnDefaultNetwork() { - return runconfig.ErrUnsupportedNetworkAndAlias - } - } else { - addShortID := true - shortID := stringid.TruncateID(container.ID) - for _, alias := range endpointConfig.Aliases { - if alias == shortID { - addShortID = false - break - } - } - if addShortID { - endpointConfig.Aliases = append(endpointConfig.Aliases, shortID) - } - } - - if err := validateNetworkingConfig(n, endpointConfig); err != nil { - return err - } - - if updateSettings { - if err := daemon.updateNetworkSettings(container, n, endpointConfig); err != nil { - return err - } - } - return nil -} - -func (daemon *Daemon) connectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings, updateSettings bool) (err error) { - start := time.Now() - if container.HostConfig.NetworkMode.IsContainer() { - return runconfig.ErrConflictSharedNetwork - } - if containertypes.NetworkMode(idOrName).IsBridge() && - daemon.configStore.DisableBridge { - container.Config.NetworkDisabled = true - return nil - } - if endpointConfig == nil { - endpointConfig = &networktypes.EndpointSettings{} - } - - n, config, err := daemon.findAndAttachNetwork(container, idOrName, endpointConfig) - if err != nil { - return err - } - if n == nil { - return nil - } - - var operIPAM bool - if config != nil { - if epConfig, ok := config.EndpointsConfig[n.Name()]; ok { - if endpointConfig.IPAMConfig == nil || - (endpointConfig.IPAMConfig.IPv4Address == "" && - endpointConfig.IPAMConfig.IPv6Address == "" && - len(endpointConfig.IPAMConfig.LinkLocalIPs) == 0) { - operIPAM = true - } - - // copy IPAMConfig and NetworkID from epConfig via AttachNetwork - endpointConfig.IPAMConfig = epConfig.IPAMConfig - endpointConfig.NetworkID = epConfig.NetworkID - } - } - - err = daemon.updateNetworkConfig(container, n, endpointConfig, updateSettings) - if err != 
nil { - return err - } - - controller := daemon.netController - sb := daemon.getNetworkSandbox(container) - createOptions, err := buildCreateEndpointOptions(container, n, endpointConfig, sb, daemon.configStore.DNS) - if err != nil { - return err - } - - endpointName := strings.TrimPrefix(container.Name, "/") - ep, err := n.CreateEndpoint(endpointName, createOptions...) - if err != nil { - return err - } - defer func() { - if err != nil { - if e := ep.Delete(false); e != nil { - logrus.Warnf("Could not rollback container connection to network %s", idOrName) - } - } - }() - container.NetworkSettings.Networks[n.Name()] = &network.EndpointSettings{ - EndpointSettings: endpointConfig, - IPAMOperational: operIPAM, - } - if _, ok := container.NetworkSettings.Networks[n.ID()]; ok { - delete(container.NetworkSettings.Networks, n.ID()) - } - - if err := daemon.updateEndpointNetworkSettings(container, n, ep); err != nil { - return err - } - - if sb == nil { - options, err := daemon.buildSandboxOptions(container) - if err != nil { - return err - } - sb, err = controller.NewSandbox(container.ID, options...) - if err != nil { - return err - } - - updateSandboxNetworkSettings(container, sb) - } - - joinOptions, err := buildJoinOptions(container.NetworkSettings, n) - if err != nil { - return err - } - - if err := ep.Join(sb, joinOptions...); err != nil { - return err - } - - if !container.Managed { - // add container name/alias to DNS - if err := daemon.ActivateContainerServiceBinding(container.Name); err != nil { - return fmt.Errorf("Activate container service binding for %s failed: %v", container.Name, err) - } - } - - if err := updateJoinInfo(container.NetworkSettings, n, ep); err != nil { - return fmt.Errorf("Updating join info failed: %v", err) - } - - container.NetworkSettings.Ports = getPortMapInfo(sb) - - daemon.LogNetworkEventWithAttributes(n, "connect", map[string]string{"container": container.ID}) - networkActions.WithValues("connect").UpdateSince(start) - return nil -} - -func updateJoinInfo(networkSettings *network.Settings, n libnetwork.Network, ep libnetwork.Endpoint) error { // nolint: interfacer - if ep == nil { - return errors.New("invalid endpoint while building port map info") - } - - if networkSettings == nil { - return errors.New("invalid network settings while building port map info") - } - - if len(networkSettings.Ports) == 0 { - pm, err := getEndpointPortMapInfo(ep) - if err != nil { - return err - } - networkSettings.Ports = pm - } - - epInfo := ep.Info() - if epInfo == nil { - // It is not an error to get an empty endpoint info - return nil - } - if epInfo.Gateway() != nil { - networkSettings.Networks[n.Name()].Gateway = epInfo.Gateway().String() - } - if epInfo.GatewayIPv6().To16() != nil { - networkSettings.Networks[n.Name()].IPv6Gateway = epInfo.GatewayIPv6().String() - } - return nil -} - -// ForceEndpointDelete deletes an endpoint from a network forcefully -func (daemon *Daemon) ForceEndpointDelete(name string, networkName string) error { - n, err := daemon.FindNetwork(networkName) - if err != nil { - return err - } - - ep, err := n.EndpointByName(name) - if err != nil { - return err - } - return ep.Delete(true) -} - -func (daemon *Daemon) disconnectFromNetwork(container *container.Container, n libnetwork.Network, force bool) error { - var ( - ep libnetwork.Endpoint - sbox libnetwork.Sandbox - ) - - s := func(current libnetwork.Endpoint) bool { - epInfo := current.Info() - if epInfo == nil { - return false - } - if sb := epInfo.Sandbox(); sb != nil { - if sb.ContainerID()
== container.ID { - ep = current - sbox = sb - return true - } - } - return false - } - n.WalkEndpoints(s) - - if ep == nil && force { - epName := strings.TrimPrefix(container.Name, "/") - ep, err := n.EndpointByName(epName) - if err != nil { - return err - } - return ep.Delete(force) - } - - if ep == nil { - return fmt.Errorf("container %s is not connected to network %s", container.ID, n.Name()) - } - - if err := ep.Leave(sbox); err != nil { - return fmt.Errorf("container %s failed to leave network %s: %v", container.ID, n.Name(), err) - } - - container.NetworkSettings.Ports = getPortMapInfo(sbox) - - if err := ep.Delete(false); err != nil { - return fmt.Errorf("endpoint delete failed for container %s on network %s: %v", container.ID, n.Name(), err) - } - - delete(container.NetworkSettings.Networks, n.Name()) - - daemon.tryDetachContainerFromClusterNetwork(n, container) - - return nil -} - -func (daemon *Daemon) tryDetachContainerFromClusterNetwork(network libnetwork.Network, container *container.Container) { - if daemon.clusterProvider != nil && network.Info().Dynamic() && !container.Managed { - if err := daemon.clusterProvider.DetachNetwork(network.Name(), container.ID); err != nil { - logrus.Warnf("error detaching from network %s: %v", network.Name(), err) - if err := daemon.clusterProvider.DetachNetwork(network.ID(), container.ID); err != nil { - logrus.Warnf("error detaching from network %s: %v", network.ID(), err) - } - } - } - attributes := map[string]string{ - "container": container.ID, - } - daemon.LogNetworkEventWithAttributes(network, "disconnect", attributes) -} - -func (daemon *Daemon) initializeNetworking(container *container.Container) error { - var err error - - if container.HostConfig.NetworkMode.IsContainer() { - // we need to get the hosts files from the container to join - nc, err := daemon.getNetworkedContainer(container.ID, container.HostConfig.NetworkMode.ConnectedContainer()) - if err != nil { - return err - } - - err = daemon.initializeNetworkingPaths(container, nc) - if err != nil { - return err - } - - container.Config.Hostname = nc.Config.Hostname - container.Config.Domainname = nc.Config.Domainname - return nil - } - - if container.HostConfig.NetworkMode.IsHost() { - if container.Config.Hostname == "" { - container.Config.Hostname, err = os.Hostname() - if err != nil { - return err - } - } - } - - if err := daemon.allocateNetwork(container); err != nil { - return err - } - - return container.BuildHostnameFile() -} - -func (daemon *Daemon) getNetworkedContainer(containerID, connectedContainerID string) (*container.Container, error) { - nc, err := daemon.GetContainer(connectedContainerID) - if err != nil { - return nil, err - } - if containerID == nc.ID { - return nil, fmt.Errorf("cannot join own network") - } - if !nc.IsRunning() { - err := fmt.Errorf("cannot join network of a non running container: %s", connectedContainerID) - return nil, errdefs.Conflict(err) - } - if nc.IsRestarting() { - return nil, errContainerIsRestarting(connectedContainerID) - } - return nc, nil -} - -func (daemon *Daemon) releaseNetwork(container *container.Container) { - start := time.Now() - if daemon.netController == nil { - return - } - if container.HostConfig.NetworkMode.IsContainer() || container.Config.NetworkDisabled { - return - } - - sid := container.NetworkSettings.SandboxID - settings := container.NetworkSettings.Networks - container.NetworkSettings.Ports = nil - - if sid == "" { - return - } - - var networks []libnetwork.Network - for n, epSettings := range settings { - if 
nw, err := daemon.FindNetwork(getNetworkID(n, epSettings.EndpointSettings)); err == nil { - networks = append(networks, nw) - } - - if epSettings.EndpointSettings == nil { - continue - } - - cleanOperationalData(epSettings) - } - - sb, err := daemon.netController.SandboxByID(sid) - if err != nil { - logrus.Warnf("error locating sandbox id %s: %v", sid, err) - return - } - - if err := sb.Delete(); err != nil { - logrus.Errorf("Error deleting sandbox id %s for container %s: %v", sid, container.ID, err) - } - - for _, nw := range networks { - daemon.tryDetachContainerFromClusterNetwork(nw, container) - } - networkActions.WithValues("release").UpdateSince(start) -} - -func errRemovalContainer(containerID string) error { - return fmt.Errorf("Container %s is marked for removal and cannot be connected or disconnected to the network", containerID) -} - -// ConnectToNetwork connects a container to a network -func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings) error { - if endpointConfig == nil { - endpointConfig = &networktypes.EndpointSettings{} - } - container.Lock() - defer container.Unlock() - - if !container.Running { - if container.RemovalInProgress || container.Dead { - return errRemovalContainer(container.ID) - } - - n, err := daemon.FindNetwork(idOrName) - if err == nil && n != nil { - if err := daemon.updateNetworkConfig(container, n, endpointConfig, true); err != nil { - return err - } - } else { - container.NetworkSettings.Networks[idOrName] = &network.EndpointSettings{ - EndpointSettings: endpointConfig, - } - } - } else if !daemon.isNetworkHotPluggable() { - return fmt.Errorf(runtime.GOOS + " does not support connecting a running container to a network") - } else { - if err := daemon.connectToNetwork(container, idOrName, endpointConfig, true); err != nil { - return err - } - } - - return container.CheckpointTo(daemon.containersReplica) -} - -// DisconnectFromNetwork disconnects container from network n. -func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, networkName string, force bool) error { - n, err := daemon.FindNetwork(networkName) - container.Lock() - defer container.Unlock() - - if !container.Running || (err != nil && force) { - if container.RemovalInProgress || container.Dead { - return errRemovalContainer(container.ID) - } - // In case networkName is resolved we will use n.Name() - // this will cover the case where network id is passed. 
- if n != nil { - networkName = n.Name() - } - if _, ok := container.NetworkSettings.Networks[networkName]; !ok { - return fmt.Errorf("container %s is not connected to the network %s", container.ID, networkName) - } - delete(container.NetworkSettings.Networks, networkName) - } else if err == nil && !daemon.isNetworkHotPluggable() { - return fmt.Errorf(runtime.GOOS + " does not support disconnecting a running container from a network") - } else if err == nil { - if container.HostConfig.NetworkMode.IsHost() && containertypes.NetworkMode(n.Type()).IsHost() { - return runconfig.ErrConflictHostNetwork - } - - if err := daemon.disconnectFromNetwork(container, n, false); err != nil { - return err - } - } else { - return err - } - - if err := container.CheckpointTo(daemon.containersReplica); err != nil { - return err - } - - if n != nil { - daemon.LogNetworkEventWithAttributes(n, "disconnect", map[string]string{ - "container": container.ID, - }) - } - - return nil -} - -// ActivateContainerServiceBinding puts this container into load balancer active rotation and DNS response -func (daemon *Daemon) ActivateContainerServiceBinding(containerName string) error { - container, err := daemon.GetContainer(containerName) - if err != nil { - return err - } - sb := daemon.getNetworkSandbox(container) - if sb == nil { - return fmt.Errorf("network sandbox does not exist for container %s", containerName) - } - return sb.EnableService() -} - -// DeactivateContainerServiceBinding removes this container from load balancer active rotation and DNS response -func (daemon *Daemon) DeactivateContainerServiceBinding(containerName string) error { - container, err := daemon.GetContainer(containerName) - if err != nil { - return err - } - sb := daemon.getNetworkSandbox(container) - if sb == nil { - // If the network sandbox is not found, then there is nothing to deactivate - logrus.Debugf("Could not find network sandbox for container %s on service binding deactivation request", containerName) - return nil - } - return sb.DisableService() -} - -func getNetworkID(name string, endpointSettings *networktypes.EndpointSettings) string { - // We only want to prefer NetworkID for user defined networks. - // For systems like bridge, none, etc. the name is preferred (otherwise restart may cause issues) - if containertypes.NetworkMode(name).IsUserDefined() && endpointSettings != nil && endpointSettings.NetworkID != "" { - return endpointSettings.NetworkID - } - return name -} - -// updateSandboxNetworkSettings updates the sandbox ID and Key.
-func updateSandboxNetworkSettings(c *container.Container, sb libnetwork.Sandbox) error { - c.NetworkSettings.SandboxID = sb.ID() - c.NetworkSettings.SandboxKey = sb.Key() - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/container_operations_unix.go b/vendor/github.com/docker/docker/daemon/container_operations_unix.go deleted file mode 100644 index bc7ee4523..000000000 --- a/vendor/github.com/docker/docker/daemon/container_operations_unix.go +++ /dev/null @@ -1,403 +0,0 @@ -// +build linux freebsd - -package daemon // import "github.com/docker/docker/daemon" - -import ( - "context" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "time" - - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/links" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/runconfig" - "github.com/docker/libnetwork" - "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { - var env []string - children := daemon.children(container) - - bridgeSettings := container.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] - if bridgeSettings == nil || bridgeSettings.EndpointSettings == nil { - return nil, nil - } - - for linkAlias, child := range children { - if !child.IsRunning() { - return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias) - } - - childBridgeSettings := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] - if childBridgeSettings == nil || childBridgeSettings.EndpointSettings == nil { - return nil, fmt.Errorf("container %s not attached to default bridge network", child.ID) - } - - link := links.NewLink( - bridgeSettings.IPAddress, - childBridgeSettings.IPAddress, - linkAlias, - child.Config.Env, - child.Config.ExposedPorts, - ) - - env = append(env, link.ToEnv()...) - } - - return env, nil -} - -func (daemon *Daemon) getIpcContainer(id string) (*container.Container, error) { - errMsg := "can't join IPC of container " + id - // Check the container exists - container, err := daemon.GetContainer(id) - if err != nil { - return nil, errors.Wrap(err, errMsg) - } - // Check the container is running and not restarting - if err := daemon.checkContainer(container, containerIsRunning, containerIsNotRestarting); err != nil { - return nil, errors.Wrap(err, errMsg) - } - // Check the container ipc is shareable - if st, err := os.Stat(container.ShmPath); err != nil || !st.IsDir() { - if err == nil || os.IsNotExist(err) { - return nil, errors.New(errMsg + ": non-shareable IPC") - } - // stat() failed? 
- return nil, errors.Wrap(err, errMsg+": unexpected error from stat "+container.ShmPath) - } - - return container, nil -} - -func (daemon *Daemon) getPidContainer(container *container.Container) (*container.Container, error) { - containerID := container.HostConfig.PidMode.Container() - container, err := daemon.GetContainer(containerID) - if err != nil { - return nil, errors.Wrapf(err, "cannot join PID of a non running container: %s", containerID) - } - return container, daemon.checkContainer(container, containerIsRunning, containerIsNotRestarting) -} - -func containerIsRunning(c *container.Container) error { - if !c.IsRunning() { - return errdefs.Conflict(errors.Errorf("container %s is not running", c.ID)) - } - return nil -} - -func containerIsNotRestarting(c *container.Container) error { - if c.IsRestarting() { - return errContainerIsRestarting(c.ID) - } - return nil -} - -func (daemon *Daemon) setupIpcDirs(c *container.Container) error { - ipcMode := c.HostConfig.IpcMode - - switch { - case ipcMode.IsContainer(): - ic, err := daemon.getIpcContainer(ipcMode.Container()) - if err != nil { - return err - } - c.ShmPath = ic.ShmPath - - case ipcMode.IsHost(): - if _, err := os.Stat("/dev/shm"); err != nil { - return fmt.Errorf("/dev/shm is not mounted, but must be for --ipc=host") - } - c.ShmPath = "/dev/shm" - - case ipcMode.IsPrivate(), ipcMode.IsNone(): - // c.ShmPath will/should not be used, so make it empty. - // Container's /dev/shm mount comes from OCI spec. - c.ShmPath = "" - - case ipcMode.IsEmpty(): - // A container was created by an older version of the daemon. - // The default behavior used to be what is now called "shareable". - fallthrough - - case ipcMode.IsShareable(): - rootIDs := daemon.idMappings.RootPair() - if !c.HasMountFor("/dev/shm") { - shmPath, err := c.ShmResourcePath() - if err != nil { - return err - } - - if err := idtools.MkdirAllAndChown(shmPath, 0700, rootIDs); err != nil { - return err - } - - shmproperty := "mode=1777,size=" + strconv.FormatInt(c.HostConfig.ShmSize, 10) - if err := unix.Mount("shm", shmPath, "tmpfs", uintptr(unix.MS_NOEXEC|unix.MS_NOSUID|unix.MS_NODEV), label.FormatMountLabel(shmproperty, c.GetMountLabel())); err != nil { - return fmt.Errorf("mounting shm tmpfs: %s", err) - } - if err := os.Chown(shmPath, rootIDs.UID, rootIDs.GID); err != nil { - return err - } - c.ShmPath = shmPath - } - - default: - return fmt.Errorf("invalid IPC mode: %v", ipcMode) - } - - return nil -} - -func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { - if len(c.SecretReferences) == 0 && len(c.ConfigReferences) == 0 { - return nil - } - - if err := daemon.createSecretsDir(c); err != nil { - return err - } - defer func() { - if setupErr != nil { - daemon.cleanupSecretDir(c) - } - }() - - if c.DependencyStore == nil { - return fmt.Errorf("secret store is not initialized") - } - - // retrieve possible remapped range start for root UID, GID - rootIDs := daemon.idMappings.RootPair() - - for _, s := range c.SecretReferences { - // TODO (ehazlett): use type switch when more are supported - if s.File == nil { - logrus.Error("secret target type is not a file target") - continue - } - - // secrets are created in the SecretMountPath on the host, at a - // single level - fPath, err := c.SecretFilePath(*s) - if err != nil { - return errors.Wrap(err, "error getting secret file path") - } - if err := idtools.MkdirAllAndChown(filepath.Dir(fPath), 0700, rootIDs); err != nil { - return errors.Wrap(err, "error creating secret mount path") - } - - 
logrus.WithFields(logrus.Fields{ - "name": s.File.Name, - "path": fPath, - }).Debug("injecting secret") - secret, err := c.DependencyStore.Secrets().Get(s.SecretID) - if err != nil { - return errors.Wrap(err, "unable to get secret from secret store") - } - if err := ioutil.WriteFile(fPath, secret.Spec.Data, s.File.Mode); err != nil { - return errors.Wrap(err, "error injecting secret") - } - - uid, err := strconv.Atoi(s.File.UID) - if err != nil { - return err - } - gid, err := strconv.Atoi(s.File.GID) - if err != nil { - return err - } - - if err := os.Chown(fPath, rootIDs.UID+uid, rootIDs.GID+gid); err != nil { - return errors.Wrap(err, "error setting ownership for secret") - } - if err := os.Chmod(fPath, s.File.Mode); err != nil { - return errors.Wrap(err, "error setting file mode for secret") - } - } - - for _, ref := range c.ConfigReferences { - // TODO (ehazlett): use type switch when more are supported - if ref.File == nil { - logrus.Error("config target type is not a file target") - continue - } - - fPath, err := c.ConfigFilePath(*ref) - if err != nil { - return errors.Wrap(err, "error getting config file path for container") - } - if err := idtools.MkdirAllAndChown(filepath.Dir(fPath), 0700, rootIDs); err != nil { - return errors.Wrap(err, "error creating config mount path") - } - - logrus.WithFields(logrus.Fields{ - "name": ref.File.Name, - "path": fPath, - }).Debug("injecting config") - config, err := c.DependencyStore.Configs().Get(ref.ConfigID) - if err != nil { - return errors.Wrap(err, "unable to get config from config store") - } - if err := ioutil.WriteFile(fPath, config.Spec.Data, ref.File.Mode); err != nil { - return errors.Wrap(err, "error injecting config") - } - - uid, err := strconv.Atoi(ref.File.UID) - if err != nil { - return err - } - gid, err := strconv.Atoi(ref.File.GID) - if err != nil { - return err - } - - if err := os.Chown(fPath, rootIDs.UID+uid, rootIDs.GID+gid); err != nil { - return errors.Wrap(err, "error setting ownership for config") - } - if err := os.Chmod(fPath, ref.File.Mode); err != nil { - return errors.Wrap(err, "error setting file mode for config") - } - } - - return daemon.remountSecretDir(c) -} - -// createSecretsDir is used to create a dir suitable for storing container secrets. 
-// In practice this uses a tmpfs mount and is used for both "configs" and "secrets" -func (daemon *Daemon) createSecretsDir(c *container.Container) error { - // retrieve possible remapped range start for root UID, GID - rootIDs := daemon.idMappings.RootPair() - dir, err := c.SecretMountPath() - if err != nil { - return errors.Wrap(err, "error getting container secrets dir") - } - - // create tmpfs - if err := idtools.MkdirAllAndChown(dir, 0700, rootIDs); err != nil { - return errors.Wrap(err, "error creating secret local mount path") - } - - tmpfsOwnership := fmt.Sprintf("uid=%d,gid=%d", rootIDs.UID, rootIDs.GID) - if err := mount.Mount("tmpfs", dir, "tmpfs", "nodev,nosuid,noexec,"+tmpfsOwnership); err != nil { - return errors.Wrap(err, "unable to setup secret mount") - } - return nil -} - -func (daemon *Daemon) remountSecretDir(c *container.Container) error { - dir, err := c.SecretMountPath() - if err != nil { - return errors.Wrap(err, "error getting container secrets path") - } - if err := label.Relabel(dir, c.MountLabel, false); err != nil { - logrus.WithError(err).WithField("dir", dir).Warn("Error while attempting to set selinux label") - } - rootIDs := daemon.idMappings.RootPair() - tmpfsOwnership := fmt.Sprintf("uid=%d,gid=%d", rootIDs.UID, rootIDs.GID) - - // remount secrets ro - if err := mount.Mount("tmpfs", dir, "tmpfs", "remount,ro,"+tmpfsOwnership); err != nil { - return errors.Wrap(err, "unable to remount dir as readonly") - } - - return nil -} - -func (daemon *Daemon) cleanupSecretDir(c *container.Container) { - dir, err := c.SecretMountPath() - if err != nil { - logrus.WithError(err).WithField("container", c.ID).Warn("error getting secrets mount path for container") - } - if err := mount.RecursiveUnmount(dir); err != nil { - logrus.WithField("dir", dir).WithError(err).Warn("Error while attempting to unmount dir; this may prevent removal of the container.") - } - if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) { - logrus.WithField("dir", dir).WithError(err).Error("Error removing dir.") - } -}
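Note: the secrets directory above follows a mount/write/remount cycle: mount a private tmpfs read-write, inject the files, then remount the same tmpfs read-only so the container cannot alter them. A minimal Linux-only sketch of that pattern (path and mount options are illustrative; requires root):

package main

import (
	"io/ioutil"
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	dir := "/run/demo-secrets" // illustrative path
	if err := os.MkdirAll(dir, 0700); err != nil {
		log.Fatal(err)
	}
	flags := uintptr(unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_NODEV)
	// 1. Mount a private read-write tmpfs over the directory.
	if err := unix.Mount("tmpfs", dir, "tmpfs", flags, "mode=0700"); err != nil {
		log.Fatal(err)
	}
	// 2. Inject the secret while the mount is still writable.
	if err := ioutil.WriteFile(dir+"/token", []byte("s3cret"), 0400); err != nil {
		log.Fatal(err)
	}
	// 3. Remount read-only: the files stay readable, writes now fail.
	if err := unix.Mount("tmpfs", dir, "tmpfs", flags|unix.MS_REMOUNT|unix.MS_RDONLY, ""); err != nil {
		log.Fatal(err)
	}
}

The daemon's version goes through pkg/mount and additionally relabels the directory for SELinux before the read-only remount.

 - -func killProcessDirectly(cntr *container.Container) error { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - // Block until the container stops or the timeout expires.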
- status := <-cntr.Wait(ctx, container.WaitConditionNotRunning) - if status.Err() != nil { - // Ensure that we don't kill ourselves - if pid := cntr.GetPID(); pid != 0 { - logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(cntr.ID)) - if err := unix.Kill(pid, 9); err != nil { - if err != unix.ESRCH { - return err - } - e := errNoSuchProcess{pid, 9} - logrus.Debug(e) - return e - } - } - } - return nil -} - -func detachMounted(path string) error { - return unix.Unmount(path, unix.MNT_DETACH) -} - -func isLinkable(child *container.Container) bool { - // A container is linkable only if it belongs to the default network - _, ok := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] - return ok -} - -func enableIPOnPredefinedNetwork() bool { - return false -} - -func (daemon *Daemon) isNetworkHotPluggable() bool { - return true -} - -func setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]libnetwork.SandboxOption) error { - var err error - - container.HostsPath, err = container.GetRootResourcePath("hosts") - if err != nil { - return err - } - *sboxOptions = append(*sboxOptions, libnetwork.OptionHostsPath(container.HostsPath)) - - container.ResolvConfPath, err = container.GetRootResourcePath("resolv.conf") - if err != nil { - return err - } - *sboxOptions = append(*sboxOptions, libnetwork.OptionResolvConfPath(container.ResolvConfPath)) - return nil -} - -func (daemon *Daemon) initializeNetworkingPaths(container *container.Container, nc *container.Container) error { - container.HostnamePath = nc.HostnamePath - container.HostsPath = nc.HostsPath - container.ResolvConfPath = nc.ResolvConfPath - return nil -} - -func (daemon *Daemon) setupContainerMountsRoot(c *container.Container) error { - // get the root mount path so we can make it unbindable - p, err := c.MountsResourcePath("") - if err != nil { - return err - } - return idtools.MkdirAllAndChown(p, 0700, daemon.idMappings.RootPair()) -} diff --git a/vendor/github.com/docker/docker/daemon/container_operations_windows.go b/vendor/github.com/docker/docker/daemon/container_operations_windows.go deleted file mode 100644 index 562528a8e..000000000 --- a/vendor/github.com/docker/docker/daemon/container_operations_windows.go +++ /dev/null @@ -1,201 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "fmt" - "io/ioutil" - "os" - - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/system" - "github.com/docker/libnetwork" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { - return nil, nil -} - -func (daemon *Daemon) setupConfigDir(c *container.Container) (setupErr error) { - if len(c.ConfigReferences) == 0 { - return nil - } - - localPath := c.ConfigsDirPath() - logrus.Debugf("configs: setting up config dir: %s", localPath) - - // create local config root - if err := system.MkdirAllWithACL(localPath, 0, system.SddlAdministratorsLocalSystem); err != nil { - return errors.Wrap(err, "error creating config dir") - } - - defer func() { - if setupErr != nil { - if err := os.RemoveAll(localPath); err != nil { - logrus.Errorf("error cleaning up config dir: %s", err) - } - } - }() - - if c.DependencyStore == nil { - return fmt.Errorf("config store is not initialized") - } - - for _, configRef := range c.ConfigReferences { - // TODO (ehazlett): use type switch when more are supported - if 
configRef.File == nil { - logrus.Error("config target type is not a file target") - continue - } - - fPath := c.ConfigFilePath(*configRef) - log := logrus.WithFields(logrus.Fields{"name": configRef.File.Name, "path": fPath}) - - log.Debug("injecting config") - config, err := c.DependencyStore.Configs().Get(configRef.ConfigID) - if err != nil { - return errors.Wrap(err, "unable to get config from config store") - } - if err := ioutil.WriteFile(fPath, config.Spec.Data, configRef.File.Mode); err != nil { - return errors.Wrap(err, "error injecting config") - } - } - - return nil -} - -func (daemon *Daemon) setupIpcDirs(container *container.Container) error { - return nil -} - -// TODO Windows: Fix Post-TP5. This is a hack to allow docker cp to work -// against containers which have volumes. You will still be able to cp -// to somewhere on the container drive, but not to any mounted volumes -// inside the container. Without this fix, docker cp is broken to any -// container which has a volume, regardless of where the file is inside the -// container. -func (daemon *Daemon) mountVolumes(container *container.Container) error { - return nil -} - -func detachMounted(path string) error { - return nil -} - -func (daemon *Daemon) setupSecretDir(c *container.Container) (setupErr error) { - if len(c.SecretReferences) == 0 { - return nil - } - - localMountPath, err := c.SecretMountPath() - if err != nil { - return err - } - logrus.Debugf("secrets: setting up secret dir: %s", localMountPath) - - // create local secret root - if err := system.MkdirAllWithACL(localMountPath, 0, system.SddlAdministratorsLocalSystem); err != nil { - return errors.Wrap(err, "error creating secret local directory") - } - - defer func() { - if setupErr != nil { - if err := os.RemoveAll(localMountPath); err != nil { - logrus.Errorf("error cleaning up secret mount: %s", err) - } - } - }() - - if c.DependencyStore == nil { - return fmt.Errorf("secret store is not initialized") - } - - for _, s := range c.SecretReferences { - // TODO (ehazlett): use type switch when more are supported - if s.File == nil { - logrus.Error("secret target type is not a file target") - continue - } - - // secrets are created in the SecretMountPath on the host, at a - // single level - fPath, err := c.SecretFilePath(*s) - if err != nil { - return err - } - logrus.WithFields(logrus.Fields{ - "name": s.File.Name, - "path": fPath, - }).Debug("injecting secret") - secret, err := c.DependencyStore.Secrets().Get(s.SecretID) - if err != nil { - return errors.Wrap(err, "unable to get secret from secret store") - } - if err := ioutil.WriteFile(fPath, secret.Spec.Data, s.File.Mode); err != nil { - return errors.Wrap(err, "error injecting secret") - } - } - - return nil -} - -func killProcessDirectly(container *container.Container) error { - return nil -} - -func isLinkable(child *container.Container) bool { - return false -} - -func enableIPOnPredefinedNetwork() bool { - return true -} - -func (daemon *Daemon) isNetworkHotPluggable() bool { - return true -} - -func setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]libnetwork.SandboxOption) error { - return nil -} - -func (daemon *Daemon) initializeNetworkingPaths(container *container.Container, nc *container.Container) error { - - if nc.HostConfig.Isolation.IsHyperV() { - return fmt.Errorf("sharing of hyperv containers network is not supported") - } - - container.NetworkSharedContainerID = nc.ID - - if nc.NetworkSettings != nil { - for n := range nc.NetworkSettings.Networks { - sn, err := 
daemon.FindNetwork(n) - if err != nil { - continue - } - - ep, err := getEndpointInNetwork(nc.Name, sn) - if err != nil { - continue - } - - data, err := ep.DriverInfo() - if err != nil { - continue - } - - if data["GW_INFO"] != nil { - gwInfo := data["GW_INFO"].(map[string]interface{}) - if gwInfo["hnsid"] != nil { - container.SharedEndpointList = append(container.SharedEndpointList, gwInfo["hnsid"].(string)) - } - } - - if data["hnsid"] != nil { - container.SharedEndpointList = append(container.SharedEndpointList, data["hnsid"].(string)) - } - } - } - - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/container_windows.go b/vendor/github.com/docker/docker/daemon/container_windows.go deleted file mode 100644 index 0ca8039dd..000000000 --- a/vendor/github.com/docker/docker/daemon/container_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "github.com/docker/docker/container" -) - -func (daemon *Daemon) saveApparmorConfig(container *container.Container) error { - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/create.go b/vendor/github.com/docker/docker/daemon/create.go deleted file mode 100644 index 6702243fa..000000000 --- a/vendor/github.com/docker/docker/daemon/create.go +++ /dev/null @@ -1,304 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "fmt" - "net" - "runtime" - "strings" - "time" - - "github.com/docker/docker/api/types" - containertypes "github.com/docker/docker/api/types/container" - networktypes "github.com/docker/docker/api/types/network" - "github.com/docker/docker/container" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/image" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/runconfig" - "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// CreateManagedContainer creates a container that is managed by a Service -func (daemon *Daemon) CreateManagedContainer(params types.ContainerCreateConfig) (containertypes.ContainerCreateCreatedBody, error) { - return daemon.containerCreate(params, true) -} - -// ContainerCreate creates a regular container -func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig) (containertypes.ContainerCreateCreatedBody, error) { - return daemon.containerCreate(params, false) -} - -func (daemon *Daemon) containerCreate(params types.ContainerCreateConfig, managed bool) (containertypes.ContainerCreateCreatedBody, error) { - start := time.Now() - if params.Config == nil { - return containertypes.ContainerCreateCreatedBody{}, errdefs.InvalidParameter(errors.New("Config cannot be empty in order to create a container")) - } - - os := runtime.GOOS - if params.Config.Image != "" { - img, err := daemon.imageService.GetImage(params.Config.Image) - if err == nil { - os = img.OS - } - } else { - // This means scratch. On Windows, we can safely assume that this is a linux - // container.
On other platforms, it's the host OS (which it already is) - if runtime.GOOS == "windows" && system.LCOWSupported() { - os = "linux" - } - } - - warnings, err := daemon.verifyContainerSettings(os, params.HostConfig, params.Config, false) - if err != nil { - return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, errdefs.InvalidParameter(err) - } - - err = verifyNetworkingConfig(params.NetworkingConfig) - if err != nil { - return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, errdefs.InvalidParameter(err) - } - - if params.HostConfig == nil { - params.HostConfig = &containertypes.HostConfig{} - } - err = daemon.adaptContainerSettings(params.HostConfig, params.AdjustCPUShares) - if err != nil { - return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, errdefs.InvalidParameter(err) - } - - container, err := daemon.create(params, managed) - if err != nil { - return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, err - } - containerActions.WithValues("create").UpdateSince(start) - - return containertypes.ContainerCreateCreatedBody{ID: container.ID, Warnings: warnings}, nil -} - -// Create creates a new container from the given configuration with a given name. -func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) (retC *container.Container, retErr error) { - var ( - container *container.Container - img *image.Image - imgID image.ID - err error - ) - - os := runtime.GOOS - if params.Config.Image != "" { - img, err = daemon.imageService.GetImage(params.Config.Image) - if err != nil { - return nil, err - } - if img.OS != "" { - os = img.OS - } else { - // default to the host OS except on Windows with LCOW - if runtime.GOOS == "windows" && system.LCOWSupported() { - os = "linux" - } - } - imgID = img.ID() - - if runtime.GOOS == "windows" && img.OS == "linux" && !system.LCOWSupported() { - return nil, errors.New("operating system on which parent image was created is not Windows") - } - } else { - if runtime.GOOS == "windows" { - os = "linux" // 'scratch' case. - } - } - - if err := daemon.mergeAndVerifyConfig(params.Config, img); err != nil { - return nil, errdefs.InvalidParameter(err) - } - - if err := daemon.mergeAndVerifyLogConfig(¶ms.HostConfig.LogConfig); err != nil { - return nil, errdefs.InvalidParameter(err) - } - - if container, err = daemon.newContainer(params.Name, os, params.Config, params.HostConfig, imgID, managed); err != nil { - return nil, err - } - defer func() { - if retErr != nil { - if err := daemon.cleanupContainer(container, true, true); err != nil { - logrus.Errorf("failed to cleanup container on create error: %v", err) - } - } - }() - - if err := daemon.setSecurityOptions(container, params.HostConfig); err != nil { - return nil, err - } - - container.HostConfig.StorageOpt = params.HostConfig.StorageOpt - - // Fixes: https://github.com/moby/moby/issues/34074 and - // https://github.com/docker/for-win/issues/999. - // Merge the daemon's storage options if they aren't already present. We only - // do this on Windows as there's no effective sandbox size limit other than - // physical on Linux. 
- if runtime.GOOS == "windows" { - if container.HostConfig.StorageOpt == nil { - container.HostConfig.StorageOpt = make(map[string]string) - } - for _, v := range daemon.configStore.GraphOptions { - opt := strings.SplitN(v, "=", 2) - if _, ok := container.HostConfig.StorageOpt[opt[0]]; !ok { - container.HostConfig.StorageOpt[opt[0]] = opt[1] - } - } - } - - // Set RWLayer for container after mount labels have been set - rwLayer, err := daemon.imageService.CreateLayer(container, setupInitLayer(daemon.idMappings)) - if err != nil { - return nil, errdefs.System(err) - } - container.RWLayer = rwLayer - - rootIDs := daemon.idMappings.RootPair() - if err := idtools.MkdirAndChown(container.Root, 0700, rootIDs); err != nil { - return nil, err - } - if err := idtools.MkdirAndChown(container.CheckpointDir(), 0700, rootIDs); err != nil { - return nil, err - } - - if err := daemon.setHostConfig(container, params.HostConfig); err != nil { - return nil, err - } - - if err := daemon.createContainerOSSpecificSettings(container, params.Config, params.HostConfig); err != nil { - return nil, err - } - - var endpointsConfigs map[string]*networktypes.EndpointSettings - if params.NetworkingConfig != nil { - endpointsConfigs = params.NetworkingConfig.EndpointsConfig - } - // Make sure NetworkMode has an acceptable value. We do this to ensure - // backwards API compatibility. - runconfig.SetDefaultNetModeIfBlank(container.HostConfig) - - daemon.updateContainerNetworkSettings(container, endpointsConfigs) - if err := daemon.Register(container); err != nil { - return nil, err - } - stateCtr.set(container.ID, "stopped") - daemon.LogContainerEvent(container, "create") - return container, nil -} - -func toHostConfigSelinuxLabels(labels []string) []string { - for i, l := range labels { - labels[i] = "label=" + l - } - return labels -} - -func (daemon *Daemon) generateSecurityOpt(hostConfig *containertypes.HostConfig) ([]string, error) { - for _, opt := range hostConfig.SecurityOpt { - con := strings.Split(opt, "=") - if con[0] == "label" { - // Caller overrode SecurityOpts - return nil, nil - } - } - ipcMode := hostConfig.IpcMode - pidMode := hostConfig.PidMode - privileged := hostConfig.Privileged - if ipcMode.IsHost() || pidMode.IsHost() || privileged { - return toHostConfigSelinuxLabels(label.DisableSecOpt()), nil - } - - var ipcLabel []string - var pidLabel []string - ipcContainer := ipcMode.Container() - pidContainer := pidMode.Container() - if ipcContainer != "" { - c, err := daemon.GetContainer(ipcContainer) - if err != nil { - return nil, err - } - ipcLabel = label.DupSecOpt(c.ProcessLabel) - if pidContainer == "" { - return toHostConfigSelinuxLabels(ipcLabel), err - } - } - if pidContainer != "" { - c, err := daemon.GetContainer(pidContainer) - if err != nil { - return nil, err - } - - pidLabel = label.DupSecOpt(c.ProcessLabel) - if ipcContainer == "" { - return toHostConfigSelinuxLabels(pidLabel), err - } - } - - if pidLabel != nil && ipcLabel != nil { - for i := 0; i < len(pidLabel); i++ { - if pidLabel[i] != ipcLabel[i] { - return nil, fmt.Errorf("--ipc and --pid containers SELinux labels aren't the same") - } - } - return toHostConfigSelinuxLabels(pidLabel), nil - } - return nil, nil -} - -func (daemon *Daemon) mergeAndVerifyConfig(config *containertypes.Config, img *image.Image) error { - if img != nil && img.Config != nil { - if err := merge(config, img.Config); err != nil { - return err - } - } - // Reset the Entrypoint if it is [""] - if len(config.Entrypoint) == 1 && config.Entrypoint[0] == "" { - 
config.Entrypoint = nil - } - if len(config.Entrypoint) == 0 && len(config.Cmd) == 0 { - return fmt.Errorf("No command specified") - } - return nil -} - -// verifyNetworkingConfig checks that the client has not set configurations for more than one network while creating a container. -// It also checks that any IPAMConfig is valid. -func verifyNetworkingConfig(nwConfig *networktypes.NetworkingConfig) error { - if nwConfig == nil || len(nwConfig.EndpointsConfig) == 0 { - return nil - } - if len(nwConfig.EndpointsConfig) == 1 { - for k, v := range nwConfig.EndpointsConfig { - if v == nil { - return errdefs.InvalidParameter(errors.Errorf("no EndpointSettings for %s", k)) - } - if v.IPAMConfig != nil { - if v.IPAMConfig.IPv4Address != "" && net.ParseIP(v.IPAMConfig.IPv4Address).To4() == nil { - return errors.Errorf("invalid IPv4 address: %s", v.IPAMConfig.IPv4Address) - } - if v.IPAMConfig.IPv6Address != "" { - n := net.ParseIP(v.IPAMConfig.IPv6Address) - // if the address is an invalid network address (ParseIP == nil) or if it is - // an IPv4 address (To4() != nil), then it is an invalid IPv6 address - if n == nil || n.To4() != nil { - return errors.Errorf("invalid IPv6 address: %s", v.IPAMConfig.IPv6Address) - } - } - } - } - return nil - } - l := make([]string, 0, len(nwConfig.EndpointsConfig)) - for k := range nwConfig.EndpointsConfig { - l = append(l, k) - } - return errors.Errorf("Container cannot be connected to network endpoints: %s", strings.Join(l, ", ")) -}
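Note: the address checks above lean on a net.ParseIP subtlety: To4() is non-nil for any IPv4 address, so "parses but To4() == nil" rejects a bad IPv4 value, while "parses and To4() != nil" rejects an IPv4 value supplied where IPv6 is required. A standalone illustration:

package main

import (
	"fmt"
	"net"
)

// validIPv4 mirrors the IPv4 branch: the address must parse and be IPv4.
func validIPv4(s string) bool { ip := net.ParseIP(s); return ip != nil && ip.To4() != nil }

// validIPv6 mirrors the IPv6 branch: the address must parse and not be IPv4.
func validIPv6(s string) bool { ip := net.ParseIP(s); return ip != nil && ip.To4() == nil }

func main() {
	fmt.Println(validIPv4("172.17.0.2"))  // true
	fmt.Println(validIPv4("not-an-ip"))   // false: ParseIP returns nil
	fmt.Println(validIPv6("2001:db8::1")) // true
	fmt.Println(validIPv6("172.17.0.2"))  // false: IPv4 where IPv6 is required
}

This is also why an IPv4-mapped value like "::ffff:10.0.0.1" is rejected as an IPv6 address: it parses, but To4() is non-nil.

diff --git a/vendor/github.com/docker/docker/daemon/create_unix.go b/vendor/github.com/docker/docker/daemon/create_unix.go deleted file mode 100644 index eb9b65373..000000000 --- a/vendor/github.com/docker/docker/daemon/create_unix.go +++ /dev/null @@ -1,94 +0,0 @@ -// +build !windows - -package daemon // import "github.com/docker/docker/daemon" - -import ( - "context" - "fmt" - "os" - "path/filepath" - - containertypes "github.com/docker/docker/api/types/container" - mounttypes "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/container" - "github.com/docker/docker/oci" - "github.com/docker/docker/pkg/stringid" - volumeopts "github.com/docker/docker/volume/service/opts" - "github.com/opencontainers/selinux/go-selinux/label" - "github.com/sirupsen/logrus" -) - -// createContainerOSSpecificSettings performs host-OS specific container create functionality -func (daemon *Daemon) createContainerOSSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error { - if err := daemon.Mount(container); err != nil { - return err - } - defer daemon.Unmount(container) - - rootIDs := daemon.idMappings.RootPair() - if err := container.SetupWorkingDirectory(rootIDs); err != nil { - return err - } - - // Set the default masked and readonly paths with regard to the host config options if they are not set. - if hostConfig.MaskedPaths == nil && !hostConfig.Privileged { - hostConfig.MaskedPaths = oci.DefaultSpec().Linux.MaskedPaths // Set it to the default if nil - container.HostConfig.MaskedPaths = hostConfig.MaskedPaths - } - if hostConfig.ReadonlyPaths == nil && !hostConfig.Privileged { - hostConfig.ReadonlyPaths = oci.DefaultSpec().Linux.ReadonlyPaths // Set it to the default if nil - container.HostConfig.ReadonlyPaths = hostConfig.ReadonlyPaths - } - - for spec := range config.Volumes { - name := stringid.GenerateNonCryptoID() - destination := filepath.Clean(spec) - - // Skip volumes for which we already have something mounted on that - // destination because of a --volume-from.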
- if container.IsDestinationMounted(destination) { - continue - } - path, err := container.GetResourcePath(destination) - if err != nil { - return err - } - - stat, err := os.Stat(path) - if err == nil && !stat.IsDir() { - return fmt.Errorf("cannot mount volume over existing file, file exists %s", path) - } - - v, err := daemon.volumes.Create(context.TODO(), name, hostConfig.VolumeDriver, volumeopts.WithCreateReference(container.ID)) - if err != nil { - return err - } - - if err := label.Relabel(v.Mountpoint, container.MountLabel, true); err != nil { - return err - } - - container.AddMountPointWithVolume(destination, &volumeWrapper{v: v, s: daemon.volumes}, true) - } - return daemon.populateVolumes(container) -} - -// populateVolumes copies data from the container's rootfs into the volume for non-binds. -// this is only called when the container is created. -func (daemon *Daemon) populateVolumes(c *container.Container) error { - for _, mnt := range c.MountPoints { - if mnt.Volume == nil { - continue - } - - if mnt.Type != mounttypes.TypeVolume || !mnt.CopyData { - continue - } - - logrus.Debugf("copying image data from %s:%s, to %s", c.ID, mnt.Destination, mnt.Name) - if err := c.CopyImagePathContent(mnt.Volume, mnt.Destination); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/create_windows.go b/vendor/github.com/docker/docker/daemon/create_windows.go deleted file mode 100644 index 37e425a01..000000000 --- a/vendor/github.com/docker/docker/daemon/create_windows.go +++ /dev/null @@ -1,93 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "context" - "fmt" - "runtime" - - containertypes "github.com/docker/docker/api/types/container" - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/stringid" - volumemounts "github.com/docker/docker/volume/mounts" - volumeopts "github.com/docker/docker/volume/service/opts" -) - -// createContainerOSSpecificSettings performs host-OS specific container create functionality -func (daemon *Daemon) createContainerOSSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error { - - if container.OS == runtime.GOOS { - // Make sure the host config has the default daemon isolation if not specified by caller. - if containertypes.Isolation.IsDefault(containertypes.Isolation(hostConfig.Isolation)) { - hostConfig.Isolation = daemon.defaultIsolation - } - } else { - // LCOW must be a Hyper-V container as you can't run a shared kernel when one - // is a Windows kernel, the other is a Linux kernel. - if containertypes.Isolation.IsProcess(containertypes.Isolation(hostConfig.Isolation)) { - return fmt.Errorf("process isolation is invalid for Linux containers on Windows") - } - hostConfig.Isolation = "hyperv" - } - parser := volumemounts.NewParser(container.OS) - for spec := range config.Volumes { - - mp, err := parser.ParseMountRaw(spec, hostConfig.VolumeDriver) - if err != nil { - return fmt.Errorf("Unrecognised volume spec: %v", err) - } - - // If the mountpoint doesn't have a name, generate one. - if len(mp.Name) == 0 { - mp.Name = stringid.GenerateNonCryptoID() - } - - // Skip volumes for which we already have something mounted on that - // destination because of a --volume-from. - if container.IsDestinationMounted(mp.Destination) { - continue - } - - volumeDriver := hostConfig.VolumeDriver - - // Create the volume in the volume driver. If it doesn't exist, - // a new one will be created. 
- v, err := daemon.volumes.Create(context.TODO(), mp.Name, volumeDriver, volumeopts.WithCreateReference(container.ID)) - if err != nil { - return err - } - - // FIXME Windows: This code block is present in the Linux version and - // allows the contents to be copied to the container FS prior to it - // being started. However, the function utilizes the FollowSymLinkInScope - // path which does not cope with Windows volume-style file paths. There - // is a separate effort to resolve this (@swernli), so this processing - // is deferred for now. A case where this would be useful is when - // a dockerfile includes a VOLUME statement, but something is created - // in that directory during the dockerfile processing. What this means - // on Windows for TP5 is that in that scenario, the contents will not be - // copied, but that's (somewhat) OK, as HCS will bomb out soon after, - // as it doesn't support mapped directories which have contents in the - // destination path anyway. - // - // Example for repro later: - // FROM windowsservercore - // RUN mkdir c:\myvol - // RUN copy c:\windows\system32\ntdll.dll c:\myvol - // VOLUME "c:\myvol" - // - // Then - // docker build -t vol . - // docker run -it --rm vol cmd <-- This is where HCS will error out. - // - // // never attempt to copy existing content in a container FS to a shared volume - // if v.DriverName() == volume.DefaultDriverName { - // if err := container.CopyImagePathContent(v, mp.Destination); err != nil { - // return err - // } - // } - - // Add it to container.MountPoints - container.AddMountPointWithVolume(mp.Destination, &volumeWrapper{v: v, s: daemon.volumes}, mp.RW) - } - return nil -}
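Note: the Windows-only storage-opt merge in create() earlier splits each daemon graph option on "=" and reads opt[1] without checking that the separator was present, which is safe only while GraphOptions are guaranteed to be key=value pairs. A defensive sketch of the same merge (mergeStorageOpts is an illustrative helper, not the daemon's API):

package main

import (
	"fmt"
	"strings"
)

// mergeStorageOpts merges daemon graph options into a container's storage
// options without overriding values the container already set, and guards
// against an option with no "=" instead of indexing opt[1] blindly.
func mergeStorageOpts(dst map[string]string, graphOptions []string) {
	for _, v := range graphOptions {
		opt := strings.SplitN(v, "=", 2)
		if len(opt) != 2 {
			continue // malformed option; the daemon assumes key=value
		}
		if _, ok := dst[opt[0]]; !ok {
			dst[opt[0]] = opt[1]
		}
	}
}

func main() {
	opts := map[string]string{"size": "20G"}
	mergeStorageOpts(opts, []string{"size=120G", "dm.basesize=30G"})
	fmt.Println(opts) // size keeps the container's value; dm.basesize is merged in
}

diff --git a/vendor/github.com/docker/docker/daemon/daemon.go b/vendor/github.com/docker/docker/daemon/daemon.go deleted file mode 100644 index 43b7731a3..000000000 --- a/vendor/github.com/docker/docker/daemon/daemon.go +++ /dev/null @@ -1,1315 +0,0 @@ -// Package daemon exposes the functions that occur on the host server -// that the Docker daemon is running on. -// -// In implementing the various functions of the daemon, there is often -// a method-specific struct for configuring the runtime behavior.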
-package daemon // import "github.com/docker/docker/daemon" - -import ( - "context" - "fmt" - "io/ioutil" - "net" - "os" - "path" - "path/filepath" - "runtime" - "strings" - "sync" - "time" - - "github.com/docker/docker/api/types" - containertypes "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/builder" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/config" - "github.com/docker/docker/daemon/discovery" - "github.com/docker/docker/daemon/events" - "github.com/docker/docker/daemon/exec" - "github.com/docker/docker/daemon/images" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/daemon/network" - "github.com/docker/docker/errdefs" - "github.com/sirupsen/logrus" - // register graph drivers - _ "github.com/docker/docker/daemon/graphdriver/register" - "github.com/docker/docker/daemon/stats" - dmetadata "github.com/docker/docker/distribution/metadata" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/migrate/v1" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/locker" - "github.com/docker/docker/pkg/plugingetter" - "github.com/docker/docker/pkg/sysinfo" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/pkg/truncindex" - "github.com/docker/docker/plugin" - pluginexec "github.com/docker/docker/plugin/executor/containerd" - refstore "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/docker/docker/runconfig" - volumesservice "github.com/docker/docker/volume/service" - "github.com/docker/libnetwork" - "github.com/docker/libnetwork/cluster" - nwconfig "github.com/docker/libnetwork/config" - "github.com/pkg/errors" -) - -// ContainersNamespace is the name of the namespace used for users containers -const ContainersNamespace = "moby" - -var ( - errSystemNotSupported = errors.New("the Docker daemon is not supported on this platform") -) - -// Daemon holds information about the Docker daemon. 
-type Daemon struct { - ID string - repository string - containers container.Store - containersReplica container.ViewDB - execCommands *exec.Store - imageService *images.ImageService - idIndex *truncindex.TruncIndex - configStore *config.Config - statsCollector *stats.Collector - defaultLogConfig containertypes.LogConfig - RegistryService registry.Service - EventsService *events.Events - netController libnetwork.NetworkController - volumes *volumesservice.VolumesService - discoveryWatcher discovery.Reloader - root string - seccompEnabled bool - apparmorEnabled bool - shutdown bool - idMappings *idtools.IDMappings - // TODO: move graphDrivers field to an InfoService - graphDrivers map[string]string // By operating system - - PluginStore *plugin.Store // todo: remove - pluginManager *plugin.Manager - linkIndex *linkIndex - containerd libcontainerd.Client - defaultIsolation containertypes.Isolation // Default isolation mode on Windows - clusterProvider cluster.Provider - cluster Cluster - genericResources []swarm.GenericResource - metricsPluginListener net.Listener - - machineMemory uint64 - - seccompProfile []byte - seccompProfilePath string - - diskUsageRunning int32 - pruneRunning int32 - hosts map[string]bool // hosts stores the addresses the daemon is listening on - startupDone chan struct{} - - attachmentStore network.AttachmentStore - attachableNetworkLock *locker.Locker -} - -// StoreHosts stores the addresses the daemon is listening on -func (daemon *Daemon) StoreHosts(hosts []string) { - if daemon.hosts == nil { - daemon.hosts = make(map[string]bool) - } - for _, h := range hosts { - daemon.hosts[h] = true - } -} - -// HasExperimental returns whether the experimental features of the daemon are enabled or not -func (daemon *Daemon) HasExperimental() bool { - return daemon.configStore != nil && daemon.configStore.Experimental -} - -func (daemon *Daemon) restore() error { - containers := make(map[string]*container.Container) - - logrus.Info("Loading containers: start.") - - dir, err := ioutil.ReadDir(daemon.repository) - if err != nil { - return err - } - - for _, v := range dir { - id := v.Name() - container, err := daemon.load(id) - if err != nil { - logrus.Errorf("Failed to load container %v: %v", id, err) - continue - } - if !system.IsOSSupported(container.OS) { - logrus.Errorf("Failed to load container %v: %s (%q)", id, system.ErrNotSupportedOperatingSystem, container.OS) - continue - } - // Ignore the container if it does not support the current driver being used by the graph - currentDriverForContainerOS := daemon.graphDrivers[container.OS] - if (container.Driver == "" && currentDriverForContainerOS == "aufs") || container.Driver == currentDriverForContainerOS { - rwlayer, err := daemon.imageService.GetLayerByID(container.ID, container.OS) - if err != nil { - logrus.Errorf("Failed to load container mount %v: %v", id, err) - continue - } - container.RWLayer = rwlayer - logrus.Debugf("Loaded container %v, isRunning: %v", container.ID, container.IsRunning()) - - containers[container.ID] = container - } else { - logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID) - } - } - - removeContainers := make(map[string]*container.Container) - restartContainers := make(map[*container.Container]chan struct{}) - activeSandboxes := make(map[string]interface{}) - for id, c := range containers { - if err := daemon.registerName(c); err != nil { - logrus.Errorf("Failed to register container name %s: %s", c.ID, err) - delete(containers, id) - 
continue - } - if err := daemon.Register(c); err != nil { - logrus.Errorf("Failed to register container %s: %s", c.ID, err) - delete(containers, id) - continue - } - - // The LogConfig.Type is empty if the container was created before docker 1.12 with default log driver. - // We should rewrite it to use the daemon defaults. - // Fixes https://github.com/docker/docker/issues/22536 - if c.HostConfig.LogConfig.Type == "" { - if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil { - logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err) - continue - } - } - } - - var ( - wg sync.WaitGroup - mapLock sync.Mutex - ) - for _, c := range containers { - wg.Add(1) - go func(c *container.Container) { - defer wg.Done() - daemon.backportMountSpec(c) - if err := daemon.checkpointAndSave(c); err != nil { - logrus.WithError(err).WithField("container", c.ID).Error("error saving backported mountspec to disk") - } - - daemon.setStateCounter(c) - - logrus.WithFields(logrus.Fields{ - "container": c.ID, - "running": c.IsRunning(), - "paused": c.IsPaused(), - }).Debug("restoring container") - - var ( - err error - alive bool - ec uint32 - exitedAt time.Time - ) - - alive, _, err = daemon.containerd.Restore(context.Background(), c.ID, c.InitializeStdio) - if err != nil && !errdefs.IsNotFound(err) { - logrus.Errorf("Failed to restore container %s with containerd: %s", c.ID, err) - return - } - if !alive { - ec, exitedAt, err = daemon.containerd.DeleteTask(context.Background(), c.ID) - if err != nil && !errdefs.IsNotFound(err) { - logrus.WithError(err).Errorf("Failed to delete container %s from containerd", c.ID) - return - } - } else if !daemon.configStore.LiveRestoreEnabled { - if err := daemon.kill(c, c.StopSignal()); err != nil && !errdefs.IsNotFound(err) { - logrus.WithError(err).WithField("container", c.ID).Error("error shutting down container") - return - } - } - - if c.IsRunning() || c.IsPaused() { - c.RestartManager().Cancel() // manually start containers because some need to wait for swarm networking - - if c.IsPaused() && alive { - s, err := daemon.containerd.Status(context.Background(), c.ID) - if err != nil { - logrus.WithError(err).WithField("container", c.ID). - Errorf("Failed to get container status") - } else { - logrus.WithField("container", c.ID).WithField("state", s). - Info("restored container paused") - switch s { - case libcontainerd.StatusPaused, libcontainerd.StatusPausing: - // nothing to do - case libcontainerd.StatusStopped: - alive = false - case libcontainerd.StatusUnknown: - logrus.WithField("container", c.ID). - Error("Unknown status for container during restore") - default: - // running - c.Lock() - c.Paused = false - daemon.setStateCounter(c) - if err := c.CheckpointTo(daemon.containersReplica); err != nil { - logrus.WithError(err).WithField("container", c.ID). - Error("Failed to update stopped container state") - } - c.Unlock() - } - } - } - - if !alive { - c.Lock() - c.SetStopped(&container.ExitStatus{ExitCode: int(ec), ExitedAt: exitedAt}) - daemon.Cleanup(c) - if err := c.CheckpointTo(daemon.containersReplica); err != nil { - logrus.Errorf("Failed to update stopped container %s state: %v", c.ID, err) - } - c.Unlock() - } - - // we call Mount and then Unmount to get BaseFs of the container - if err := daemon.Mount(c); err != nil { - // The mount is unlikely to fail. However, in case mount fails - // the container should be allowed to restore here. 
Some functionalities - // (like docker exec -u user) might be missing but the container is able to be - // stopped/restarted/removed. - // See #29365 for related information. - // The error is only logged here. - logrus.Warnf("Failed to mount container on getting BaseFs path %v: %v", c.ID, err) - } else { - if err := daemon.Unmount(c); err != nil { - logrus.Warnf("Failed to umount container on getting BaseFs path %v: %v", c.ID, err) - } - } - - c.ResetRestartManager(false) - if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() { - options, err := daemon.buildSandboxOptions(c) - if err != nil { - logrus.Warnf("Failed to build sandbox options to restore container %s: %v", c.ID, err) - } - mapLock.Lock() - activeSandboxes[c.NetworkSettings.SandboxID] = options - mapLock.Unlock() - } - } - - // get list of containers we need to restart - - // Do not autostart containers which - // have endpoints in a swarm scope - // network yet since the cluster is - // not initialized yet. We will start - // them after the cluster is - // initialized. - if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint && c.HasBeenStartedBefore { - mapLock.Lock() - restartContainers[c] = make(chan struct{}) - mapLock.Unlock() - } else if c.HostConfig != nil && c.HostConfig.AutoRemove { - mapLock.Lock() - removeContainers[c.ID] = c - mapLock.Unlock() - } - - c.Lock() - if c.RemovalInProgress { - // We probably crashed in the middle of a removal, reset - // the flag. - // - // We DO NOT remove the container here as we do not - // know if the user had requested for either the - // associated volumes, network links or both to also - // be removed. So we put the container in the "dead" - // state and leave further processing up to them. - logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID) - c.RemovalInProgress = false - c.Dead = true - if err := c.CheckpointTo(daemon.containersReplica); err != nil { - logrus.Errorf("Failed to update RemovalInProgress container %s state: %v", c.ID, err) - } - } - c.Unlock() - }(c) - } - wg.Wait() - daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes) - if err != nil { - return fmt.Errorf("Error initializing network controller: %v", err) - } - - // Now that all the containers are registered, register the links - for _, c := range containers { - if err := daemon.registerLinks(c, c.HostConfig); err != nil { - logrus.Errorf("failed to register link for container %s: %v", c.ID, err) - } - } - - group := sync.WaitGroup{} - for c, notifier := range restartContainers { - group.Add(1) - - go func(c *container.Container, chNotify chan struct{}) { - defer group.Done() - - logrus.Debugf("Starting container %s", c.ID) - - // ignore errors here as this is a best effort to wait for children to be - // running before we try to start the container - children := daemon.children(c) - timeout := time.After(5 * time.Second) - for _, child := range children { - if notifier, exists := restartContainers[child]; exists { - select { - case <-notifier: - case <-timeout: - } - } - } - - // Make sure networks are available before starting - daemon.waitForNetworks(c) - if err := daemon.containerStart(c, "", "", true); err != nil { - logrus.Errorf("Failed to start container %s: %s", c.ID, err) - } - close(chNotify) - }(c, notifier) - - } - group.Wait() - - removeGroup := sync.WaitGroup{} - for id := range removeContainers { - removeGroup.Add(1) - go func(cid string) { - if err := daemon.ContainerRm(cid, 
&types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil { - logrus.Errorf("Failed to remove container %s: %s", cid, err) - } - removeGroup.Done() - }(id) - } - removeGroup.Wait() - - // any containers that were started above would already have had this done, - // however we need to now prepare the mountpoints for the rest of the containers as well. - // This shouldn't cause any issue running on the containers that already had this run. - // This must be run after any containers with a restart policy so that containerized plugins - // can have a chance to be running before we try to initialize them. - for _, c := range containers { - // if the container has restart policy, do not - // prepare the mountpoints since it has been done on restarting. - // This is to speed up the daemon start when a restart container - // has a volume and the volume driver is not available. - if _, ok := restartContainers[c]; ok { - continue - } else if _, ok := removeContainers[c.ID]; ok { - // container is automatically removed, skip it. - continue - } - - group.Add(1) - go func(c *container.Container) { - defer group.Done() - if err := daemon.prepareMountPoints(c); err != nil { - logrus.Error(err) - } - }(c) - } - - group.Wait() - - logrus.Info("Loading containers: done.") - - return nil -} - -// RestartSwarmContainers restarts any autostart container which has a -// swarm endpoint. -func (daemon *Daemon) RestartSwarmContainers() { - group := sync.WaitGroup{} - for _, c := range daemon.List() { - if !c.IsRunning() && !c.IsPaused() { - // Autostart all the containers which has a - // swarm endpoint now that the cluster is - // initialized. - if daemon.configStore.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint && c.HasBeenStartedBefore { - group.Add(1) - go func(c *container.Container) { - defer group.Done() - if err := daemon.containerStart(c, "", "", true); err != nil { - logrus.Error(err) - } - }(c) - } - } - - } - group.Wait() -} - -// waitForNetworks is used during daemon initialization when starting up containers -// It ensures that all of a container's networks are available before the daemon tries to start the container. -// In practice it just makes sure the discovery service is available for containers which use a network that require discovery. -func (daemon *Daemon) waitForNetworks(c *container.Container) { - if daemon.discoveryWatcher == nil { - return - } - // Make sure if the container has a network that requires discovery that the discovery service is available before starting - for netName := range c.NetworkSettings.Networks { - // If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready - // Most likely this is because the K/V store used for discovery is in a container and needs to be started - if _, err := daemon.netController.NetworkByName(netName); err != nil { - if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok { - continue - } - // use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host - // FIXME: why is this slow??? - logrus.Debugf("Container %s waiting for network to be ready", c.Name) - select { - case <-daemon.discoveryWatcher.ReadyCh(): - case <-time.After(60 * time.Second): - } - return - } - } -} - -func (daemon *Daemon) children(c *container.Container) map[string]*container.Container { - return daemon.linkIndex.children(c) -} - -// parents returns the names of the parent containers of the container -// with the given name. 
-func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container { - return daemon.linkIndex.parents(c) -} - -func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error { - fullName := path.Join(parent.Name, alias) - if err := daemon.containersReplica.ReserveName(fullName, child.ID); err != nil { - if err == container.ErrNameReserved { - logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err) - return nil - } - return err - } - daemon.linkIndex.link(parent, child, fullName) - return nil -} - -// DaemonJoinsCluster informs the daemon has joined the cluster and provides -// the handler to query the cluster component -func (daemon *Daemon) DaemonJoinsCluster(clusterProvider cluster.Provider) { - daemon.setClusterProvider(clusterProvider) -} - -// DaemonLeavesCluster informs the daemon has left the cluster -func (daemon *Daemon) DaemonLeavesCluster() { - // Daemon is in charge of removing the attachable networks with - // connected containers when the node leaves the swarm - daemon.clearAttachableNetworks() - // We no longer need the cluster provider, stop it now so that - // the network agent will stop listening to cluster events. - daemon.setClusterProvider(nil) - // Wait for the networking cluster agent to stop - daemon.netController.AgentStopWait() - // Daemon is in charge of removing the ingress network when the - // node leaves the swarm. Wait for job to be done or timeout. - // This is called also on graceful daemon shutdown. We need to - // wait, because the ingress release has to happen before the - // network controller is stopped. - if done, err := daemon.ReleaseIngress(); err == nil { - select { - case <-done: - case <-time.After(5 * time.Second): - logrus.Warnf("timeout while waiting for ingress network removal") - } - } else { - logrus.Warnf("failed to initiate ingress network removal: %v", err) - } - - daemon.attachmentStore.ClearAttachments() -} - -// setClusterProvider sets a component for querying the current cluster state. -func (daemon *Daemon) setClusterProvider(clusterProvider cluster.Provider) { - daemon.clusterProvider = clusterProvider - daemon.netController.SetClusterProvider(clusterProvider) - daemon.attachableNetworkLock = locker.New() -} - -// IsSwarmCompatible verifies if the current daemon -// configuration is compatible with the swarm mode -func (daemon *Daemon) IsSwarmCompatible() error { - if daemon.configStore == nil { - return nil - } - return daemon.configStore.IsSwarmCompatible() -} - -// NewDaemon sets up everything for the daemon to be able to service -// requests from the webserver. -func NewDaemon(config *config.Config, registryService registry.Service, containerdRemote libcontainerd.Remote, pluginStore *plugin.Store) (daemon *Daemon, err error) { - setDefaultMtu(config) - - // Ensure that we have a correct root key limit for launching containers. - if err := ModifyRootKeyLimit(); err != nil { - logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err) - } - - // Ensure we have compatible and valid configuration options - if err := verifyDaemonSettings(config); err != nil { - return nil, err - } - - // Do we have a disabled network? 
- config.DisableBridge = isBridgeNetworkDisabled(config) - - // Verify the platform is supported as a daemon - if !platformSupported { - return nil, errSystemNotSupported - } - - // Validate platform-specific requirements - if err := checkSystem(); err != nil { - return nil, err - } - - idMappings, err := setupRemappedRoot(config) - if err != nil { - return nil, err - } - rootIDs := idMappings.RootPair() - if err := setupDaemonProcess(config); err != nil { - return nil, err - } - - // set up the tmpDir to use a canonical path - tmp, err := prepareTempDir(config.Root, rootIDs) - if err != nil { - return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err) - } - realTmp, err := getRealPath(tmp) - if err != nil { - return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err) - } - if runtime.GOOS == "windows" { - if _, err := os.Stat(realTmp); err != nil && os.IsNotExist(err) { - if err := system.MkdirAll(realTmp, 0700, ""); err != nil { - return nil, fmt.Errorf("Unable to create the TempDir (%s): %s", realTmp, err) - } - } - os.Setenv("TEMP", realTmp) - os.Setenv("TMP", realTmp) - } else { - os.Setenv("TMPDIR", realTmp) - } - - d := &Daemon{ - configStore: config, - PluginStore: pluginStore, - startupDone: make(chan struct{}), - } - // Ensure the daemon is properly shutdown if there is a failure during - // initialization - defer func() { - if err != nil { - if err := d.Shutdown(); err != nil { - logrus.Error(err) - } - } - }() - - if err := d.setGenericResources(config); err != nil { - return nil, err - } - // set up SIGUSR1 handler on Unix-like systems, or a Win32 global event - // on Windows to dump Go routine stacks - stackDumpDir := config.Root - if execRoot := config.GetExecRoot(); execRoot != "" { - stackDumpDir = execRoot - } - d.setupDumpStackTrap(stackDumpDir) - - if err := d.setupSeccompProfile(); err != nil { - return nil, err - } - - // Set the default isolation mode (only applicable on Windows) - if err := d.setDefaultIsolation(); err != nil { - return nil, fmt.Errorf("error setting default isolation mode: %v", err) - } - - if err := configureMaxThreads(config); err != nil { - logrus.Warnf("Failed to configure golang's threads limit: %v", err) - } - - if err := ensureDefaultAppArmorProfile(); err != nil { - logrus.Errorf(err.Error()) - } - - daemonRepo := filepath.Join(config.Root, "containers") - if err := idtools.MkdirAllAndChown(daemonRepo, 0700, rootIDs); err != nil { - return nil, err - } - - // Create the directory where we'll store the runtime scripts (i.e. in - // order to support runtimeArgs) - daemonRuntimes := filepath.Join(config.Root, "runtimes") - if err := system.MkdirAll(daemonRuntimes, 0700, ""); err != nil { - return nil, err - } - if err := d.loadRuntimes(); err != nil { - return nil, err - } - - if runtime.GOOS == "windows" { - if err := system.MkdirAll(filepath.Join(config.Root, "credentialspecs"), 0, ""); err != nil { - return nil, err - } - } - - // On Windows we don't support the environment variable, or a user supplied graphdriver - // as Windows has no choice in terms of which graphdrivers to use. It's a case of - // running Windows containers on Windows - windowsfilter, running Linux containers on Windows, - // lcow. Unix platforms however run a single graphdriver for all containers, and it can - // be set through an environment variable, a daemon start parameter, or chosen through - // initialization of the layerstore through driver priority order for example. 
- d.graphDrivers = make(map[string]string) - layerStores := make(map[string]layer.Store) - if runtime.GOOS == "windows" { - d.graphDrivers[runtime.GOOS] = "windowsfilter" - if system.LCOWSupported() { - d.graphDrivers["linux"] = "lcow" - } - } else { - driverName := os.Getenv("DOCKER_DRIVER") - if driverName == "" { - driverName = config.GraphDriver - } else { - logrus.Infof("Setting the storage driver from the $DOCKER_DRIVER environment variable (%s)", driverName) - } - d.graphDrivers[runtime.GOOS] = driverName // May still be empty. Layerstore init determines instead. - } - - d.RegistryService = registryService - logger.RegisterPluginGetter(d.PluginStore) - - metricsSockPath, err := d.listenMetricsSock() - if err != nil { - return nil, err - } - registerMetricsPluginCallback(d.PluginStore, metricsSockPath) - - createPluginExec := func(m *plugin.Manager) (plugin.Executor, error) { - return pluginexec.New(getPluginExecRoot(config.Root), containerdRemote, m) - } - - // Plugin system initialization should happen before restore. Do not change order. - d.pluginManager, err = plugin.NewManager(plugin.ManagerConfig{ - Root: filepath.Join(config.Root, "plugins"), - ExecRoot: getPluginExecRoot(config.Root), - Store: d.PluginStore, - CreateExecutor: createPluginExec, - RegistryService: registryService, - LiveRestoreEnabled: config.LiveRestoreEnabled, - LogPluginEvent: d.LogPluginEvent, // todo: make private - AuthzMiddleware: config.AuthzMiddleware, - }) - if err != nil { - return nil, errors.Wrap(err, "couldn't create plugin manager") - } - - if err := d.setupDefaultLogConfig(); err != nil { - return nil, err - } - - for operatingSystem, gd := range d.graphDrivers { - layerStores[operatingSystem], err = layer.NewStoreFromOptions(layer.StoreOptions{ - Root: config.Root, - MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"), - GraphDriver: gd, - GraphDriverOptions: config.GraphOptions, - IDMappings: idMappings, - PluginGetter: d.PluginStore, - ExperimentalEnabled: config.Experimental, - OS: operatingSystem, - }) - if err != nil { - return nil, err - } - } - - // As layerstore initialization may set the driver - for os := range d.graphDrivers { - d.graphDrivers[os] = layerStores[os].DriverName() - } - - // Configure and validate the kernels security support. Note this is a Linux/FreeBSD - // operation only, so it is safe to pass *just* the runtime OS graphdriver. - if err := configureKernelSecuritySupport(config, d.graphDrivers[runtime.GOOS]); err != nil { - return nil, err - } - - imageRoot := filepath.Join(config.Root, "image", d.graphDrivers[runtime.GOOS]) - ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb")) - if err != nil { - return nil, err - } - - lgrMap := make(map[string]image.LayerGetReleaser) - for os, ls := range layerStores { - lgrMap[os] = ls - } - imageStore, err := image.NewImageStore(ifs, lgrMap) - if err != nil { - return nil, err - } - - d.volumes, err = volumesservice.NewVolumeService(config.Root, d.PluginStore, rootIDs, d) - if err != nil { - return nil, err - } - - trustKey, err := loadOrCreateTrustKey(config.TrustKeyPath) - if err != nil { - return nil, err - } - - trustDir := filepath.Join(config.Root, "trust") - - if err := system.MkdirAll(trustDir, 0700, ""); err != nil { - return nil, err - } - - // We have a single tag/reference store for the daemon globally. However, it's - // stored under the graphdriver. 
On host platforms which only support a single - // container OS, but multiple selectable graphdrivers, this means depending on which - // graphdriver is chosen, the global reference store is under there. For - // platforms which support multiple container operating systems, this is slightly - // more problematic as where does the global ref store get located? Fortunately, - // for Windows, which is currently the only daemon supporting multiple container - // operating systems, the list of graphdrivers available isn't user configurable. - // For backwards compatibility, we just put it under the windowsfilter - // directory regardless. - refStoreLocation := filepath.Join(imageRoot, `repositories.json`) - rs, err := refstore.NewReferenceStore(refStoreLocation) - if err != nil { - return nil, fmt.Errorf("Couldn't create reference store repository: %s", err) - } - - distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution")) - if err != nil { - return nil, err - } - - // No content-addressability migration on Windows as it never supported pre-CA - if runtime.GOOS != "windows" { - migrationStart := time.Now() - if err := v1.Migrate(config.Root, d.graphDrivers[runtime.GOOS], layerStores[runtime.GOOS], imageStore, rs, distributionMetadataStore); err != nil { - logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err) - } - logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds()) - } - - // Discovery is only enabled when the daemon is launched with an address to advertise. When - // initialized, the daemon is registered and we can store the discovery backend as it's read-only - if err := d.initDiscovery(config); err != nil { - return nil, err - } - - sysInfo := sysinfo.New(false) - // Check if Devices cgroup is mounted, it is hard requirement for container security, - // on Linux. - if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled { - return nil, errors.New("Devices cgroup isn't mounted") - } - - d.ID = trustKey.PublicKey().KeyID() - d.repository = daemonRepo - d.containers = container.NewMemoryStore() - if d.containersReplica, err = container.NewViewDB(); err != nil { - return nil, err - } - d.execCommands = exec.NewStore() - d.idIndex = truncindex.NewTruncIndex([]string{}) - d.statsCollector = d.newStatsCollector(1 * time.Second) - - d.EventsService = events.New() - d.root = config.Root - d.idMappings = idMappings - d.seccompEnabled = sysInfo.Seccomp - d.apparmorEnabled = sysInfo.AppArmor - - d.linkIndex = newLinkIndex() - - // TODO: imageStore, distributionMetadataStore, and ReferenceStore are only - // used above to run migration. They could be initialized in ImageService - // if migration is called from daemon/images. layerStore might move as well. 
- d.imageService = images.NewImageService(images.ImageServiceConfig{ - ContainerStore: d.containers, - DistributionMetadataStore: distributionMetadataStore, - EventsService: d.EventsService, - ImageStore: imageStore, - LayerStores: layerStores, - MaxConcurrentDownloads: *config.MaxConcurrentDownloads, - MaxConcurrentUploads: *config.MaxConcurrentUploads, - ReferenceStore: rs, - RegistryService: registryService, - TrustKey: trustKey, - }) - - go d.execCommandGC() - - d.containerd, err = containerdRemote.NewClient(ContainersNamespace, d) - if err != nil { - return nil, err - } - - if err := d.restore(); err != nil { - return nil, err - } - close(d.startupDone) - - // FIXME: this method never returns an error - info, _ := d.SystemInfo() - - engineInfo.WithValues( - dockerversion.Version, - dockerversion.GitCommit, - info.Architecture, - info.Driver, - info.KernelVersion, - info.OperatingSystem, - info.OSType, - info.ID, - ).Set(1) - engineCpus.Set(float64(info.NCPU)) - engineMemory.Set(float64(info.MemTotal)) - - gd := "" - for os, driver := range d.graphDrivers { - if len(gd) > 0 { - gd += ", " - } - gd += driver - if len(d.graphDrivers) > 1 { - gd = fmt.Sprintf("%s (%s)", gd, os) - } - } - logrus.WithFields(logrus.Fields{ - "version": dockerversion.Version, - "commit": dockerversion.GitCommit, - "graphdriver(s)": gd, - }).Info("Docker daemon") - - return d, nil -} - -func (daemon *Daemon) waitForStartupDone() { - <-daemon.startupDone -} - -func (daemon *Daemon) shutdownContainer(c *container.Container) error { - stopTimeout := c.StopTimeout() - - // If container failed to exit in stopTimeout seconds of SIGTERM, then using the force - if err := daemon.containerStop(c, stopTimeout); err != nil { - return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err) - } - - // Wait without timeout for the container to exit. - // Ignore the result. - <-c.Wait(context.Background(), container.WaitConditionNotRunning) - return nil -} - -// ShutdownTimeout returns the timeout (in seconds) before containers are forcibly -// killed during shutdown. The default timeout can be configured both on the daemon -// and per container, and the longest timeout will be used. A grace-period of -// 5 seconds is added to the configured timeout. -// -// A negative (-1) timeout means "indefinitely", which means that containers -// are not forcibly killed, and the daemon shuts down after all containers exit. -func (daemon *Daemon) ShutdownTimeout() int { - shutdownTimeout := daemon.configStore.ShutdownTimeout - if shutdownTimeout < 0 { - return -1 - } - if daemon.containers == nil { - return shutdownTimeout - } - - graceTimeout := 5 - for _, c := range daemon.containers.List() { - stopTimeout := c.StopTimeout() - if stopTimeout < 0 { - return -1 - } - if stopTimeout+graceTimeout > shutdownTimeout { - shutdownTimeout = stopTimeout + graceTimeout - } - } - return shutdownTimeout -} - -// Shutdown stops the daemon. -func (daemon *Daemon) Shutdown() error { - daemon.shutdown = true - // Keep mounts and networking running on daemon shutdown if - // we are to keep containers running and restore them. 
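The ShutdownTimeout rule documented in the hunk above combines several inputs, so a standalone distillation may help (this is our sketch, not daemon code): the daemon-level timeout is the floor, any container's stop timeout plus a 5-second grace period can raise it, and a -1 anywhere means wait indefinitely.

    package main

    import "fmt"

    // effectiveShutdownTimeout is our distillation of the rule documented on
    // ShutdownTimeout above: -1 anywhere means wait indefinitely; otherwise the
    // largest per-container stop timeout plus a 5-second grace period may raise
    // the daemon's configured timeout.
    func effectiveShutdownTimeout(daemonTimeout int, stopTimeouts []int) int {
    	if daemonTimeout < 0 {
    		return -1
    	}
    	const grace = 5
    	out := daemonTimeout
    	for _, t := range stopTimeouts {
    		if t < 0 {
    			return -1
    		}
    		if t+grace > out {
    			out = t + grace
    		}
    	}
    	return out
    }

    func main() {
    	fmt.Println(effectiveShutdownTimeout(15, []int{10, 20})) // 25: 20s stop timeout + 5s grace
    	fmt.Println(effectiveShutdownTimeout(15, []int{-1}))     // -1: wait indefinitely
    }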
-
-	if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil {
-		// check if there are any running containers, if none we should do some cleanup
-		if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil {
-			// metrics plugins still need some cleanup
-			daemon.cleanupMetricsPlugins()
-			return nil
-		}
-	}
-
-	if daemon.containers != nil {
-		logrus.Debugf("daemon configured with a %d seconds minimum shutdown timeout", daemon.configStore.ShutdownTimeout)
-		logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.ShutdownTimeout())
-		daemon.containers.ApplyAll(func(c *container.Container) {
-			if !c.IsRunning() {
-				return
-			}
-			logrus.Debugf("stopping %s", c.ID)
-			if err := daemon.shutdownContainer(c); err != nil {
-				logrus.Errorf("Stop container error: %v", err)
-				return
-			}
-			if mountid, err := daemon.imageService.GetLayerMountID(c.ID, c.OS); err == nil {
-				daemon.cleanupMountsByID(mountid)
-			}
-			logrus.Debugf("container stopped %s", c.ID)
-		})
-	}
-
-	if daemon.volumes != nil {
-		if err := daemon.volumes.Shutdown(); err != nil {
-			logrus.Errorf("Error shutting down volume store: %v", err)
-		}
-	}
-
-	if daemon.imageService != nil {
-		daemon.imageService.Cleanup()
-	}
-
-	// If we are part of a cluster, clean up cluster resources
-	if daemon.clusterProvider != nil {
-		logrus.Debugf("start clean shutdown of cluster resources...")
-		daemon.DaemonLeavesCluster()
-	}
-
-	daemon.cleanupMetricsPlugins()
-
-	// Shutdown plugins after containers and layerstore. Don't change the order.
-	daemon.pluginShutdown()
-
-	// trigger libnetwork Stop only if it's initialized
-	if daemon.netController != nil {
-		daemon.netController.Stop()
-	}
-
-	return daemon.cleanupMounts()
-}
-
-// Mount sets container.BaseFS
-// (is it not set coming in? why is it unset?)
-func (daemon *Daemon) Mount(container *container.Container) error {
-	if container.RWLayer == nil {
-		return errors.New("RWLayer of container " + container.ID + " is unexpectedly nil")
-	}
-	dir, err := container.RWLayer.Mount(container.GetMountLabel())
-	if err != nil {
-		return err
-	}
-	logrus.Debugf("container mounted via layerStore: %v", dir)
-
-	if container.BaseFS != nil && container.BaseFS.Path() != dir.Path() {
-		// The mount path reported by the graph driver should always be trusted on Windows, since the
-		// volume path for a given mounted layer may change over time. This should only be an error
-		// on non-Windows operating systems.
-		if runtime.GOOS != "windows" {
-			daemon.Unmount(container)
-			return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
-				daemon.imageService.GraphDriverForOS(container.OS), container.ID, container.BaseFS, dir)
-		}
-	}
-	container.BaseFS = dir // TODO: combine these fields
-	return nil
-}
-
-// Unmount unsets the container base filesystem
-func (daemon *Daemon) Unmount(container *container.Container) error {
-	if container.RWLayer == nil {
-		return errors.New("RWLayer of container " + container.ID + " is unexpectedly nil")
-	}
-	if err := container.RWLayer.Unmount(); err != nil {
-		logrus.Errorf("Error unmounting container %s: %s", container.ID, err)
-		return err
-	}
-
-	return nil
-}
-
-// Subnets returns the IPv4 and IPv6 subnets of networks that are managed by Docker.
-func (daemon *Daemon) Subnets() ([]net.IPNet, []net.IPNet) { - var v4Subnets []net.IPNet - var v6Subnets []net.IPNet - - managedNetworks := daemon.netController.Networks() - - for _, managedNetwork := range managedNetworks { - v4infos, v6infos := managedNetwork.Info().IpamInfo() - for _, info := range v4infos { - if info.IPAMData.Pool != nil { - v4Subnets = append(v4Subnets, *info.IPAMData.Pool) - } - } - for _, info := range v6infos { - if info.IPAMData.Pool != nil { - v6Subnets = append(v6Subnets, *info.IPAMData.Pool) - } - } - } - - return v4Subnets, v6Subnets -} - -// prepareTempDir prepares and returns the default directory to use -// for temporary files. -// If it doesn't exist, it is created. If it exists, its content is removed. -func prepareTempDir(rootDir string, rootIDs idtools.IDPair) (string, error) { - var tmpDir string - if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" { - tmpDir = filepath.Join(rootDir, "tmp") - newName := tmpDir + "-old" - if err := os.Rename(tmpDir, newName); err == nil { - go func() { - if err := os.RemoveAll(newName); err != nil { - logrus.Warnf("failed to delete old tmp directory: %s", newName) - } - }() - } else if !os.IsNotExist(err) { - logrus.Warnf("failed to rename %s for background deletion: %s. Deleting synchronously", tmpDir, err) - if err := os.RemoveAll(tmpDir); err != nil { - logrus.Warnf("failed to delete old tmp directory: %s", tmpDir) - } - } - } - // We don't remove the content of tmpdir if it's not the default, - // it may hold things that do not belong to us. - return tmpDir, idtools.MkdirAllAndChown(tmpDir, 0700, rootIDs) -} - -func (daemon *Daemon) setGenericResources(conf *config.Config) error { - genericResources, err := config.ParseGenericResources(conf.NodeGenericResources) - if err != nil { - return err - } - - daemon.genericResources = genericResources - - return nil -} - -func setDefaultMtu(conf *config.Config) { - // do nothing if the config does not have the default 0 value. - if conf.Mtu != 0 { - return - } - conf.Mtu = config.DefaultNetworkMtu -} - -// IsShuttingDown tells whether the daemon is shutting down or not -func (daemon *Daemon) IsShuttingDown() bool { - return daemon.shutdown -} - -// initDiscovery initializes the discovery watcher for this daemon. 
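prepareTempDir's rename-then-delete trick in the hunk above is worth calling out: os.Rename is cheap and atomic within a filesystem, so the old contents can be reaped in the background instead of blocking startup. A minimal, self-contained sketch of the pattern, assuming a dedicated directory we own (the helper name is ours):

    package main

    import (
    	"log"
    	"os"
    	"path/filepath"
    )

    // recycleDir moves dir aside with an atomic rename, deletes the old copy in
    // the background, and recreates dir empty. When the rename fails for any
    // reason other than the directory not existing, it falls back to a
    // synchronous removal, mirroring the behavior in prepareTempDir.
    func recycleDir(dir string) error {
    	old := dir + "-old"
    	if err := os.Rename(dir, old); err == nil {
    		go func() {
    			if err := os.RemoveAll(old); err != nil {
    				log.Printf("failed to delete old directory %s: %v", old, err)
    			}
    		}()
    	} else if !os.IsNotExist(err) {
    		if err := os.RemoveAll(dir); err != nil {
    			log.Printf("failed to delete directory %s: %v", dir, err)
    		}
    	}
    	return os.MkdirAll(dir, 0700)
    }

    func main() {
    	if err := recycleDir(filepath.Join(os.TempDir(), "example-tmp")); err != nil {
    		log.Fatal(err)
    	}
    }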
-func (daemon *Daemon) initDiscovery(conf *config.Config) error { - advertise, err := config.ParseClusterAdvertiseSettings(conf.ClusterStore, conf.ClusterAdvertise) - if err != nil { - if err == discovery.ErrDiscoveryDisabled { - return nil - } - return err - } - - conf.ClusterAdvertise = advertise - discoveryWatcher, err := discovery.Init(conf.ClusterStore, conf.ClusterAdvertise, conf.ClusterOpts) - if err != nil { - return fmt.Errorf("discovery initialization failed (%v)", err) - } - - daemon.discoveryWatcher = discoveryWatcher - return nil -} - -func isBridgeNetworkDisabled(conf *config.Config) bool { - return conf.BridgeConfig.Iface == config.DisableNetworkBridge -} - -func (daemon *Daemon) networkOptions(dconfig *config.Config, pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) { - options := []nwconfig.Option{} - if dconfig == nil { - return options, nil - } - - options = append(options, nwconfig.OptionExperimental(dconfig.Experimental)) - options = append(options, nwconfig.OptionDataDir(dconfig.Root)) - options = append(options, nwconfig.OptionExecRoot(dconfig.GetExecRoot())) - - dd := runconfig.DefaultDaemonNetworkMode() - dn := runconfig.DefaultDaemonNetworkMode().NetworkName() - options = append(options, nwconfig.OptionDefaultDriver(string(dd))) - options = append(options, nwconfig.OptionDefaultNetwork(dn)) - - if strings.TrimSpace(dconfig.ClusterStore) != "" { - kv := strings.Split(dconfig.ClusterStore, "://") - if len(kv) != 2 { - return nil, errors.New("kv store daemon config must be of the form KV-PROVIDER://KV-URL") - } - options = append(options, nwconfig.OptionKVProvider(kv[0])) - options = append(options, nwconfig.OptionKVProviderURL(kv[1])) - } - if len(dconfig.ClusterOpts) > 0 { - options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts)) - } - - if daemon.discoveryWatcher != nil { - options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher)) - } - - if dconfig.ClusterAdvertise != "" { - options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise)) - } - - options = append(options, nwconfig.OptionLabels(dconfig.Labels)) - options = append(options, driverOptions(dconfig)...) - - if len(dconfig.NetworkConfig.DefaultAddressPools.Value()) > 0 { - options = append(options, nwconfig.OptionDefaultAddressPoolConfig(dconfig.NetworkConfig.DefaultAddressPools.Value())) - } - - if daemon.configStore != nil && daemon.configStore.LiveRestoreEnabled && len(activeSandboxes) != 0 { - options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes)) - } - - if pg != nil { - options = append(options, nwconfig.OptionPluginGetter(pg)) - } - - options = append(options, nwconfig.OptionNetworkControlPlaneMTU(dconfig.NetworkControlPlaneMTU)) - - return options, nil -} - -// GetCluster returns the cluster -func (daemon *Daemon) GetCluster() Cluster { - return daemon.cluster -} - -// SetCluster sets the cluster -func (daemon *Daemon) SetCluster(cluster Cluster) { - daemon.cluster = cluster -} - -func (daemon *Daemon) pluginShutdown() { - manager := daemon.pluginManager - // Check for a valid manager object. In error conditions, daemon init can fail - // and shutdown called, before plugin manager is initialized. 
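networkOptions above requires the cluster store to look like KV-PROVIDER://KV-URL. One subtlety: splitting on every "://" and requiring exactly two parts rejects URLs that themselves contain the separator. A small sketch that splits on the first occurrence only (a variant using SplitN, not the daemon's exact code):

    package main

    import (
    	"errors"
    	"fmt"
    	"strings"
    )

    // parseClusterStore splits a cluster-store value of the form
    // KV-PROVIDER://KV-URL on the first "://" only, so the URL part may
    // itself contain the separator.
    func parseClusterStore(s string) (provider, url string, err error) {
    	kv := strings.SplitN(s, "://", 2)
    	if len(kv) != 2 {
    		return "", "", errors.New("kv store daemon config must be of the form KV-PROVIDER://KV-URL")
    	}
    	return kv[0], kv[1], nil
    }

    func main() {
    	p, u, err := parseClusterStore("consul://localhost:8500")
    	fmt.Println(p, u, err) // consul localhost:8500 <nil>
    }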
- if manager != nil { - manager.Shutdown() - } -} - -// PluginManager returns current pluginManager associated with the daemon -func (daemon *Daemon) PluginManager() *plugin.Manager { // set up before daemon to avoid this method - return daemon.pluginManager -} - -// PluginGetter returns current pluginStore associated with the daemon -func (daemon *Daemon) PluginGetter() *plugin.Store { - return daemon.PluginStore -} - -// CreateDaemonRoot creates the root for the daemon -func CreateDaemonRoot(config *config.Config) error { - // get the canonical path to the Docker root directory - var realRoot string - if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) { - realRoot = config.Root - } else { - realRoot, err = getRealPath(config.Root) - if err != nil { - return fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err) - } - } - - idMappings, err := setupRemappedRoot(config) - if err != nil { - return err - } - return setupDaemonRoot(config, realRoot, idMappings.RootPair()) -} - -// checkpointAndSave grabs a container lock to safely call container.CheckpointTo -func (daemon *Daemon) checkpointAndSave(container *container.Container) error { - container.Lock() - defer container.Unlock() - if err := container.CheckpointTo(daemon.containersReplica); err != nil { - return fmt.Errorf("Error saving container state: %v", err) - } - return nil -} - -// because the CLI sends a -1 when it wants to unset the swappiness value -// we need to clear it on the server side -func fixMemorySwappiness(resources *containertypes.Resources) { - if resources.MemorySwappiness != nil && *resources.MemorySwappiness == -1 { - resources.MemorySwappiness = nil - } -} - -// GetAttachmentStore returns current attachment store associated with the daemon -func (daemon *Daemon) GetAttachmentStore() *network.AttachmentStore { - return &daemon.attachmentStore -} - -// IDMappings returns uid/gid mappings for the builder -func (daemon *Daemon) IDMappings() *idtools.IDMappings { - return daemon.idMappings -} - -// ImageService returns the Daemon's ImageService -func (daemon *Daemon) ImageService() *images.ImageService { - return daemon.imageService -} - -// BuilderBackend returns the backend used by builder -func (daemon *Daemon) BuilderBackend() builder.Backend { - return struct { - *Daemon - *images.ImageService - }{daemon, daemon.imageService} -} diff --git a/vendor/github.com/docker/docker/daemon/daemon_linux.go b/vendor/github.com/docker/docker/daemon/daemon_linux.go deleted file mode 100644 index 7cb672753..000000000 --- a/vendor/github.com/docker/docker/daemon/daemon_linux.go +++ /dev/null @@ -1,133 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "bufio" - "fmt" - "io" - "os" - "regexp" - "strings" - - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/mount" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// On Linux, plugins use a static path for storing execution state, -// instead of deriving path from daemon's exec-root. This is because -// plugin socket files are created here and they cannot exceed max -// path length of 108 bytes. 
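That 108-byte ceiling comes from the kernel's sockaddr_un.sun_path field, which is why the plugin directory is pinned to a short static path rather than derived from the exec-root. A quick illustrative guard along these lines (a hypothetical helper, not part of the daemon):

    package main

    import (
    	"fmt"
    	"path/filepath"
    )

    // checkSocketPath rejects unix socket paths that would not fit in
    // sockaddr_un.sun_path (108 bytes including the NUL terminator on Linux).
    func checkSocketPath(p string) error {
    	const sunPathMax = 108
    	if len(p) >= sunPathMax {
    		return fmt.Errorf("unix socket path %q is %d bytes; limit is %d", p, len(p), sunPathMax)
    	}
    	return nil
    }

    func main() {
    	fmt.Println(checkSocketPath(filepath.Join("/run/docker/plugins", "myplugin.sock"))) // <nil>
    }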
-func getPluginExecRoot(root string) string {
-	return "/run/docker/plugins"
-}
-
-func (daemon *Daemon) cleanupMountsByID(id string) error {
-	logrus.Debugf("Cleaning up old mountid %s: start.", id)
-	f, err := os.Open("/proc/self/mountinfo")
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-
-	return daemon.cleanupMountsFromReaderByID(f, id, mount.Unmount)
-}
-
-func (daemon *Daemon) cleanupMountsFromReaderByID(reader io.Reader, id string, unmount func(target string) error) error {
-	if daemon.root == "" {
-		return nil
-	}
-	var errors []string
-
-	regexps := getCleanPatterns(id)
-	sc := bufio.NewScanner(reader)
-	for sc.Scan() {
-		if fields := strings.Fields(sc.Text()); len(fields) >= 5 {
-			if mnt := fields[4]; strings.HasPrefix(mnt, daemon.root) {
-				for _, p := range regexps {
-					if p.MatchString(mnt) {
-						if err := unmount(mnt); err != nil {
-							logrus.Error(err)
-							errors = append(errors, err.Error())
-						}
-					}
-				}
-			}
-		}
-	}
-
-	if err := sc.Err(); err != nil {
-		return err
-	}
-
-	if len(errors) > 0 {
-		return fmt.Errorf("Error cleaning up mounts:\n%v", strings.Join(errors, "\n"))
-	}
-
-	logrus.Debugf("Cleaning up old mountid %v: done.", id)
-	return nil
-}
-
-// cleanupMounts unmounts the mounts used by container resources and the daemon root mount
-func (daemon *Daemon) cleanupMounts() error {
-	if err := daemon.cleanupMountsByID(""); err != nil {
-		return err
-	}
-
-	info, err := mount.GetMounts(mount.SingleEntryFilter(daemon.root))
-	if err != nil {
-		return errors.Wrap(err, "error reading mount table for cleanup")
-	}
-
-	if len(info) < 1 {
-		// no mount found, we're done here
-		return nil
-	}
-
-	// `info.Root` here is the root mountpoint of the passed in path (`daemon.root`).
-	// The only case that needs to be cleaned up is when the daemon has performed a
-	// `mount --bind /daemon/root /daemon/root && mount --make-shared /daemon/root`.
-	// This is only done when the daemon is started up and `/daemon/root` is not
-	// already on a shared mountpoint.
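For reference, cleanupMountsFromReaderByID above relies on the mount point being the fifth whitespace-separated field of each /proc/self/mountinfo line. A minimal scanner built on the same idea (our sketch, not daemon code):

    package main

    import (
    	"bufio"
    	"fmt"
    	"io"
    	"strings"
    )

    // mountPoints extracts the mount-point column (field index 4) from
    // mountinfo-formatted input, skipping lines that are too short.
    func mountPoints(r io.Reader) ([]string, error) {
    	var out []string
    	sc := bufio.NewScanner(r)
    	for sc.Scan() {
    		if fields := strings.Fields(sc.Text()); len(fields) >= 5 {
    			out = append(out, fields[4])
    		}
    	}
    	return out, sc.Err()
    }

    func main() {
    	sample := "22 28 0:21 / /proc rw,nosuid shared:13 - proc proc rw\n"
    	mnts, _ := mountPoints(strings.NewReader(sample))
    	fmt.Println(mnts) // [/proc]
    }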
- if !shouldUnmountRoot(daemon.root, info[0]) { - return nil - } - - unmountFile := getUnmountOnShutdownPath(daemon.configStore) - if _, err := os.Stat(unmountFile); err != nil { - return nil - } - - logrus.WithField("mountpoint", daemon.root).Debug("unmounting daemon root") - if err := mount.Unmount(daemon.root); err != nil { - return err - } - return os.Remove(unmountFile) -} - -func getCleanPatterns(id string) (regexps []*regexp.Regexp) { - var patterns []string - if id == "" { - id = "[0-9a-f]{64}" - patterns = append(patterns, "containers/"+id+"/shm") - } - patterns = append(patterns, "aufs/mnt/"+id+"$", "overlay/"+id+"/merged$", "zfs/graph/"+id+"$") - for _, p := range patterns { - r, err := regexp.Compile(p) - if err == nil { - regexps = append(regexps, r) - } - } - return -} - -func getRealPath(path string) (string, error) { - return fileutils.ReadSymlinkedDirectory(path) -} - -func shouldUnmountRoot(root string, info *mount.Info) bool { - if !strings.HasSuffix(root, info.Root) { - return false - } - return hasMountinfoOption(info.Optional, sharedPropagationOption) -} diff --git a/vendor/github.com/docker/docker/daemon/daemon_unix.go b/vendor/github.com/docker/docker/daemon/daemon_unix.go deleted file mode 100644 index e2c77610d..000000000 --- a/vendor/github.com/docker/docker/daemon/daemon_unix.go +++ /dev/null @@ -1,1523 +0,0 @@ -// +build linux freebsd - -package daemon // import "github.com/docker/docker/daemon" - -import ( - "bufio" - "context" - "fmt" - "io/ioutil" - "net" - "os" - "path/filepath" - "runtime" - "runtime/debug" - "strconv" - "strings" - "time" - - containerd_cgroups "github.com/containerd/cgroups" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/blkiodev" - pblkiodev "github.com/docker/docker/api/types/blkiodev" - containertypes "github.com/docker/docker/api/types/container" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/config" - "github.com/docker/docker/daemon/initlayer" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/containerfs" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/pkg/parsers" - "github.com/docker/docker/pkg/parsers/kernel" - "github.com/docker/docker/pkg/sysinfo" - "github.com/docker/docker/runconfig" - volumemounts "github.com/docker/docker/volume/mounts" - "github.com/docker/libnetwork" - nwconfig "github.com/docker/libnetwork/config" - "github.com/docker/libnetwork/drivers/bridge" - "github.com/docker/libnetwork/netlabel" - "github.com/docker/libnetwork/netutils" - "github.com/docker/libnetwork/options" - lntypes "github.com/docker/libnetwork/types" - "github.com/opencontainers/runc/libcontainer/cgroups" - rsystem "github.com/opencontainers/runc/libcontainer/system" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/vishvananda/netlink" - "golang.org/x/sys/unix" -) - -const ( - // DefaultShimBinary is the default shim to be used by containerd if none - // is specified - DefaultShimBinary = "docker-containerd-shim" - - // DefaultRuntimeBinary is the default runtime to be used by - // containerd if none is specified - DefaultRuntimeBinary = "docker-runc" - - // See https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269 - linuxMinCPUShares = 2 - linuxMaxCPUShares = 262144 
-	platformSupported = true
-	// It's not a kernel limit; we want this 4M limit to supply a reasonably functional container
-	linuxMinMemory = 4194304
-	// constants for remapped root settings
-	defaultIDSpecifier = "default"
-	defaultRemappedID  = "dockremap"
-
-	// constant for cgroup drivers
-	cgroupFsDriver      = "cgroupfs"
-	cgroupSystemdDriver = "systemd"
-
-	// DefaultRuntimeName is the default runtime to be used by
-	// containerd if none is specified
-	DefaultRuntimeName = "docker-runc"
-)
-
-type containerGetter interface {
-	GetContainer(string) (*container.Container, error)
-}
-
-func getMemoryResources(config containertypes.Resources) *specs.LinuxMemory {
-	memory := specs.LinuxMemory{}
-
-	if config.Memory > 0 {
-		memory.Limit = &config.Memory
-	}
-
-	if config.MemoryReservation > 0 {
-		memory.Reservation = &config.MemoryReservation
-	}
-
-	if config.MemorySwap > 0 {
-		memory.Swap = &config.MemorySwap
-	}
-
-	if config.MemorySwappiness != nil {
-		swappiness := uint64(*config.MemorySwappiness)
-		memory.Swappiness = &swappiness
-	}
-
-	if config.OomKillDisable != nil {
-		memory.DisableOOMKiller = config.OomKillDisable
-	}
-
-	if config.KernelMemory != 0 {
-		memory.Kernel = &config.KernelMemory
-	}
-
-	return &memory
-}
-
-func getCPUResources(config containertypes.Resources) (*specs.LinuxCPU, error) {
-	cpu := specs.LinuxCPU{}
-
-	if config.CPUShares < 0 {
-		return nil, fmt.Errorf("shares: invalid argument")
-	}
-	if config.CPUShares >= 0 {
-		shares := uint64(config.CPUShares)
-		cpu.Shares = &shares
-	}
-
-	if config.CpusetCpus != "" {
-		cpu.Cpus = config.CpusetCpus
-	}
-
-	if config.CpusetMems != "" {
-		cpu.Mems = config.CpusetMems
-	}
-
-	if config.NanoCPUs > 0 {
-		// https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt
-		period := uint64(100 * time.Millisecond / time.Microsecond)
-		quota := config.NanoCPUs * int64(period) / 1e9
-		cpu.Period = &period
-		cpu.Quota = &quota
-	}
-
-	if config.CPUPeriod != 0 {
-		period := uint64(config.CPUPeriod)
-		cpu.Period = &period
-	}
-
-	if config.CPUQuota != 0 {
-		q := config.CPUQuota
-		cpu.Quota = &q
-	}
-
-	if config.CPURealtimePeriod != 0 {
-		period := uint64(config.CPURealtimePeriod)
-		cpu.RealtimePeriod = &period
-	}
-
-	if config.CPURealtimeRuntime != 0 {
-		c := config.CPURealtimeRuntime
-		cpu.RealtimeRuntime = &c
-	}
-
-	return &cpu, nil
-}
-
-func getBlkioWeightDevices(config containertypes.Resources) ([]specs.LinuxWeightDevice, error) {
-	var stat unix.Stat_t
-	var blkioWeightDevices []specs.LinuxWeightDevice
-
-	for _, weightDevice := range config.BlkioWeightDevice {
-		if err := unix.Stat(weightDevice.Path, &stat); err != nil {
-			return nil, err
-		}
-		weight := weightDevice.Weight
-		d := specs.LinuxWeightDevice{Weight: &weight}
-		d.Major = int64(stat.Rdev / 256)
-		d.Minor = int64(stat.Rdev % 256)
-		blkioWeightDevices = append(blkioWeightDevices, d)
-	}
-
-	return blkioWeightDevices, nil
-}
-
-func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error {
-	container.NoNewPrivileges = daemon.configStore.NoNewPrivileges
-	return parseSecurityOpt(container, hostConfig)
-}
-
-func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error {
-	var (
-		labelOpts []string
-		err       error
-	)
-
-	for _, opt := range config.SecurityOpt {
-		if opt == "no-new-privileges" {
-			container.NoNewPrivileges = true
-			continue
-		}
-		if opt == "disable" {
-			labelOpts = append(labelOpts, "disable")
-			continue
-		}
-
-		var con []string
-		if strings.Contains(opt, "=") {
-
con = strings.SplitN(opt, "=", 2) - } else if strings.Contains(opt, ":") { - con = strings.SplitN(opt, ":", 2) - logrus.Warn("Security options with `:` as a separator are deprecated and will be completely unsupported in 17.04, use `=` instead.") - } - if len(con) != 2 { - return fmt.Errorf("invalid --security-opt 1: %q", opt) - } - - switch con[0] { - case "label": - labelOpts = append(labelOpts, con[1]) - case "apparmor": - container.AppArmorProfile = con[1] - case "seccomp": - container.SeccompProfile = con[1] - case "no-new-privileges": - noNewPrivileges, err := strconv.ParseBool(con[1]) - if err != nil { - return fmt.Errorf("invalid --security-opt 2: %q", opt) - } - container.NoNewPrivileges = noNewPrivileges - default: - return fmt.Errorf("invalid --security-opt 2: %q", opt) - } - } - - container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts) - return err -} - -func getBlkioThrottleDevices(devs []*blkiodev.ThrottleDevice) ([]specs.LinuxThrottleDevice, error) { - var throttleDevices []specs.LinuxThrottleDevice - var stat unix.Stat_t - - for _, d := range devs { - if err := unix.Stat(d.Path, &stat); err != nil { - return nil, err - } - d := specs.LinuxThrottleDevice{Rate: d.Rate} - d.Major = int64(stat.Rdev / 256) - d.Minor = int64(stat.Rdev % 256) - throttleDevices = append(throttleDevices, d) - } - - return throttleDevices, nil -} - -func checkKernel() error { - // Check for unsupported kernel versions - // FIXME: it would be cleaner to not test for specific versions, but rather - // test for specific functionalities. - // Unfortunately we can't test for the feature "does not cause a kernel panic" - // without actually causing a kernel panic, so we need this workaround until - // the circumstances of pre-3.10 crashes are clearer. - // For details see https://github.com/docker/docker/issues/407 - // Docker 1.11 and above doesn't actually run on kernels older than 3.4, - // due to containerd-shim usage of PR_SET_CHILD_SUBREAPER (introduced in 3.4). - if !kernel.CheckKernelVersion(3, 10, 0) { - v, _ := kernel.GetKernelVersion() - if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { - logrus.Fatalf("Your Linux kernel version %s is not supported for running docker. Please upgrade your kernel to 3.10.0 or newer.", v.String()) - } - } - return nil -} - -// adaptContainerSettings is called during container creation to modify any -// settings necessary in the HostConfig structure. -func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { - if adjustCPUShares && hostConfig.CPUShares > 0 { - // Handle unsupported CPUShares - if hostConfig.CPUShares < linuxMinCPUShares { - logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares) - hostConfig.CPUShares = linuxMinCPUShares - } else if hostConfig.CPUShares > linuxMaxCPUShares { - logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares) - hostConfig.CPUShares = linuxMaxCPUShares - } - } - if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 { - // By default, MemorySwap is set to twice the size of Memory. 
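getBlkioWeightDevices and getBlkioThrottleDevices above derive device numbers as Rdev/256 and Rdev%256, the classic 8-bit encoding. For illustration, golang.org/x/sys/unix also ships Major and Minor helpers that understand the extended encoding used for large device numbers; a quick comparison (our sketch, Linux-only):

    package main

    import (
    	"fmt"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	var stat unix.Stat_t
    	if err := unix.Stat("/dev/null", &stat); err != nil {
    		panic(err)
    	}
    	// Classic encoding, as in the hunk above (fine for small device numbers).
    	fmt.Println(int64(stat.Rdev/256), int64(stat.Rdev%256))
    	// Portable helpers that also handle the extended dev_t encoding.
    	fmt.Println(unix.Major(uint64(stat.Rdev)), unix.Minor(uint64(stat.Rdev)))
    }

The deleted hunk continues below with the MemorySwap default described in the comment just above.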
-		hostConfig.MemorySwap = hostConfig.Memory * 2
-	}
-	if hostConfig.ShmSize == 0 {
-		hostConfig.ShmSize = config.DefaultShmSize
-		if daemon.configStore != nil {
-			hostConfig.ShmSize = int64(daemon.configStore.ShmSize)
-		}
-	}
-	// Set default IPC mode, if unset for container
-	if hostConfig.IpcMode.IsEmpty() {
-		m := config.DefaultIpcMode
-		if daemon.configStore != nil {
-			m = daemon.configStore.IpcMode
-		}
-		hostConfig.IpcMode = containertypes.IpcMode(m)
-	}
-
-	adaptSharedNamespaceContainer(daemon, hostConfig)
-
-	var err error
-	opts, err := daemon.generateSecurityOpt(hostConfig)
-	if err != nil {
-		return err
-	}
-	hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, opts...)
-	if hostConfig.OomKillDisable == nil {
-		defaultOomKillDisable := false
-		hostConfig.OomKillDisable = &defaultOomKillDisable
-	}
-
-	return nil
-}
-
-// adaptSharedNamespaceContainer replaces container name with its ID in hostConfig.
-// More precisely, it rewrites `container:name` to `container:ID` in PidMode, IpcMode,
-// and NetworkMode.
-//
-// When a container shares its namespace with another container, using the ID keeps the
-// namespace-sharing connection between the two containers intact even if the other
-// container is renamed.
-func adaptSharedNamespaceContainer(daemon containerGetter, hostConfig *containertypes.HostConfig) {
-	containerPrefix := "container:"
-	if hostConfig.PidMode.IsContainer() {
-		pidContainer := hostConfig.PidMode.Container()
-		// if there is any error returned here, we just ignore it and leave it to be
-		// handled in the following logic
-		if c, err := daemon.GetContainer(pidContainer); err == nil {
-			hostConfig.PidMode = containertypes.PidMode(containerPrefix + c.ID)
-		}
-	}
-	if hostConfig.IpcMode.IsContainer() {
-		ipcContainer := hostConfig.IpcMode.Container()
-		if c, err := daemon.GetContainer(ipcContainer); err == nil {
-			hostConfig.IpcMode = containertypes.IpcMode(containerPrefix + c.ID)
-		}
-	}
-	if hostConfig.NetworkMode.IsContainer() {
-		netContainer := hostConfig.NetworkMode.ConnectedContainer()
-		if c, err := daemon.GetContainer(netContainer); err == nil {
-			hostConfig.NetworkMode = containertypes.NetworkMode(containerPrefix + c.ID)
-		}
-	}
-}
-
-func verifyContainerResources(resources *containertypes.Resources, sysInfo *sysinfo.SysInfo, update bool) ([]string, error) {
-	warnings := []string{}
-	fixMemorySwappiness(resources)
-
-	// memory subsystem checks and adjustments
-	if resources.Memory != 0 && resources.Memory < linuxMinMemory {
-		return warnings, fmt.Errorf("Minimum memory limit allowed is 4MB")
-	}
-	if resources.Memory > 0 && !sysInfo.MemoryLimit {
-		warnings = append(warnings, "Your kernel does not support memory limit capabilities or the cgroup is not mounted. Limitation discarded.")
-		logrus.Warn("Your kernel does not support memory limit capabilities or the cgroup is not mounted. Limitation discarded.")
-		resources.Memory = 0
-		resources.MemorySwap = -1
-	}
-	if resources.Memory > 0 && resources.MemorySwap != -1 && !sysInfo.SwapLimit {
-		warnings = append(warnings, "Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.")
-		logrus.Warn("Your kernel does not support swap limit capabilities, or the cgroup is not mounted.
Memory limited without swap.") - resources.MemorySwap = -1 - } - if resources.Memory > 0 && resources.MemorySwap > 0 && resources.MemorySwap < resources.Memory { - return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage") - } - if resources.Memory == 0 && resources.MemorySwap > 0 && !update { - return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage") - } - if resources.MemorySwappiness != nil && !sysInfo.MemorySwappiness { - warnings = append(warnings, "Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.") - logrus.Warn("Your kernel does not support memory swappiness capabilities, or the cgroup is not mounted. Memory swappiness discarded.") - resources.MemorySwappiness = nil - } - if resources.MemorySwappiness != nil { - swappiness := *resources.MemorySwappiness - if swappiness < 0 || swappiness > 100 { - return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100", swappiness) - } - } - if resources.MemoryReservation > 0 && !sysInfo.MemoryReservation { - warnings = append(warnings, "Your kernel does not support memory soft limit capabilities or the cgroup is not mounted. Limitation discarded.") - logrus.Warn("Your kernel does not support memory soft limit capabilities or the cgroup is not mounted. Limitation discarded.") - resources.MemoryReservation = 0 - } - if resources.MemoryReservation > 0 && resources.MemoryReservation < linuxMinMemory { - return warnings, fmt.Errorf("Minimum memory reservation allowed is 4MB") - } - if resources.Memory > 0 && resources.MemoryReservation > 0 && resources.Memory < resources.MemoryReservation { - return warnings, fmt.Errorf("Minimum memory limit can not be less than memory reservation limit, see usage") - } - if resources.KernelMemory > 0 && !sysInfo.KernelMemory { - warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities or the cgroup is not mounted. Limitation discarded.") - logrus.Warn("Your kernel does not support kernel memory limit capabilities or the cgroup is not mounted. Limitation discarded.") - resources.KernelMemory = 0 - } - if resources.KernelMemory > 0 && resources.KernelMemory < linuxMinMemory { - return warnings, fmt.Errorf("Minimum kernel memory limit allowed is 4MB") - } - if resources.KernelMemory > 0 && !kernel.CheckKernelVersion(4, 0, 0) { - warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") - logrus.Warn("You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") - } - if resources.OomKillDisable != nil && !sysInfo.OomKillDisable { - // only produce warnings if the setting wasn't to *disable* the OOM Kill; no point - // warning the caller if they already wanted the feature to be off - if *resources.OomKillDisable { - warnings = append(warnings, "Your kernel does not support OomKillDisable. OomKillDisable discarded.") - logrus.Warn("Your kernel does not support OomKillDisable. OomKillDisable discarded.") - } - resources.OomKillDisable = nil - } - - if resources.PidsLimit != 0 && !sysInfo.PidsLimit { - warnings = append(warnings, "Your kernel does not support pids limit capabilities or the cgroup is not mounted. 
PIDs limit discarded.") - logrus.Warn("Your kernel does not support pids limit capabilities or the cgroup is not mounted. PIDs limit discarded.") - resources.PidsLimit = 0 - } - - // cpu subsystem checks and adjustments - if resources.NanoCPUs > 0 && resources.CPUPeriod > 0 { - return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Period cannot both be set") - } - if resources.NanoCPUs > 0 && resources.CPUQuota > 0 { - return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Quota cannot both be set") - } - if resources.NanoCPUs > 0 && (!sysInfo.CPUCfsPeriod || !sysInfo.CPUCfsQuota) { - return warnings, fmt.Errorf("NanoCPUs can not be set, as your kernel does not support CPU cfs period/quota or the cgroup is not mounted") - } - // The highest precision we could get on Linux is 0.001, by setting - // cpu.cfs_period_us=1000ms - // cpu.cfs_quota=1ms - // See the following link for details: - // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt - // Here we don't set the lower limit and it is up to the underlying platform (e.g., Linux) to return an error. - // The error message is 0.01 so that this is consistent with Windows - if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 { - return warnings, fmt.Errorf("Range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU()) - } - - if resources.CPUShares > 0 && !sysInfo.CPUShares { - warnings = append(warnings, "Your kernel does not support CPU shares or the cgroup is not mounted. Shares discarded.") - logrus.Warn("Your kernel does not support CPU shares or the cgroup is not mounted. Shares discarded.") - resources.CPUShares = 0 - } - if resources.CPUPeriod > 0 && !sysInfo.CPUCfsPeriod { - warnings = append(warnings, "Your kernel does not support CPU cfs period or the cgroup is not mounted. Period discarded.") - logrus.Warn("Your kernel does not support CPU cfs period or the cgroup is not mounted. Period discarded.") - resources.CPUPeriod = 0 - } - if resources.CPUPeriod != 0 && (resources.CPUPeriod < 1000 || resources.CPUPeriod > 1000000) { - return warnings, fmt.Errorf("CPU cfs period can not be less than 1ms (i.e. 1000) or larger than 1s (i.e. 1000000)") - } - if resources.CPUQuota > 0 && !sysInfo.CPUCfsQuota { - warnings = append(warnings, "Your kernel does not support CPU cfs quota or the cgroup is not mounted. Quota discarded.") - logrus.Warn("Your kernel does not support CPU cfs quota or the cgroup is not mounted. Quota discarded.") - resources.CPUQuota = 0 - } - if resources.CPUQuota > 0 && resources.CPUQuota < 1000 { - return warnings, fmt.Errorf("CPU cfs quota can not be less than 1ms (i.e. 1000)") - } - if resources.CPUPercent > 0 { - warnings = append(warnings, fmt.Sprintf("%s does not support CPU percent. Percent discarded.", runtime.GOOS)) - logrus.Warnf("%s does not support CPU percent. Percent discarded.", runtime.GOOS) - resources.CPUPercent = 0 - } - - // cpuset subsystem checks and adjustments - if (resources.CpusetCpus != "" || resources.CpusetMems != "") && !sysInfo.Cpuset { - warnings = append(warnings, "Your kernel does not support cpuset or the cgroup is not mounted. Cpuset discarded.") - logrus.Warn("Your kernel does not support cpuset or the cgroup is not mounted. 
Cpuset discarded.") - resources.CpusetCpus = "" - resources.CpusetMems = "" - } - cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(resources.CpusetCpus) - if err != nil { - return warnings, fmt.Errorf("Invalid value %s for cpuset cpus", resources.CpusetCpus) - } - if !cpusAvailable { - return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s", resources.CpusetCpus, sysInfo.Cpus) - } - memsAvailable, err := sysInfo.IsCpusetMemsAvailable(resources.CpusetMems) - if err != nil { - return warnings, fmt.Errorf("Invalid value %s for cpuset mems", resources.CpusetMems) - } - if !memsAvailable { - return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s", resources.CpusetMems, sysInfo.Mems) - } - - // blkio subsystem checks and adjustments - if resources.BlkioWeight > 0 && !sysInfo.BlkioWeight { - warnings = append(warnings, "Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.") - logrus.Warn("Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.") - resources.BlkioWeight = 0 - } - if resources.BlkioWeight > 0 && (resources.BlkioWeight < 10 || resources.BlkioWeight > 1000) { - return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000") - } - if resources.IOMaximumBandwidth != 0 || resources.IOMaximumIOps != 0 { - return warnings, fmt.Errorf("Invalid QoS settings: %s does not support Maximum IO Bandwidth or Maximum IO IOps", runtime.GOOS) - } - if len(resources.BlkioWeightDevice) > 0 && !sysInfo.BlkioWeightDevice { - warnings = append(warnings, "Your kernel does not support Block I/O weight_device or the cgroup is not mounted. Weight-device discarded.") - logrus.Warn("Your kernel does not support Block I/O weight_device or the cgroup is not mounted. Weight-device discarded.") - resources.BlkioWeightDevice = []*pblkiodev.WeightDevice{} - } - if len(resources.BlkioDeviceReadBps) > 0 && !sysInfo.BlkioReadBpsDevice { - warnings = append(warnings, "Your kernel does not support BPS Block I/O read limit or the cgroup is not mounted. Block I/O BPS read limit discarded.") - logrus.Warn("Your kernel does not support BPS Block I/O read limit or the cgroup is not mounted. Block I/O BPS read limit discarded") - resources.BlkioDeviceReadBps = []*pblkiodev.ThrottleDevice{} - } - if len(resources.BlkioDeviceWriteBps) > 0 && !sysInfo.BlkioWriteBpsDevice { - warnings = append(warnings, "Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. Block I/O BPS write limit discarded.") - logrus.Warn("Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. Block I/O BPS write limit discarded.") - resources.BlkioDeviceWriteBps = []*pblkiodev.ThrottleDevice{} - - } - if len(resources.BlkioDeviceReadIOps) > 0 && !sysInfo.BlkioReadIOpsDevice { - warnings = append(warnings, "Your kernel does not support IOPS Block read limit or the cgroup is not mounted. Block I/O IOPS read limit discarded.") - logrus.Warn("Your kernel does not support IOPS Block I/O read limit in IO or the cgroup is not mounted. Block I/O IOPS read limit discarded.") - resources.BlkioDeviceReadIOps = []*pblkiodev.ThrottleDevice{} - } - if len(resources.BlkioDeviceWriteIOps) > 0 && !sysInfo.BlkioWriteIOpsDevice { - warnings = append(warnings, "Your kernel does not support IOPS Block write limit or the cgroup is not mounted. 
Block I/O IOPS write limit discarded.") - logrus.Warn("Your kernel does not support IOPS Block I/O write limit or the cgroup is not mounted. Block I/O IOPS write limit discarded.") - resources.BlkioDeviceWriteIOps = []*pblkiodev.ThrottleDevice{} - } - - return warnings, nil -} - -func (daemon *Daemon) getCgroupDriver() string { - cgroupDriver := cgroupFsDriver - - if UsingSystemd(daemon.configStore) { - cgroupDriver = cgroupSystemdDriver - } - return cgroupDriver -} - -// getCD gets the raw value of the native.cgroupdriver option, if set. -func getCD(config *config.Config) string { - for _, option := range config.ExecOptions { - key, val, err := parsers.ParseKeyValueOpt(option) - if err != nil || !strings.EqualFold(key, "native.cgroupdriver") { - continue - } - return val - } - return "" -} - -// VerifyCgroupDriver validates native.cgroupdriver -func VerifyCgroupDriver(config *config.Config) error { - cd := getCD(config) - if cd == "" || cd == cgroupFsDriver || cd == cgroupSystemdDriver { - return nil - } - return fmt.Errorf("native.cgroupdriver option %s not supported", cd) -} - -// UsingSystemd returns true if cli option includes native.cgroupdriver=systemd -func UsingSystemd(config *config.Config) bool { - return getCD(config) == cgroupSystemdDriver -} - -// verifyPlatformContainerSettings performs platform-specific validation of the -// hostconfig and config structures. -func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { - var warnings []string - sysInfo := sysinfo.New(true) - - w, err := verifyContainerResources(&hostConfig.Resources, sysInfo, update) - - // no matter err is nil or not, w could have data in itself. - warnings = append(warnings, w...) - - if err != nil { - return warnings, err - } - - if hostConfig.ShmSize < 0 { - return warnings, fmt.Errorf("SHM size can not be less than 0") - } - - if hostConfig.OomScoreAdj < -1000 || hostConfig.OomScoreAdj > 1000 { - return warnings, fmt.Errorf("Invalid value %d, range for oom score adj is [-1000, 1000]", hostConfig.OomScoreAdj) - } - - // ip-forwarding does not affect container with '--net=host' (or '--net=none') - if sysInfo.IPv4ForwardingDisabled && !(hostConfig.NetworkMode.IsHost() || hostConfig.NetworkMode.IsNone()) { - warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.") - logrus.Warn("IPv4 forwarding is disabled. Networking will not work") - } - // check for various conflicting options with user namespaces - if daemon.configStore.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() { - if hostConfig.Privileged { - return warnings, fmt.Errorf("privileged mode is incompatible with user namespaces. 
You must run the container in the host namespace when running privileged mode") - } - if hostConfig.NetworkMode.IsHost() && !hostConfig.UsernsMode.IsHost() { - return warnings, fmt.Errorf("cannot share the host's network namespace when user namespaces are enabled") - } - if hostConfig.PidMode.IsHost() && !hostConfig.UsernsMode.IsHost() { - return warnings, fmt.Errorf("cannot share the host PID namespace when user namespaces are enabled") - } - } - if hostConfig.CgroupParent != "" && UsingSystemd(daemon.configStore) { - // CgroupParent for systemd cgroup should be named as "xxx.slice" - if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") { - return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") - } - } - if hostConfig.Runtime == "" { - hostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName() - } - - if rt := daemon.configStore.GetRuntime(hostConfig.Runtime); rt == nil { - return warnings, fmt.Errorf("Unknown runtime specified %s", hostConfig.Runtime) - } - - parser := volumemounts.NewParser(runtime.GOOS) - for dest := range hostConfig.Tmpfs { - if err := parser.ValidateTmpfsMountDestination(dest); err != nil { - return warnings, err - } - } - - return warnings, nil -} - -func (daemon *Daemon) loadRuntimes() error { - return daemon.initRuntimes(daemon.configStore.Runtimes) -} - -func (daemon *Daemon) initRuntimes(runtimes map[string]types.Runtime) (err error) { - runtimeDir := filepath.Join(daemon.configStore.Root, "runtimes") - // Remove old temp directory if any - os.RemoveAll(runtimeDir + "-old") - tmpDir, err := ioutils.TempDir(daemon.configStore.Root, "gen-runtimes") - if err != nil { - return errors.Wrapf(err, "failed to get temp dir to generate runtime scripts") - } - defer func() { - if err != nil { - if err1 := os.RemoveAll(tmpDir); err1 != nil { - logrus.WithError(err1).WithField("dir", tmpDir). - Warnf("failed to remove tmp dir") - } - return - } - - if err = os.Rename(runtimeDir, runtimeDir+"-old"); err != nil { - return - } - if err = os.Rename(tmpDir, runtimeDir); err != nil { - err = errors.Wrapf(err, "failed to setup runtimes dir, new containers may not start") - return - } - if err = os.RemoveAll(runtimeDir + "-old"); err != nil { - logrus.WithError(err).WithField("dir", tmpDir). - Warnf("failed to remove old runtimes dir") - } - }() - - for name, rt := range runtimes { - if len(rt.Args) == 0 { - continue - } - - script := filepath.Join(tmpDir, name) - content := fmt.Sprintf("#!/bin/sh\n%s %s $@\n", rt.Path, strings.Join(rt.Args, " ")) - if err := ioutil.WriteFile(script, []byte(content), 0700); err != nil { - return err - } - } - return nil -} - -// verifyDaemonSettings performs validation of daemon config struct -func verifyDaemonSettings(conf *config.Config) error { - // Check for mutually incompatible config options - if conf.BridgeConfig.Iface != "" && conf.BridgeConfig.IP != "" { - return fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one") - } - if !conf.BridgeConfig.EnableIPTables && !conf.BridgeConfig.InterContainerCommunication { - return fmt.Errorf("You specified --iptables=false with --icc=false. ICC=false uses iptables to function. 
Please set --icc or --iptables to true") - } - if !conf.BridgeConfig.EnableIPTables && conf.BridgeConfig.EnableIPMasq { - conf.BridgeConfig.EnableIPMasq = false - } - if err := VerifyCgroupDriver(conf); err != nil { - return err - } - if conf.CgroupParent != "" && UsingSystemd(conf) { - if len(conf.CgroupParent) <= 6 || !strings.HasSuffix(conf.CgroupParent, ".slice") { - return fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") - } - } - - if conf.DefaultRuntime == "" { - conf.DefaultRuntime = config.StockRuntimeName - } - if conf.Runtimes == nil { - conf.Runtimes = make(map[string]types.Runtime) - } - conf.Runtimes[config.StockRuntimeName] = types.Runtime{Path: DefaultRuntimeName} - - return nil -} - -// checkSystem validates platform-specific requirements -func checkSystem() error { - if os.Geteuid() != 0 { - return fmt.Errorf("The Docker daemon needs to be run as root") - } - return checkKernel() -} - -// configureMaxThreads sets the Go runtime max threads threshold -// which is 90% of the kernel setting from /proc/sys/kernel/threads-max -func configureMaxThreads(config *config.Config) error { - mt, err := ioutil.ReadFile("/proc/sys/kernel/threads-max") - if err != nil { - return err - } - mtint, err := strconv.Atoi(strings.TrimSpace(string(mt))) - if err != nil { - return err - } - maxThreads := (mtint / 100) * 90 - debug.SetMaxThreads(maxThreads) - logrus.Debugf("Golang's threads limit set to %d", maxThreads) - return nil -} - -func overlaySupportsSelinux() (bool, error) { - f, err := os.Open("/proc/kallsyms") - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - defer f.Close() - - var symAddr, symType, symName, text string - - s := bufio.NewScanner(f) - for s.Scan() { - if err := s.Err(); err != nil { - return false, err - } - - text = s.Text() - if _, err := fmt.Sscanf(text, "%s %s %s", &symAddr, &symType, &symName); err != nil { - return false, fmt.Errorf("Scanning '%s' failed: %s", text, err) - } - - // Check for presence of symbol security_inode_copy_up. - if symName == "security_inode_copy_up" { - return true, nil - } - } - return false, nil -} - -// configureKernelSecuritySupport configures and validates security support for the kernel -func configureKernelSecuritySupport(config *config.Config, driverName string) error { - if config.EnableSelinuxSupport { - if !selinuxEnabled() { - logrus.Warn("Docker could not enable SELinux on the host system") - return nil - } - - if driverName == "overlay" || driverName == "overlay2" { - // If driver is overlay or overlay2, make sure kernel - // supports selinux with overlay. - supported, err := overlaySupportsSelinux() - if err != nil { - return err - } - - if !supported { - logrus.Warnf("SELinux is not supported with the %v graph driver on this kernel", driverName) - } - } - } else { - selinuxSetDisabled() - } - return nil -} - -func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { - netOptions, err := daemon.networkOptions(config, daemon.PluginStore, activeSandboxes) - if err != nil { - return nil, err - } - - controller, err := libnetwork.New(netOptions...) 
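verifyDaemonSettings above encodes a small naming rule: with the systemd cgroup driver, --cgroup-parent must be a systemd slice such as "system.slice". A standalone restatement of that check (the helper name is ours):

    package main

    import (
    	"fmt"
    	"strings"
    )

    // validSystemdCgroupParent mirrors the check above: the name must be longer
    // than the ".slice" suffix itself and must end with it.
    func validSystemdCgroupParent(p string) bool {
    	return len(p) > len(".slice") && strings.HasSuffix(p, ".slice")
    }

    func main() {
    	fmt.Println(validSystemdCgroupParent("system.slice")) // true
    	fmt.Println(validSystemdCgroupParent(".slice"))       // false
    	fmt.Println(validSystemdCgroupParent("docker"))       // false
    }

The initNetworkController hunk resumes below.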
-	if err != nil {
-		return nil, fmt.Errorf("error obtaining controller instance: %v", err)
-	}
-
-	if len(activeSandboxes) > 0 {
-		logrus.Info("There are old running containers, the network config will not take effect")
-		return controller, nil
-	}
-
-	// Initialize default network on "null"
-	if n, _ := controller.NetworkByName("none"); n == nil {
-		if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(true)); err != nil {
-			return nil, fmt.Errorf("Error creating default \"null\" network: %v", err)
-		}
-	}
-
-	// Initialize default network on "host"
-	if n, _ := controller.NetworkByName("host"); n == nil {
-		if _, err := controller.NewNetwork("host", "host", "", libnetwork.NetworkOptionPersist(true)); err != nil {
-			return nil, fmt.Errorf("Error creating default \"host\" network: %v", err)
-		}
-	}
-
-	// Clear stale bridge network
-	if n, err := controller.NetworkByName("bridge"); err == nil {
-		if err = n.Delete(); err != nil {
-			return nil, fmt.Errorf("could not delete the default bridge network: %v", err)
-		}
-		if len(config.NetworkConfig.DefaultAddressPools.Value()) > 0 && !daemon.configStore.LiveRestoreEnabled {
-			removeDefaultBridgeInterface()
-		}
-	}
-
-	if !config.DisableBridge {
-		// Initialize default driver "bridge"
-		if err := initBridgeDriver(controller, config); err != nil {
-			return nil, err
-		}
-	} else {
-		removeDefaultBridgeInterface()
-	}
-
-	return controller, nil
-}
-
-func driverOptions(config *config.Config) []nwconfig.Option {
-	bridgeConfig := options.Generic{
-		"EnableIPForwarding":  config.BridgeConfig.EnableIPForward,
-		"EnableIPTables":      config.BridgeConfig.EnableIPTables,
-		"EnableUserlandProxy": config.BridgeConfig.EnableUserlandProxy,
-		"UserlandProxyPath":   config.BridgeConfig.UserlandProxyPath}
-	bridgeOption := options.Generic{netlabel.GenericData: bridgeConfig}
-
-	dOptions := []nwconfig.Option{}
-	dOptions = append(dOptions, nwconfig.OptionDriverConfig("bridge", bridgeOption))
-	return dOptions
-}
-
-func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error {
-	bridgeName := bridge.DefaultBridgeName
-	if config.BridgeConfig.Iface != "" {
-		bridgeName = config.BridgeConfig.Iface
-	}
-	netOption := map[string]string{
-		bridge.BridgeName:         bridgeName,
-		bridge.DefaultBridge:      strconv.FormatBool(true),
-		netlabel.DriverMTU:        strconv.Itoa(config.Mtu),
-		bridge.EnableIPMasquerade: strconv.FormatBool(config.BridgeConfig.EnableIPMasq),
-		bridge.EnableICC:          strconv.FormatBool(config.BridgeConfig.InterContainerCommunication),
-	}
-
-	// --ip processing
-	if config.BridgeConfig.DefaultIP != nil {
-		netOption[bridge.DefaultBindingIP] = config.BridgeConfig.DefaultIP.String()
-	}
-
-	var (
-		ipamV4Conf *libnetwork.IpamConf
-		ipamV6Conf *libnetwork.IpamConf
-	)
-
-	ipamV4Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)}
-
-	nwList, nw6List, err := netutils.ElectInterfaceAddresses(bridgeName)
-	if err != nil {
-		return errors.Wrap(err, "list bridge addresses failed")
-	}
-
-	nw := nwList[0]
-	if len(nwList) > 1 && config.BridgeConfig.FixedCIDR != "" {
-		_, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR)
-		if err != nil {
-			return errors.Wrap(err, "parse CIDR failed")
-		}
-		// Iterate through in case there are multiple addresses for the bridge
-		for _, entry := range nwList {
-			if fCIDR.Contains(entry.IP) {
-				nw = entry
-				break
-			}
-		}
-	}
-
-	ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String()
-	hip, _ := lntypes.GetHostPartIP(nw.IP, nw.Mask)
-	if 
hip.IsGlobalUnicast() { - ipamV4Conf.Gateway = nw.IP.String() - } - - if config.BridgeConfig.IP != "" { - ipamV4Conf.PreferredPool = config.BridgeConfig.IP - ip, _, err := net.ParseCIDR(config.BridgeConfig.IP) - if err != nil { - return err - } - ipamV4Conf.Gateway = ip.String() - } else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" { - logrus.Infof("Default bridge (%s) is assigned with an IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool) - } - - if config.BridgeConfig.FixedCIDR != "" { - _, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR) - if err != nil { - return err - } - - ipamV4Conf.SubPool = fCIDR.String() - } - - if config.BridgeConfig.DefaultGatewayIPv4 != nil { - ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.BridgeConfig.DefaultGatewayIPv4.String() - } - - var deferIPv6Alloc bool - if config.BridgeConfig.FixedCIDRv6 != "" { - _, fCIDRv6, err := net.ParseCIDR(config.BridgeConfig.FixedCIDRv6) - if err != nil { - return err - } - - // In case user has specified the daemon flag --fixed-cidr-v6 and the passed network has - // at least 48 host bits, we need to guarantee the current behavior where the containers' - // IPv6 addresses will be constructed based on the containers' interface MAC address. - // We do so by telling libnetwork to defer the IPv6 address allocation for the endpoints - // on this network until after the driver has created the endpoint and returned the - // constructed address. Libnetwork will then reserve this address with the ipam driver. - ones, _ := fCIDRv6.Mask.Size() - deferIPv6Alloc = ones <= 80 - - if ipamV6Conf == nil { - ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} - } - ipamV6Conf.PreferredPool = fCIDRv6.String() - - // In case the --fixed-cidr-v6 is specified and the current docker0 bridge IPv6 - // address belongs to the same network, we need to inform libnetwork about it, so - // that it can be reserved with IPAM and it will not be given away to somebody else - for _, nw6 := range nw6List { - if fCIDRv6.Contains(nw6.IP) { - ipamV6Conf.Gateway = nw6.IP.String() - break - } - } - } - - if config.BridgeConfig.DefaultGatewayIPv6 != nil { - if ipamV6Conf == nil { - ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} - } - ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.BridgeConfig.DefaultGatewayIPv6.String() - } - - v4Conf := []*libnetwork.IpamConf{ipamV4Conf} - v6Conf := []*libnetwork.IpamConf{} - if ipamV6Conf != nil { - v6Conf = append(v6Conf, ipamV6Conf) - } - // Initialize default network on "bridge" with the same name - _, err = controller.NewNetwork("bridge", "bridge", "", - libnetwork.NetworkOptionEnableIPv6(config.BridgeConfig.EnableIPv6), - libnetwork.NetworkOptionDriverOpts(netOption), - libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), - libnetwork.NetworkOptionDeferIPv6Alloc(deferIPv6Alloc)) - if err != nil { - return fmt.Errorf("Error creating default \"bridge\" network: %v", err) - } - return nil -} - -// Remove default bridge interface if present (--bridge=none use case) -func removeDefaultBridgeInterface() { - if lnk, err := netlink.LinkByName(bridge.DefaultBridgeName); err == nil { - if err := netlink.LinkDel(lnk); err != nil { - logrus.Warnf("Failed to remove bridge interface (%s): %v", bridge.DefaultBridgeName, err) - } - } -} - -func setupInitLayer(idMappings *idtools.IDMappings) func(containerfs.ContainerFS) error { - return func(initPath 
containerfs.ContainerFS) error { - return initlayer.Setup(initPath, idMappings.RootPair()) - } -} - -// Parse the remapped root (user namespace) option, which can be one of: -// username - valid username from /etc/passwd -// username:groupname - valid username; valid groupname from /etc/group -// uid - 32-bit unsigned int valid Linux UID value -// uid:gid - uid value; 32-bit unsigned int Linux GID value -// -// If no groupname is specified, and a username is specified, an attempt -// will be made to lookup a gid for that username as a groupname -// -// If names are used, they are verified to exist in passwd/group -func parseRemappedRoot(usergrp string) (string, string, error) { - - var ( - userID, groupID int - username, groupname string - ) - - idparts := strings.Split(usergrp, ":") - if len(idparts) > 2 { - return "", "", fmt.Errorf("Invalid user/group specification in --userns-remap: %q", usergrp) - } - - if uid, err := strconv.ParseInt(idparts[0], 10, 32); err == nil { - // must be a uid; take it as valid - userID = int(uid) - luser, err := idtools.LookupUID(userID) - if err != nil { - return "", "", fmt.Errorf("Uid %d has no entry in /etc/passwd: %v", userID, err) - } - username = luser.Name - if len(idparts) == 1 { - // if the uid was numeric and no gid was specified, take the uid as the gid - groupID = userID - lgrp, err := idtools.LookupGID(groupID) - if err != nil { - return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err) - } - groupname = lgrp.Name - } - } else { - lookupName := idparts[0] - // special case: if the user specified "default", they want Docker to create or - // use (after creation) the "dockremap" user/group for root remapping - if lookupName == defaultIDSpecifier { - lookupName = defaultRemappedID - } - luser, err := idtools.LookupUser(lookupName) - if err != nil && idparts[0] != defaultIDSpecifier { - // error if the name requested isn't the special "dockremap" ID - return "", "", fmt.Errorf("Error during uid lookup for %q: %v", lookupName, err) - } else if err != nil { - // special case-- if the username == "default", then we have been asked - // to create a new entry pair in /etc/{passwd,group} for which the /etc/sub{uid,gid} - // ranges will be used for the user and group mappings in user namespaced containers - _, _, err := idtools.AddNamespaceRangesUser(defaultRemappedID) - if err == nil { - return defaultRemappedID, defaultRemappedID, nil - } - return "", "", fmt.Errorf("Error during %q user creation: %v", defaultRemappedID, err) - } - username = luser.Name - if len(idparts) == 1 { - // we only have a string username, and no group specified; look up gid from username as group - group, err := idtools.LookupGroup(lookupName) - if err != nil { - return "", "", fmt.Errorf("Error during gid lookup for %q: %v", lookupName, err) - } - groupname = group.Name - } - } - - if len(idparts) == 2 { - // groupname or gid is separately specified and must be resolved - // to an unsigned 32-bit gid - if gid, err := strconv.ParseInt(idparts[1], 10, 32); err == nil { - // must be a gid, take it as valid - groupID = int(gid) - lgrp, err := idtools.LookupGID(groupID) - if err != nil { - return "", "", fmt.Errorf("Gid %d has no entry in /etc/passwd: %v", groupID, err) - } - groupname = lgrp.Name - } else { - // not a number; attempt a lookup - if _, err := idtools.LookupGroup(idparts[1]); err != nil { - return "", "", fmt.Errorf("Error during groupname lookup for %q: %v", idparts[1], err) - } - groupname = idparts[1] - } - } - return username, groupname, 
nil
-}
-
-func setupRemappedRoot(config *config.Config) (*idtools.IDMappings, error) {
-	if runtime.GOOS != "linux" && config.RemappedRoot != "" {
-		return nil, fmt.Errorf("User namespaces are only supported on Linux")
-	}
-
-	// if the daemon was started with remapped root option, parse
-	// the config option to the int uid,gid values
-	if config.RemappedRoot != "" {
-		username, groupname, err := parseRemappedRoot(config.RemappedRoot)
-		if err != nil {
-			return nil, err
-		}
-		if username == "root" {
-			// Cannot setup user namespaces with a 1-to-1 mapping; "--root=0:0" is a no-op
-			// effectively
-			logrus.Warn("User namespaces: root cannot be remapped with itself; user namespaces are OFF")
-			return &idtools.IDMappings{}, nil
-		}
-		logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s:%s", username, groupname)
-		// update remapped root setting now that we have resolved them to actual names
-		config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname)
-
-		mappings, err := idtools.NewIDMappings(username, groupname)
-		if err != nil {
-			return nil, errors.Wrap(err, "Can't create ID mappings")
-		}
-		return mappings, nil
-	}
-	return &idtools.IDMappings{}, nil
-}
-
-func setupDaemonRoot(config *config.Config, rootDir string, rootIDs idtools.IDPair) error {
-	config.Root = rootDir
-	// the docker root metadata directory needs to have execute permissions for all users (g+x,o+x)
-	// so that syscalls executing as non-root, operating on subdirectories of the graph root
-	// (e.g. mounted layers of a container) can traverse this path.
-	// The user namespace support will create subdirectories for the remapped root host uid:gid
-	// pair owned by that same uid:gid pair for proper write access to those needed metadata and
-	// layer content subtrees.
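setupRemappedRoot accepts the spec grammar that parseRemappedRoot documents above: `user`, `user:group`, `uid`, or `uid:gid`. A minimal sketch of just the splitting and numeric/name classification, without the /etc/passwd and /etc/group lookups the daemon performs:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// splitRemapSpec splits a --userns-remap value into its user and group
// parts. The real daemon additionally resolves names via idtools against
// /etc/passwd and /etc/group.
func splitRemapSpec(spec string) (user, group string, err error) {
	parts := strings.Split(spec, ":")
	if len(parts) > 2 || parts[0] == "" {
		return "", "", fmt.Errorf("invalid user/group specification: %q", spec)
	}
	user = parts[0]
	group = parts[0] // with no group given, the user part doubles as the group
	if len(parts) == 2 {
		group = parts[1]
	}
	return user, group, nil
}

// isNumericID reports whether a part is a numeric ID rather than a name.
func isNumericID(s string) bool {
	_, err := strconv.ParseInt(s, 10, 32)
	return err == nil
}

func main() {
	for _, spec := range []string{"default", "1000", "1000:1000", "dockremap:dockremap"} {
		u, g, _ := splitRemapSpec(spec)
		fmt.Printf("%-20s user=%s (numeric=%v) group=%s\n", spec, u, isNumericID(u), g)
	}
}
```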
-	if _, err := os.Stat(rootDir); err == nil {
-		// root currently exists; verify the access bits are correct by setting them
-		if err = os.Chmod(rootDir, 0711); err != nil {
-			return err
-		}
-	} else if os.IsNotExist(err) {
-		// no root exists yet, create it 0711 with root:root ownership
-		if err := os.MkdirAll(rootDir, 0711); err != nil {
-			return err
-		}
-	}
-
-	// if user namespaces are enabled we will create a subtree underneath the specified root
-	// with any/all specified remapped root uid/gid options on the daemon creating
-	// a new subdirectory with ownership set to the remapped uid/gid (so as to allow
-	// `chdir()` to work for containers namespaced to that uid/gid)
-	if config.RemappedRoot != "" {
-		config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", rootIDs.UID, rootIDs.GID))
-		logrus.Debugf("Creating user namespaced daemon root: %s", config.Root)
-		// Create the root directory if it doesn't exist
-		if err := idtools.MkdirAllAndChown(config.Root, 0700, rootIDs); err != nil {
-			return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err)
-		}
-		// we also need to verify that any pre-existing directories in the path to
-		// the graphroot won't block access to remapped root--if any pre-existing directory
-		// has strict permissions that don't allow "x", container start will fail, so
-		// better to warn and fail now
-		dirPath := config.Root
-		for {
-			dirPath = filepath.Dir(dirPath)
-			if dirPath == "/" {
-				break
-			}
-			if !idtools.CanAccess(dirPath, rootIDs) {
-				return fmt.Errorf("a subdirectory in your graphroot path (%s) restricts access to the remapped root uid/gid; please fix by allowing 'o+x' permissions on existing directories", config.Root)
-			}
-		}
-	}
-
-	if err := setupDaemonRootPropagation(config); err != nil {
-		logrus.WithError(err).WithField("dir", config.Root).Warn("Error while setting daemon root propagation, this is not generally critical but may cause some functionality to not work or fallback to less desirable behavior")
-	}
-	return nil
-}
-
-func setupDaemonRootPropagation(cfg *config.Config) error {
-	rootParentMount, options, err := getSourceMount(cfg.Root)
-	if err != nil {
-		return errors.Wrap(err, "error getting daemon root's parent mount")
-	}
-
-	var cleanupOldFile bool
-	cleanupFile := getUnmountOnShutdownPath(cfg)
-	defer func() {
-		if !cleanupOldFile {
-			return
-		}
-		if err := os.Remove(cleanupFile); err != nil && !os.IsNotExist(err) {
-			logrus.WithError(err).WithField("file", cleanupFile).Warn("could not clean up old root propagation unmount file")
-		}
-	}()
-
-	if hasMountinfoOption(options, sharedPropagationOption, slavePropagationOption) {
-		cleanupOldFile = true
-		return nil
-	}
-
-	if err := mount.MakeShared(cfg.Root); err != nil {
-		return errors.Wrap(err, "could not setup daemon root propagation to shared")
-	}
-
-	// check the case where this may have already been a mount to itself.
-	// If so then the daemon only performed a remount and should not try to unmount this later.
-	if rootParentMount == cfg.Root {
-		cleanupOldFile = true
-		return nil
-	}
-
-	if err := ioutil.WriteFile(cleanupFile, nil, 0600); err != nil {
-		return errors.Wrap(err, "error writing file to signal mount cleanup on shutdown")
-	}
-	return nil
-}
-
-// getUnmountOnShutdownPath generates the path used when writing the file that signals to the daemon that on shutdown
-// the daemon root should be unmounted.
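The unmount-on-shutdown handshake above reduces to a marker file: write an empty sentinel when the daemon made the mount shared itself, and check for it on shutdown. A hedged sketch of the pattern, with a hypothetical exec root standing in for config.ExecRoot (the helper that returns the real path follows below):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// markUnmountOnShutdown records that the daemon made the root mount shared
// itself, so shutdown knows the mount is its own to undo.
func markUnmountOnShutdown(execRoot string) error {
	return ioutil.WriteFile(filepath.Join(execRoot, "unmount-on-shutdown"), nil, 0600)
}

// shouldUnmountOnShutdown checks whether the sentinel is present.
func shouldUnmountOnShutdown(execRoot string) bool {
	_, err := os.Stat(filepath.Join(execRoot, "unmount-on-shutdown"))
	return err == nil
}

func main() {
	dir, err := ioutil.TempDir("", "execroot")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	if err := markUnmountOnShutdown(dir); err != nil {
		panic(err)
	}
	fmt.Println("unmount on shutdown:", shouldUnmountOnShutdown(dir))
}
```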
-func getUnmountOnShutdownPath(config *config.Config) string { - return filepath.Join(config.ExecRoot, "unmount-on-shutdown") -} - -// registerLinks writes the links to a file. -func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { - if hostConfig == nil || hostConfig.NetworkMode.IsUserDefined() { - return nil - } - - for _, l := range hostConfig.Links { - name, alias, err := opts.ParseLink(l) - if err != nil { - return err - } - child, err := daemon.GetContainer(name) - if err != nil { - return errors.Wrapf(err, "could not get container for %s", name) - } - for child.HostConfig.NetworkMode.IsContainer() { - parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2) - child, err = daemon.GetContainer(parts[1]) - if err != nil { - return errors.Wrapf(err, "Could not get container for %s", parts[1]) - } - } - if child.HostConfig.NetworkMode.IsHost() { - return runconfig.ErrConflictHostNetworkAndLinks - } - if err := daemon.registerLink(container, child, alias); err != nil { - return err - } - } - - // After we load all the links into the daemon - // set them to nil on the hostconfig - _, err := container.WriteHostConfig() - return err -} - -// conditionalMountOnStart is a platform specific helper function during the -// container start to call mount. -func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { - return daemon.Mount(container) -} - -// conditionalUnmountOnCleanup is a platform specific helper function called -// during the cleanup of a container to unmount. -func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { - return daemon.Unmount(container) -} - -func copyBlkioEntry(entries []*containerd_cgroups.BlkIOEntry) []types.BlkioStatEntry { - out := make([]types.BlkioStatEntry, len(entries)) - for i, re := range entries { - out[i] = types.BlkioStatEntry{ - Major: re.Major, - Minor: re.Minor, - Op: re.Op, - Value: re.Value, - } - } - return out -} - -func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { - if !c.IsRunning() { - return nil, errNotRunning(c.ID) - } - cs, err := daemon.containerd.Stats(context.Background(), c.ID) - if err != nil { - if strings.Contains(err.Error(), "container not found") { - return nil, containerNotFound(c.ID) - } - return nil, err - } - s := &types.StatsJSON{} - s.Read = cs.Read - stats := cs.Metrics - if stats.Blkio != nil { - s.BlkioStats = types.BlkioStats{ - IoServiceBytesRecursive: copyBlkioEntry(stats.Blkio.IoServiceBytesRecursive), - IoServicedRecursive: copyBlkioEntry(stats.Blkio.IoServicedRecursive), - IoQueuedRecursive: copyBlkioEntry(stats.Blkio.IoQueuedRecursive), - IoServiceTimeRecursive: copyBlkioEntry(stats.Blkio.IoServiceTimeRecursive), - IoWaitTimeRecursive: copyBlkioEntry(stats.Blkio.IoWaitTimeRecursive), - IoMergedRecursive: copyBlkioEntry(stats.Blkio.IoMergedRecursive), - IoTimeRecursive: copyBlkioEntry(stats.Blkio.IoTimeRecursive), - SectorsRecursive: copyBlkioEntry(stats.Blkio.SectorsRecursive), - } - } - if stats.CPU != nil { - s.CPUStats = types.CPUStats{ - CPUUsage: types.CPUUsage{ - TotalUsage: stats.CPU.Usage.Total, - PercpuUsage: stats.CPU.Usage.PerCPU, - UsageInKernelmode: stats.CPU.Usage.Kernel, - UsageInUsermode: stats.CPU.Usage.User, - }, - ThrottlingData: types.ThrottlingData{ - Periods: stats.CPU.Throttling.Periods, - ThrottledPeriods: stats.CPU.Throttling.ThrottledPeriods, - ThrottledTime: stats.CPU.Throttling.ThrottledTime, - }, - } - } - - if stats.Memory 
!= nil { - raw := make(map[string]uint64) - raw["cache"] = stats.Memory.Cache - raw["rss"] = stats.Memory.RSS - raw["rss_huge"] = stats.Memory.RSSHuge - raw["mapped_file"] = stats.Memory.MappedFile - raw["dirty"] = stats.Memory.Dirty - raw["writeback"] = stats.Memory.Writeback - raw["pgpgin"] = stats.Memory.PgPgIn - raw["pgpgout"] = stats.Memory.PgPgOut - raw["pgfault"] = stats.Memory.PgFault - raw["pgmajfault"] = stats.Memory.PgMajFault - raw["inactive_anon"] = stats.Memory.InactiveAnon - raw["active_anon"] = stats.Memory.ActiveAnon - raw["inactive_file"] = stats.Memory.InactiveFile - raw["active_file"] = stats.Memory.ActiveFile - raw["unevictable"] = stats.Memory.Unevictable - raw["hierarchical_memory_limit"] = stats.Memory.HierarchicalMemoryLimit - raw["hierarchical_memsw_limit"] = stats.Memory.HierarchicalSwapLimit - raw["total_cache"] = stats.Memory.TotalCache - raw["total_rss"] = stats.Memory.TotalRSS - raw["total_rss_huge"] = stats.Memory.TotalRSSHuge - raw["total_mapped_file"] = stats.Memory.TotalMappedFile - raw["total_dirty"] = stats.Memory.TotalDirty - raw["total_writeback"] = stats.Memory.TotalWriteback - raw["total_pgpgin"] = stats.Memory.TotalPgPgIn - raw["total_pgpgout"] = stats.Memory.TotalPgPgOut - raw["total_pgfault"] = stats.Memory.TotalPgFault - raw["total_pgmajfault"] = stats.Memory.TotalPgMajFault - raw["total_inactive_anon"] = stats.Memory.TotalInactiveAnon - raw["total_active_anon"] = stats.Memory.TotalActiveAnon - raw["total_inactive_file"] = stats.Memory.TotalInactiveFile - raw["total_active_file"] = stats.Memory.TotalActiveFile - raw["total_unevictable"] = stats.Memory.TotalUnevictable - - if stats.Memory.Usage != nil { - s.MemoryStats = types.MemoryStats{ - Stats: raw, - Usage: stats.Memory.Usage.Usage, - MaxUsage: stats.Memory.Usage.Max, - Limit: stats.Memory.Usage.Limit, - Failcnt: stats.Memory.Usage.Failcnt, - } - } else { - s.MemoryStats = types.MemoryStats{ - Stats: raw, - } - } - - // if the container does not set memory limit, use the machineMemory - if s.MemoryStats.Limit > daemon.machineMemory && daemon.machineMemory > 0 { - s.MemoryStats.Limit = daemon.machineMemory - } - } - - if stats.Pids != nil { - s.PidsStats = types.PidsStats{ - Current: stats.Pids.Current, - Limit: stats.Pids.Limit, - } - } - - return s, nil -} - -// setDefaultIsolation determines the default isolation mode for the -// daemon to run in. This is only applicable on Windows -func (daemon *Daemon) setDefaultIsolation() error { - return nil -} - -// setupDaemonProcess sets various settings for the daemon's process -func setupDaemonProcess(config *config.Config) error { - // setup the daemons oom_score_adj - if err := setupOOMScoreAdj(config.OOMScoreAdjust); err != nil { - return err - } - if err := setMayDetachMounts(); err != nil { - logrus.WithError(err).Warn("Could not set may_detach_mounts kernel parameter") - } - return nil -} - -// This is used to allow removal of mountpoints that may be mounted in other -// namespaces on RHEL based kernels starting from RHEL 7.4. -// Without this setting, removals on these RHEL based kernels may fail with -// "device or resource busy". -// This setting is not available in upstream kernels as it is not configurable, -// but has been in the upstream kernels since 3.15. 
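The raw map built in the stats code above mirrors cgroup v1's memory.stat keys. One common way such a map is consumed downstream (an approximation by convention, not something this code defines) is to discount the reclaimable page cache when reporting how much memory a container is really using:

```go
package main

import "fmt"

// usedMemory subtracts the page cache from the raw usage counter, a
// conventional (and approximate) reading of cgroup v1 memory numbers.
func usedMemory(usage uint64, raw map[string]uint64) uint64 {
	if cache, ok := raw["cache"]; ok && cache < usage {
		return usage - cache
	}
	return usage
}

func main() {
	raw := map[string]uint64{"cache": 64 << 20} // 64 MiB of page cache
	fmt.Printf("%d MiB in use\n", usedMemory(256<<20, raw)>>20)
}
```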
-func setMayDetachMounts() error {
-	f, err := os.OpenFile("/proc/sys/fs/may_detach_mounts", os.O_WRONLY, 0)
-	if err != nil {
-		if os.IsNotExist(err) {
-			return nil
-		}
-		return errors.Wrap(err, "error opening may_detach_mounts kernel config file")
-	}
-	defer f.Close()
-
-	_, err = f.WriteString("1")
-	if os.IsPermission(err) {
-		// Setting may_detach_mounts does not work in an
-		// unprivileged container. Ignore the error, but log
-		// it if we appear not to be in that situation.
-		if !rsystem.RunningInUserNS() {
-			logrus.Debugf("Permission denied writing %q to /proc/sys/fs/may_detach_mounts", "1")
-		}
-		return nil
-	}
-	return err
-}
-
-func setupOOMScoreAdj(score int) error {
-	f, err := os.OpenFile("/proc/self/oom_score_adj", os.O_WRONLY, 0)
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-	stringScore := strconv.Itoa(score)
-	_, err = f.WriteString(stringScore)
-	if os.IsPermission(err) {
-		// Setting oom_score_adj does not work in an
-		// unprivileged container. Ignore the error, but log
-		// it if we appear not to be in that situation.
-		if !rsystem.RunningInUserNS() {
-			logrus.Debugf("Permission denied writing %q to /proc/self/oom_score_adj", stringScore)
-		}
-		return nil
-	}
-
-	return err
-}
-
-func (daemon *Daemon) initCgroupsPath(path string) error {
-	if path == "/" || path == "." {
-		return nil
-	}
-
-	if daemon.configStore.CPURealtimePeriod == 0 && daemon.configStore.CPURealtimeRuntime == 0 {
-		return nil
-	}
-
-	// Recursively create cgroup to ensure that the system and all parent cgroups have values set
-	// for the period and runtime as this limits what the children can be set to.
-	daemon.initCgroupsPath(filepath.Dir(path))
-
-	mnt, root, err := cgroups.FindCgroupMountpointAndRoot("cpu")
-	if err != nil {
-		return err
-	}
-	// When docker is run inside docker, the root is based off the host cgroup.
-	// Should this be handled in runc/libcontainer/cgroups ?
- if strings.HasPrefix(root, "/docker/") { - root = "/" - } - - path = filepath.Join(mnt, root, path) - sysinfo := sysinfo.New(true) - if err := maybeCreateCPURealTimeFile(sysinfo.CPURealtimePeriod, daemon.configStore.CPURealtimePeriod, "cpu.rt_period_us", path); err != nil { - return err - } - return maybeCreateCPURealTimeFile(sysinfo.CPURealtimeRuntime, daemon.configStore.CPURealtimeRuntime, "cpu.rt_runtime_us", path) -} - -func maybeCreateCPURealTimeFile(sysinfoPresent bool, configValue int64, file string, path string) error { - if sysinfoPresent && configValue != 0 { - if err := os.MkdirAll(path, 0755); err != nil { - return err - } - if err := ioutil.WriteFile(filepath.Join(path, file), []byte(strconv.FormatInt(configValue, 10)), 0700); err != nil { - return err - } - } - return nil -} - -func (daemon *Daemon) setupSeccompProfile() error { - if daemon.configStore.SeccompProfile != "" { - daemon.seccompProfilePath = daemon.configStore.SeccompProfile - b, err := ioutil.ReadFile(daemon.configStore.SeccompProfile) - if err != nil { - return fmt.Errorf("opening seccomp profile (%s) failed: %v", daemon.configStore.SeccompProfile, err) - } - daemon.seccompProfile = b - } - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/daemon_unsupported.go b/vendor/github.com/docker/docker/daemon/daemon_unsupported.go deleted file mode 100644 index ee680b641..000000000 --- a/vendor/github.com/docker/docker/daemon/daemon_unsupported.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build !linux,!freebsd,!windows - -package daemon // import "github.com/docker/docker/daemon" - -const platformSupported = false diff --git a/vendor/github.com/docker/docker/daemon/daemon_windows.go b/vendor/github.com/docker/docker/daemon/daemon_windows.go deleted file mode 100644 index 1f801032d..000000000 --- a/vendor/github.com/docker/docker/daemon/daemon_windows.go +++ /dev/null @@ -1,655 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "context" - "fmt" - "path/filepath" - "strings" - - "github.com/Microsoft/hcsshim" - "github.com/docker/docker/api/types" - containertypes "github.com/docker/docker/api/types/container" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/config" - "github.com/docker/docker/pkg/containerfs" - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/parsers" - "github.com/docker/docker/pkg/platform" - "github.com/docker/docker/pkg/sysinfo" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/runconfig" - "github.com/docker/libnetwork" - nwconfig "github.com/docker/libnetwork/config" - "github.com/docker/libnetwork/datastore" - winlibnetwork "github.com/docker/libnetwork/drivers/windows" - "github.com/docker/libnetwork/netlabel" - "github.com/docker/libnetwork/options" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sys/windows" - "golang.org/x/sys/windows/svc/mgr" -) - -const ( - defaultNetworkSpace = "172.16.0.0/12" - platformSupported = true - windowsMinCPUShares = 1 - windowsMaxCPUShares = 10000 - windowsMinCPUPercent = 1 - windowsMaxCPUPercent = 100 -) - -// Windows has no concept of an execution state directory. So use config.Root here. 
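maybeCreateCPURealTimeFile above boils down to "mkdir the cgroup, then write one integer into a control file". A standalone sketch of that write, pointed at a temp directory so it is safe to run (real paths would live under the cpu cgroup mount):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strconv"
)

// writeCgroupValue ensures the cgroup directory exists and writes a single
// integer into a control file such as cpu.rt_period_us.
func writeCgroupValue(dir, file string, value int64) error {
	if err := os.MkdirAll(dir, 0755); err != nil {
		return err
	}
	data := []byte(strconv.FormatInt(value, 10))
	return ioutil.WriteFile(filepath.Join(dir, file), data, 0700)
}

func main() {
	dir, err := ioutil.TempDir("", "cg")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	if err := writeCgroupValue(dir, "cpu.rt_period_us", 1000000); err != nil {
		panic(err)
	}
	fmt.Println("wrote cpu.rt_period_us=1000000 under", dir)
}
```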
-func getPluginExecRoot(root string) string { - return filepath.Join(root, "plugins") -} - -func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error { - return parseSecurityOpt(container, hostConfig) -} - -func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { - return nil -} - -func setupInitLayer(idMappings *idtools.IDMappings) func(containerfs.ContainerFS) error { - return nil -} - -func checkKernel() error { - return nil -} - -func (daemon *Daemon) getCgroupDriver() string { - return "" -} - -// adaptContainerSettings is called during container creation to modify any -// settings necessary in the HostConfig structure. -func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { - if hostConfig == nil { - return nil - } - - return nil -} - -func verifyContainerResources(resources *containertypes.Resources, isHyperv bool) ([]string, error) { - warnings := []string{} - fixMemorySwappiness(resources) - if !isHyperv { - // The processor resource controls are mutually exclusive on - // Windows Server Containers, the order of precedence is - // CPUCount first, then CPUShares, and CPUPercent last. - if resources.CPUCount > 0 { - if resources.CPUShares > 0 { - warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. CPU shares discarded") - logrus.Warn("Conflicting options: CPU count takes priority over CPU shares on Windows Server Containers. CPU shares discarded") - resources.CPUShares = 0 - } - if resources.CPUPercent > 0 { - warnings = append(warnings, "Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. CPU percent discarded") - logrus.Warn("Conflicting options: CPU count takes priority over CPU percent on Windows Server Containers. CPU percent discarded") - resources.CPUPercent = 0 - } - } else if resources.CPUShares > 0 { - if resources.CPUPercent > 0 { - warnings = append(warnings, "Conflicting options: CPU shares takes priority over CPU percent on Windows Server Containers. CPU percent discarded") - logrus.Warn("Conflicting options: CPU shares takes priority over CPU percent on Windows Server Containers. CPU percent discarded") - resources.CPUPercent = 0 - } - } - } - - if resources.CPUShares < 0 || resources.CPUShares > windowsMaxCPUShares { - return warnings, fmt.Errorf("range of CPUShares is from %d to %d", windowsMinCPUShares, windowsMaxCPUShares) - } - if resources.CPUPercent < 0 || resources.CPUPercent > windowsMaxCPUPercent { - return warnings, fmt.Errorf("range of CPUPercent is from %d to %d", windowsMinCPUPercent, windowsMaxCPUPercent) - } - if resources.CPUCount < 0 { - return warnings, fmt.Errorf("invalid CPUCount: CPUCount cannot be negative") - } - - if resources.NanoCPUs > 0 && resources.CPUPercent > 0 { - return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Percent cannot both be set") - } - if resources.NanoCPUs > 0 && resources.CPUShares > 0 { - return warnings, fmt.Errorf("conflicting options: Nano CPUs and CPU Shares cannot both be set") - } - // The precision we could get is 0.01, because on Windows we have to convert to CPUPercent. - // We don't set the lower limit here and it is up to the underlying platform (e.g., Windows) to return an error. 
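The NanoCPUs checks that follow work in billionths of a CPU, and on older Hyper-V builds the value gets rounded to a whole CPU. That rounding is plain integer arithmetic, shown here in isolation:

```go
package main

import "fmt"

// roundNanoCPUs rounds a --cpus value expressed in nano-CPUs to the
// nearest whole CPU: add half a CPU, then truncate, as the Hyper-V
// fallback below does for pre-16175 builds.
func roundNanoCPUs(n int64) int64 {
	return ((n + 1e9/2) / 1e9) * 1e9
}

func main() {
	fmt.Println(roundNanoCPUs(1500000000)) // 1.5 CPUs -> 2000000000
	fmt.Println(roundNanoCPUs(1200000000)) // 1.2 CPUs -> 1000000000
}
```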
- if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 { - return warnings, fmt.Errorf("range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU()) - } - - osv := system.GetOSVersion() - if resources.NanoCPUs > 0 && isHyperv && osv.Build < 16175 { - leftoverNanoCPUs := resources.NanoCPUs % 1e9 - if leftoverNanoCPUs != 0 && resources.NanoCPUs > 1e9 { - resources.NanoCPUs = ((resources.NanoCPUs + 1e9/2) / 1e9) * 1e9 - warningString := fmt.Sprintf("Your current OS version does not support Hyper-V containers with NanoCPUs greater than 1000000000 but not divisible by 1000000000. NanoCPUs rounded to %d", resources.NanoCPUs) - warnings = append(warnings, warningString) - logrus.Warn(warningString) - } - } - - if len(resources.BlkioDeviceReadBps) > 0 { - return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadBps") - } - if len(resources.BlkioDeviceReadIOps) > 0 { - return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceReadIOps") - } - if len(resources.BlkioDeviceWriteBps) > 0 { - return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteBps") - } - if len(resources.BlkioDeviceWriteIOps) > 0 { - return warnings, fmt.Errorf("invalid option: Windows does not support BlkioDeviceWriteIOps") - } - if resources.BlkioWeight > 0 { - return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeight") - } - if len(resources.BlkioWeightDevice) > 0 { - return warnings, fmt.Errorf("invalid option: Windows does not support BlkioWeightDevice") - } - if resources.CgroupParent != "" { - return warnings, fmt.Errorf("invalid option: Windows does not support CgroupParent") - } - if resources.CPUPeriod != 0 { - return warnings, fmt.Errorf("invalid option: Windows does not support CPUPeriod") - } - if resources.CpusetCpus != "" { - return warnings, fmt.Errorf("invalid option: Windows does not support CpusetCpus") - } - if resources.CpusetMems != "" { - return warnings, fmt.Errorf("invalid option: Windows does not support CpusetMems") - } - if resources.KernelMemory != 0 { - return warnings, fmt.Errorf("invalid option: Windows does not support KernelMemory") - } - if resources.MemoryReservation != 0 { - return warnings, fmt.Errorf("invalid option: Windows does not support MemoryReservation") - } - if resources.MemorySwap != 0 { - return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwap") - } - if resources.MemorySwappiness != nil { - return warnings, fmt.Errorf("invalid option: Windows does not support MemorySwappiness") - } - if resources.OomKillDisable != nil && *resources.OomKillDisable { - return warnings, fmt.Errorf("invalid option: Windows does not support OomKillDisable") - } - if resources.PidsLimit != 0 { - return warnings, fmt.Errorf("invalid option: Windows does not support PidsLimit") - } - if len(resources.Ulimits) != 0 { - return warnings, fmt.Errorf("invalid option: Windows does not support Ulimits") - } - return warnings, nil -} - -// verifyPlatformContainerSettings performs platform-specific validation of the -// hostconfig and config structures. -func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { - warnings := []string{} - - hyperv := daemon.runAsHyperVContainer(hostConfig) - if !hyperv && system.IsWindowsClient() && !system.IsIoTCore() { - // @engine maintainers. This block should not be removed. 
It partially enforces licensing
-		// restrictions on Windows. Ping @jhowardmsft if there are concerns or PRs to change this.
-		return warnings, fmt.Errorf("Windows client operating systems only support Hyper-V containers")
-	}
-
-	w, err := verifyContainerResources(&hostConfig.Resources, hyperv)
-	warnings = append(warnings, w...)
-	return warnings, err
-}
-
-// verifyDaemonSettings performs validation of daemon config struct
-func verifyDaemonSettings(config *config.Config) error {
-	return nil
-}
-
-// checkSystem validates platform-specific requirements
-func checkSystem() error {
-	// Validate the OS version. Note that docker.exe must be manifested for this
-	// call to return the correct version.
-	osv := system.GetOSVersion()
-	if osv.MajorVersion < 10 {
-		return fmt.Errorf("This version of Windows does not support the docker daemon")
-	}
-	if osv.Build < 14393 {
-		return fmt.Errorf("The docker daemon requires build 14393 or later of Windows Server 2016 or Windows 10")
-	}
-
-	vmcompute := windows.NewLazySystemDLL("vmcompute.dll")
-	if vmcompute.Load() != nil {
-		return fmt.Errorf("failed to load vmcompute.dll, ensure that the Containers feature is installed")
-	}
-
-	// Ensure that the required Host Network Service and vmcompute services
-	// are running. Docker will fail in unexpected ways if this is not present.
-	var requiredServices = []string{"hns", "vmcompute"}
-	if err := ensureServicesInstalled(requiredServices); err != nil {
-		return errors.Wrap(err, "a required service is not installed, ensure the Containers feature is installed")
-	}
-
-	return nil
-}
-
-func ensureServicesInstalled(services []string) error {
-	m, err := mgr.Connect()
-	if err != nil {
-		return err
-	}
-	defer m.Disconnect()
-	for _, service := range services {
-		s, err := m.OpenService(service)
-		if err != nil {
-			return errors.Wrapf(err, "failed to open service %s", service)
-		}
-		s.Close()
-	}
-	return nil
-}
-
-// configureKernelSecuritySupport configures and validates security support for the kernel
-func configureKernelSecuritySupport(config *config.Config, driverName string) error {
-	return nil
-}
-
-// configureMaxThreads sets the Go runtime max threads threshold
-func configureMaxThreads(config *config.Config) error {
-	return nil
-}
-
-func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) {
-	netOptions, err := daemon.networkOptions(config, nil, nil)
-	if err != nil {
-		return nil, err
-	}
-	controller, err := libnetwork.New(netOptions...)
- if err != nil { - return nil, fmt.Errorf("error obtaining controller instance: %v", err) - } - - hnsresponse, err := hcsshim.HNSListNetworkRequest("GET", "", "") - if err != nil { - return nil, err - } - - // Remove networks not present in HNS - for _, v := range controller.Networks() { - options := v.Info().DriverOptions() - hnsid := options[winlibnetwork.HNSID] - found := false - - for _, v := range hnsresponse { - if v.Id == hnsid { - found = true - break - } - } - - if !found { - // global networks should not be deleted by local HNS - if v.Info().Scope() != datastore.GlobalScope { - err = v.Delete() - if err != nil { - logrus.Errorf("Error occurred when removing network %v", err) - } - } - } - } - - _, err = controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(false)) - if err != nil { - return nil, err - } - - defaultNetworkExists := false - - if network, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { - options := network.Info().DriverOptions() - for _, v := range hnsresponse { - if options[winlibnetwork.HNSID] == v.Id { - defaultNetworkExists = true - break - } - } - } - - // discover and add HNS networks to windows - // network that exist are removed and added again - for _, v := range hnsresponse { - if strings.ToLower(v.Type) == "private" { - continue // workaround for HNS reporting unsupported networks - } - var n libnetwork.Network - s := func(current libnetwork.Network) bool { - options := current.Info().DriverOptions() - if options[winlibnetwork.HNSID] == v.Id { - n = current - return true - } - return false - } - - controller.WalkNetworks(s) - - drvOptions := make(map[string]string) - - if n != nil { - // global networks should not be deleted by local HNS - if n.Info().Scope() == datastore.GlobalScope { - continue - } - v.Name = n.Name() - // This will not cause network delete from HNS as the network - // is not yet populated in the libnetwork windows driver - - // restore option if it existed before - drvOptions = n.Info().DriverOptions() - n.Delete() - } - netOption := map[string]string{ - winlibnetwork.NetworkName: v.Name, - winlibnetwork.HNSID: v.Id, - } - - // add persisted driver options - for k, v := range drvOptions { - if k != winlibnetwork.NetworkName && k != winlibnetwork.HNSID { - netOption[k] = v - } - } - - v4Conf := []*libnetwork.IpamConf{} - for _, subnet := range v.Subnets { - ipamV4Conf := libnetwork.IpamConf{} - ipamV4Conf.PreferredPool = subnet.AddressPrefix - ipamV4Conf.Gateway = subnet.GatewayAddress - v4Conf = append(v4Conf, &ipamV4Conf) - } - - name := v.Name - - // If there is no nat network create one from the first NAT network - // encountered if it doesn't already exist - if !defaultNetworkExists && - runconfig.DefaultDaemonNetworkMode() == containertypes.NetworkMode(strings.ToLower(v.Type)) && - n == nil { - name = runconfig.DefaultDaemonNetworkMode().NetworkName() - defaultNetworkExists = true - } - - v6Conf := []*libnetwork.IpamConf{} - _, err := controller.NewNetwork(strings.ToLower(v.Type), name, "", - libnetwork.NetworkOptionGeneric(options.Generic{ - netlabel.GenericData: netOption, - }), - libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), - ) - - if err != nil { - logrus.Errorf("Error occurred when creating network %v", err) - } - } - - if !config.DisableBridge { - // Initialize default driver "bridge" - if err := initBridgeDriver(controller, config); err != nil { - return nil, err - } - } - - return controller, nil -} - -func 
initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error {
-	if _, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil {
-		return nil
-	}
-
-	netOption := map[string]string{
-		winlibnetwork.NetworkName: runconfig.DefaultDaemonNetworkMode().NetworkName(),
-	}
-
-	var ipamOption libnetwork.NetworkOption
-	var subnetPrefix string
-
-	if config.BridgeConfig.FixedCIDR != "" {
-		subnetPrefix = config.BridgeConfig.FixedCIDR
-	} else {
-		// TP5 doesn't support properly detecting subnet
-		osv := system.GetOSVersion()
-		if osv.Build < 14360 {
-			subnetPrefix = defaultNetworkSpace
-		}
-	}
-
-	if subnetPrefix != "" {
-		ipamV4Conf := libnetwork.IpamConf{}
-		ipamV4Conf.PreferredPool = subnetPrefix
-		v4Conf := []*libnetwork.IpamConf{&ipamV4Conf}
-		v6Conf := []*libnetwork.IpamConf{}
-		ipamOption = libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil)
-	}
-
-	_, err := controller.NewNetwork(string(runconfig.DefaultDaemonNetworkMode()), runconfig.DefaultDaemonNetworkMode().NetworkName(), "",
-		libnetwork.NetworkOptionGeneric(options.Generic{
-			netlabel.GenericData: netOption,
-		}),
-		ipamOption,
-	)
-
-	if err != nil {
-		return fmt.Errorf("Error creating default network: %v", err)
-	}
-
-	return nil
-}
-
-// registerLinks sets up links between containers and writes the
-// configuration out for persistence. As of Windows TP4, links are not supported.
-func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error {
-	return nil
-}
-
-func (daemon *Daemon) cleanupMountsByID(in string) error {
-	return nil
-}
-
-func (daemon *Daemon) cleanupMounts() error {
-	return nil
-}
-
-func setupRemappedRoot(config *config.Config) (*idtools.IDMappings, error) {
-	return &idtools.IDMappings{}, nil
-}
-
-func setupDaemonRoot(config *config.Config, rootDir string, rootIDs idtools.IDPair) error {
-	config.Root = rootDir
-	// Create the root directory if it doesn't exist
-	if err := system.MkdirAllWithACL(config.Root, 0, system.SddlAdministratorsLocalSystem); err != nil {
-		return err
-	}
-	return nil
-}
-
-// runAsHyperVContainer returns true if we are going to run as a Hyper-V container
-func (daemon *Daemon) runAsHyperVContainer(hostConfig *containertypes.HostConfig) bool {
-	if hostConfig.Isolation.IsDefault() {
-		// Container is set to use the default, so take the default from the daemon configuration
-		return daemon.defaultIsolation.IsHyperV()
-	}
-
-	// Container is requesting an isolation mode. Honour it.
-	return hostConfig.Isolation.IsHyperV()
-}
-
-// conditionalMountOnStart is a platform specific helper function during the
-// container start to call mount.
-func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error {
-	// Bail out now for Linux containers. We cannot mount the containers filesystem on the
-	// host as it is a non-Windows filesystem.
-	if system.LCOWSupported() && container.OS != "windows" {
-		return nil
-	}
-
-	// We do not mount if a Hyper-V container as it needs to be mounted inside the
-	// utility VM, not the host.
-	if !daemon.runAsHyperVContainer(container.HostConfig) {
-		return daemon.Mount(container)
-	}
-	return nil
-}
-
-// conditionalUnmountOnCleanup is a platform specific helper function called
-// during the cleanup of a container to unmount.
-func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error {
-	// Bail out now for Linux containers
-	if system.LCOWSupported() && container.OS != "windows" {
-		return nil
-	}
-
-	// We do not unmount if a Hyper-V container
-	if !daemon.runAsHyperVContainer(container.HostConfig) {
-		return daemon.Unmount(container)
-	}
-	return nil
-}
-
-func driverOptions(config *config.Config) []nwconfig.Option {
-	return []nwconfig.Option{}
-}
-
-func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) {
-	if !c.IsRunning() {
-		return nil, errNotRunning(c.ID)
-	}
-
-	// Obtain the stats from HCS via libcontainerd
-	stats, err := daemon.containerd.Stats(context.Background(), c.ID)
-	if err != nil {
-		if strings.Contains(err.Error(), "container not found") {
-			return nil, containerNotFound(c.ID)
-		}
-		return nil, err
-	}
-
-	// Start with an empty structure
-	s := &types.StatsJSON{}
-	s.Stats.Read = stats.Read
-	s.Stats.NumProcs = platform.NumProcs()
-
-	if stats.HCSStats != nil {
-		hcss := stats.HCSStats
-		// Populate the CPU/processor statistics
-		s.CPUStats = types.CPUStats{
-			CPUUsage: types.CPUUsage{
-				TotalUsage:        hcss.Processor.TotalRuntime100ns,
-				UsageInKernelmode: hcss.Processor.RuntimeKernel100ns,
-				UsageInUsermode:   hcss.Processor.RuntimeUser100ns,
-			},
-		}
-
-		// Populate the memory statistics
-		s.MemoryStats = types.MemoryStats{
-			Commit:            hcss.Memory.UsageCommitBytes,
-			CommitPeak:        hcss.Memory.UsageCommitPeakBytes,
-			PrivateWorkingSet: hcss.Memory.UsagePrivateWorkingSetBytes,
-		}
-
-		// Populate the storage statistics
-		s.StorageStats = types.StorageStats{
-			ReadCountNormalized:  hcss.Storage.ReadCountNormalized,
-			ReadSizeBytes:        hcss.Storage.ReadSizeBytes,
-			WriteCountNormalized: hcss.Storage.WriteCountNormalized,
-			WriteSizeBytes:       hcss.Storage.WriteSizeBytes,
-		}
-
-		// Populate the network statistics
-		s.Networks = make(map[string]types.NetworkStats)
-		for _, nstats := range hcss.Network {
-			s.Networks[nstats.EndpointId] = types.NetworkStats{
-				RxBytes:   nstats.BytesReceived,
-				RxPackets: nstats.PacketsReceived,
-				RxDropped: nstats.DroppedPacketsIncoming,
-				TxBytes:   nstats.BytesSent,
-				TxPackets: nstats.PacketsSent,
-				TxDropped: nstats.DroppedPacketsOutgoing,
-			}
-		}
-	}
-	return s, nil
-}
-
-// setDefaultIsolation determines the default isolation mode for the
-// daemon to run in. This is only applicable on Windows
-func (daemon *Daemon) setDefaultIsolation() error {
-	daemon.defaultIsolation = containertypes.Isolation("process")
-	// On client SKUs, default to Hyper-V. Note that IoT reports as a client SKU
-	// but it should not be treated as such.
-	if system.IsWindowsClient() && !system.IsIoTCore() {
-		daemon.defaultIsolation = containertypes.Isolation("hyperv")
-	}
-	for _, option := range daemon.configStore.ExecOptions {
-		key, val, err := parsers.ParseKeyValueOpt(option)
-		if err != nil {
-			return err
-		}
-		key = strings.ToLower(key)
-		switch key {
-
-		case "isolation":
-			if !containertypes.Isolation(val).IsValid() {
-				return fmt.Errorf("Invalid exec-opt value for 'isolation':'%s'", val)
-			}
-			if containertypes.Isolation(val).IsHyperV() {
-				daemon.defaultIsolation = containertypes.Isolation("hyperv")
-			}
-			if containertypes.Isolation(val).IsProcess() {
-				if system.IsWindowsClient() && !system.IsIoTCore() {
-					// @engine maintainers. This block should not be removed. It partially enforces licensing
-					// restrictions on Windows. Ping @jhowardmsft if there are concerns or PRs to change this.
- return fmt.Errorf("Windows client operating systems only support Hyper-V containers") - } - daemon.defaultIsolation = containertypes.Isolation("process") - } - default: - return fmt.Errorf("Unrecognised exec-opt '%s'\n", key) - } - } - - logrus.Infof("Windows default isolation mode: %s", daemon.defaultIsolation) - return nil -} - -func setupDaemonProcess(config *config.Config) error { - return nil -} - -func (daemon *Daemon) setupSeccompProfile() error { - return nil -} - -func getRealPath(path string) (string, error) { - if system.IsIoTCore() { - // Due to https://github.com/golang/go/issues/20506, path expansion - // does not work correctly on the default IoT Core configuration. - // TODO @darrenstahlmsft remove this once golang/go/20506 is fixed - return path, nil - } - return fileutils.ReadSymlinkedDirectory(path) -} - -func (daemon *Daemon) loadRuntimes() error { - return nil -} - -func (daemon *Daemon) initRuntimes(_ map[string]types.Runtime) error { - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/debugtrap_unix.go b/vendor/github.com/docker/docker/daemon/debugtrap_unix.go deleted file mode 100644 index c8abe69bb..000000000 --- a/vendor/github.com/docker/docker/daemon/debugtrap_unix.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build !windows - -package daemon // import "github.com/docker/docker/daemon" - -import ( - "os" - "os/signal" - - stackdump "github.com/docker/docker/pkg/signal" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -func (d *Daemon) setupDumpStackTrap(root string) { - c := make(chan os.Signal, 1) - signal.Notify(c, unix.SIGUSR1) - go func() { - for range c { - path, err := stackdump.DumpStacks(root) - if err != nil { - logrus.WithError(err).Error("failed to write goroutines dump") - } else { - logrus.Infof("goroutine stacks written to %s", path) - } - } - }() -} diff --git a/vendor/github.com/docker/docker/daemon/debugtrap_unsupported.go b/vendor/github.com/docker/docker/daemon/debugtrap_unsupported.go deleted file mode 100644 index e83d51f59..000000000 --- a/vendor/github.com/docker/docker/daemon/debugtrap_unsupported.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !linux,!darwin,!freebsd,!windows - -package daemon // import "github.com/docker/docker/daemon" - -func (d *Daemon) setupDumpStackTrap(_ string) { - return -} diff --git a/vendor/github.com/docker/docker/daemon/debugtrap_windows.go b/vendor/github.com/docker/docker/daemon/debugtrap_windows.go deleted file mode 100644 index b438d0381..000000000 --- a/vendor/github.com/docker/docker/daemon/debugtrap_windows.go +++ /dev/null @@ -1,46 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "fmt" - "os" - "unsafe" - - winio "github.com/Microsoft/go-winio" - "github.com/docker/docker/pkg/signal" - "github.com/sirupsen/logrus" - "golang.org/x/sys/windows" -) - -func (d *Daemon) setupDumpStackTrap(root string) { - // Windows does not support signals like *nix systems. So instead of - // trapping on SIGUSR1 to dump stacks, we wait on a Win32 event to be - // signaled. 
ACL'd to builtin administrators and local system - event := "Global\\docker-daemon-" + fmt.Sprint(os.Getpid()) - ev, _ := windows.UTF16PtrFromString(event) - sd, err := winio.SddlToSecurityDescriptor("D:P(A;;GA;;;BA)(A;;GA;;;SY)") - if err != nil { - logrus.Errorf("failed to get security descriptor for debug stackdump event %s: %s", event, err.Error()) - return - } - var sa windows.SecurityAttributes - sa.Length = uint32(unsafe.Sizeof(sa)) - sa.InheritHandle = 1 - sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0])) - h, err := windows.CreateEvent(&sa, 0, 0, ev) - if h == 0 || err != nil { - logrus.Errorf("failed to create debug stackdump event %s: %s", event, err.Error()) - return - } - go func() { - logrus.Debugf("Stackdump - waiting signal at %s", event) - for { - windows.WaitForSingleObject(h, windows.INFINITE) - path, err := signal.DumpStacks(root) - if err != nil { - logrus.WithError(err).Error("failed to write goroutines dump") - } else { - logrus.Infof("goroutine stacks written to %s", path) - } - } - }() -} diff --git a/vendor/github.com/docker/docker/daemon/delete.go b/vendor/github.com/docker/docker/daemon/delete.go deleted file mode 100644 index 2ccbff05f..000000000 --- a/vendor/github.com/docker/docker/daemon/delete.go +++ /dev/null @@ -1,152 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "fmt" - "os" - "path" - "strings" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/container" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/system" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// ContainerRm removes the container id from the filesystem. An error -// is returned if the container is not found, or if the remove -// fails. If the remove succeeds, the container name is released, and -// network links are removed. -func (daemon *Daemon) ContainerRm(name string, config *types.ContainerRmConfig) error { - start := time.Now() - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - - // Container state RemovalInProgress should be used to avoid races. 
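The RemovalInProgress guard referenced in the comment above is, at its core, a compare-and-swap on a flag: the first caller wins and any concurrent remover gets a conflict. A minimal sketch of the pattern (the real flag lives on the container state, not a separate struct):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// removalGuard admits exactly one remover at a time, mirroring the
// SetRemovalInProgress/ResetRemovalInProgress pair used below.
type removalGuard struct{ inProgress int32 }

func (g *removalGuard) begin() bool { return atomic.CompareAndSwapInt32(&g.inProgress, 0, 1) }
func (g *removalGuard) end()        { atomic.StoreInt32(&g.inProgress, 0) }

func main() {
	var g removalGuard
	fmt.Println(g.begin()) // true: first remover proceeds
	fmt.Println(g.begin()) // false: removal already in progress
	g.end()
	fmt.Println(g.begin()) // true again after reset
}
```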
- if inProgress := container.SetRemovalInProgress(); inProgress { - err := fmt.Errorf("removal of container %s is already in progress", name) - return errdefs.Conflict(err) - } - defer container.ResetRemovalInProgress() - - // check if container wasn't deregistered by previous rm since Get - if c := daemon.containers.Get(container.ID); c == nil { - return nil - } - - if config.RemoveLink { - return daemon.rmLink(container, name) - } - - err = daemon.cleanupContainer(container, config.ForceRemove, config.RemoveVolume) - containerActions.WithValues("delete").UpdateSince(start) - - return err -} - -func (daemon *Daemon) rmLink(container *container.Container, name string) error { - if name[0] != '/' { - name = "/" + name - } - parent, n := path.Split(name) - if parent == "/" { - return fmt.Errorf("Conflict, cannot remove the default name of the container") - } - - parent = strings.TrimSuffix(parent, "/") - pe, err := daemon.containersReplica.Snapshot().GetID(parent) - if err != nil { - return fmt.Errorf("Cannot get parent %s for name %s", parent, name) - } - - daemon.releaseName(name) - parentContainer, _ := daemon.GetContainer(pe) - if parentContainer != nil { - daemon.linkIndex.unlink(name, container, parentContainer) - if err := daemon.updateNetwork(parentContainer); err != nil { - logrus.Debugf("Could not update network to remove link %s: %v", n, err) - } - } - return nil -} - -// cleanupContainer unregisters a container from the daemon, stops stats -// collection and cleanly removes contents and metadata from the filesystem. -func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemove, removeVolume bool) (err error) { - if container.IsRunning() { - if !forceRemove { - state := container.StateString() - procedure := "Stop the container before attempting removal or force remove" - if state == "paused" { - procedure = "Unpause and then " + strings.ToLower(procedure) - } - err := fmt.Errorf("You cannot remove a %s container %s. %s", state, container.ID, procedure) - return errdefs.Conflict(err) - } - if err := daemon.Kill(container); err != nil { - return fmt.Errorf("Could not kill running container %s, cannot remove - %v", container.ID, err) - } - } - if !system.IsOSSupported(container.OS) { - return fmt.Errorf("cannot remove %s: %s ", container.ID, system.ErrNotSupportedOperatingSystem) - } - - // stop collection of stats for the container regardless - // if stats are currently getting collected. - daemon.statsCollector.StopCollection(container) - - if err = daemon.containerStop(container, 3); err != nil { - return err - } - - // Mark container dead. We don't want anybody to be restarting it. - container.Lock() - container.Dead = true - - // Save container state to disk. So that if error happens before - // container meta file got removed from disk, then a restart of - // docker should not make a dead container alive. 
- if err := container.CheckpointTo(daemon.containersReplica); err != nil && !os.IsNotExist(err) { - logrus.Errorf("Error saving dying container to disk: %v", err) - } - container.Unlock() - - // When container creation fails and `RWLayer` has not been created yet, we - // do not call `ReleaseRWLayer` - if container.RWLayer != nil { - err := daemon.imageService.ReleaseLayer(container.RWLayer, container.OS) - if err != nil { - err = errors.Wrapf(err, "container %s", container.ID) - container.SetRemovalError(err) - return err - } - container.RWLayer = nil - } - - if err := system.EnsureRemoveAll(container.Root); err != nil { - e := errors.Wrapf(err, "unable to remove filesystem for %s", container.ID) - container.SetRemovalError(e) - return e - } - - linkNames := daemon.linkIndex.delete(container) - selinuxFreeLxcContexts(container.ProcessLabel) - daemon.idIndex.Delete(container.ID) - daemon.containers.Delete(container.ID) - daemon.containersReplica.Delete(container) - if e := daemon.removeMountPoints(container, removeVolume); e != nil { - logrus.Error(e) - } - for _, name := range linkNames { - daemon.releaseName(name) - } - container.SetRemoved() - stateCtr.del(container.ID) - - daemon.LogContainerEvent(container, "destroy") - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/dependency.go b/vendor/github.com/docker/docker/daemon/dependency.go deleted file mode 100644 index 45275dbf4..000000000 --- a/vendor/github.com/docker/docker/daemon/dependency.go +++ /dev/null @@ -1,17 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "github.com/docker/swarmkit/agent/exec" -) - -// SetContainerDependencyStore sets the dependency store backend for the container -func (daemon *Daemon) SetContainerDependencyStore(name string, store exec.DependencyGetter) error { - c, err := daemon.GetContainer(name) - if err != nil { - return err - } - - c.DependencyStore = store - - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/discovery/discovery.go b/vendor/github.com/docker/docker/daemon/discovery/discovery.go deleted file mode 100644 index 092c57638..000000000 --- a/vendor/github.com/docker/docker/daemon/discovery/discovery.go +++ /dev/null @@ -1,202 +0,0 @@ -package discovery // import "github.com/docker/docker/daemon/discovery" - -import ( - "errors" - "fmt" - "strconv" - "time" - - "github.com/docker/docker/pkg/discovery" - "github.com/sirupsen/logrus" - - // Register the libkv backends for discovery. - _ "github.com/docker/docker/pkg/discovery/kv" -) - -const ( - // defaultDiscoveryHeartbeat is the default value for discovery heartbeat interval. 
- defaultDiscoveryHeartbeat = 20 * time.Second - // defaultDiscoveryTTLFactor is the default TTL factor for discovery - defaultDiscoveryTTLFactor = 3 -) - -// ErrDiscoveryDisabled is an error returned if the discovery is disabled -var ErrDiscoveryDisabled = errors.New("discovery is disabled") - -// Reloader is the discovery reloader of the daemon -type Reloader interface { - discovery.Watcher - Stop() - Reload(backend, address string, clusterOpts map[string]string) error - ReadyCh() <-chan struct{} -} - -type daemonDiscoveryReloader struct { - backend discovery.Backend - ticker *time.Ticker - term chan bool - readyCh chan struct{} -} - -func (d *daemonDiscoveryReloader) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { - return d.backend.Watch(stopCh) -} - -func (d *daemonDiscoveryReloader) ReadyCh() <-chan struct{} { - return d.readyCh -} - -func discoveryOpts(clusterOpts map[string]string) (time.Duration, time.Duration, error) { - var ( - heartbeat = defaultDiscoveryHeartbeat - ttl = defaultDiscoveryTTLFactor * defaultDiscoveryHeartbeat - ) - - if hb, ok := clusterOpts["discovery.heartbeat"]; ok { - h, err := strconv.Atoi(hb) - if err != nil { - return time.Duration(0), time.Duration(0), err - } - - if h <= 0 { - return time.Duration(0), time.Duration(0), - fmt.Errorf("discovery.heartbeat must be positive") - } - - heartbeat = time.Duration(h) * time.Second - ttl = defaultDiscoveryTTLFactor * heartbeat - } - - if tstr, ok := clusterOpts["discovery.ttl"]; ok { - t, err := strconv.Atoi(tstr) - if err != nil { - return time.Duration(0), time.Duration(0), err - } - - if t <= 0 { - return time.Duration(0), time.Duration(0), - fmt.Errorf("discovery.ttl must be positive") - } - - ttl = time.Duration(t) * time.Second - - if _, ok := clusterOpts["discovery.heartbeat"]; !ok { - heartbeat = time.Duration(t) * time.Second / time.Duration(defaultDiscoveryTTLFactor) - } - - if ttl <= heartbeat { - return time.Duration(0), time.Duration(0), - fmt.Errorf("discovery.ttl timer must be greater than discovery.heartbeat") - } - } - - return heartbeat, ttl, nil -} - -// Init initializes the nodes discovery subsystem by connecting to the specified backend -// and starts a registration loop to advertise the current node under the specified address. -func Init(backendAddress, advertiseAddress string, clusterOpts map[string]string) (Reloader, error) { - heartbeat, backend, err := parseDiscoveryOptions(backendAddress, clusterOpts) - if err != nil { - return nil, err - } - - reloader := &daemonDiscoveryReloader{ - backend: backend, - ticker: time.NewTicker(heartbeat), - term: make(chan bool), - readyCh: make(chan struct{}), - } - // We call Register() on the discovery backend in a loop for the whole lifetime of the daemon, - // but we never actually Watch() for nodes appearing and disappearing for the moment. - go reloader.advertiseHeartbeat(advertiseAddress) - return reloader, nil -} - -// advertiseHeartbeat registers the current node against the discovery backend using the specified -// address. The function never returns, as registration against the backend comes with a TTL and -// requires regular heartbeats. 
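discoveryOpts above derives the two timers from cluster options: the TTL defaults to three heartbeats, and when only a TTL is supplied the heartbeat becomes TTL/3. The same arithmetic in isolation, with parsing and validation elided:

```go
package main

import (
	"fmt"
	"time"
)

// heartbeatAndTTL reproduces the defaulting rules of discoveryOpts for
// already-parsed second values; zero means "not set".
func heartbeatAndTTL(heartbeatSec, ttlSec int) (time.Duration, time.Duration) {
	const ttlFactor = 3
	heartbeat := 20 * time.Second // defaultDiscoveryHeartbeat
	if heartbeatSec > 0 {
		heartbeat = time.Duration(heartbeatSec) * time.Second
	}
	ttl := ttlFactor * heartbeat
	if ttlSec > 0 {
		ttl = time.Duration(ttlSec) * time.Second
		if heartbeatSec <= 0 {
			heartbeat = ttl / ttlFactor
		}
	}
	return heartbeat, ttl
}

func main() {
	hb, ttl := heartbeatAndTTL(0, 30)
	fmt.Println(hb, ttl) // 10s 30s
}
```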
-func (d *daemonDiscoveryReloader) advertiseHeartbeat(address string) {
- var ready bool
- if err := d.initHeartbeat(address); err == nil {
- ready = true
- close(d.readyCh)
- } else {
- logrus.WithError(err).Debug("First discovery heartbeat failed")
- }
-
- for {
- select {
- case <-d.ticker.C:
- if err := d.backend.Register(address); err != nil {
- logrus.Warnf("Registering as %q in discovery failed: %v", address, err)
- } else {
- if !ready {
- close(d.readyCh)
- ready = true
- }
- }
- case <-d.term:
- return
- }
- }
-}
-
-// initHeartbeat is used to do the first heartbeat. It uses a tight loop until
-// either the timeout period is reached or the heartbeat succeeds.
-func (d *daemonDiscoveryReloader) initHeartbeat(address string) error {
- // Set up a short ticker until the first heartbeat has succeeded
- t := time.NewTicker(500 * time.Millisecond)
- defer t.Stop()
- // timeout makes sure that after a period of time we stop being so aggressive trying to reach the discovery service
- timeout := time.After(60 * time.Second)
-
- for {
- select {
- case <-timeout:
- return errors.New("timeout waiting for initial discovery")
- case <-d.term:
- return errors.New("terminated")
- case <-t.C:
- if err := d.backend.Register(address); err == nil {
- return nil
- }
- }
- }
-}
-
-// Reload makes the watcher stop advertising and reconfigures it to advertise at a new address.
-func (d *daemonDiscoveryReloader) Reload(backendAddress, advertiseAddress string, clusterOpts map[string]string) error {
- d.Stop()
-
- heartbeat, backend, err := parseDiscoveryOptions(backendAddress, clusterOpts)
- if err != nil {
- return err
- }
-
- d.backend = backend
- d.ticker = time.NewTicker(heartbeat)
- d.readyCh = make(chan struct{})
-
- go d.advertiseHeartbeat(advertiseAddress)
- return nil
-}
-
-// Stop terminates the discovery advertising.
-func (d *daemonDiscoveryReloader) Stop() { - d.ticker.Stop() - d.term <- true -} - -func parseDiscoveryOptions(backendAddress string, clusterOpts map[string]string) (time.Duration, discovery.Backend, error) { - heartbeat, ttl, err := discoveryOpts(clusterOpts) - if err != nil { - return 0, nil, err - } - - backend, err := discovery.New(backendAddress, heartbeat, ttl, clusterOpts) - if err != nil { - return 0, nil, err - } - return heartbeat, backend, nil -} diff --git a/vendor/github.com/docker/docker/daemon/disk_usage.go b/vendor/github.com/docker/docker/daemon/disk_usage.go deleted file mode 100644 index 5bec60d17..000000000 --- a/vendor/github.com/docker/docker/daemon/disk_usage.go +++ /dev/null @@ -1,50 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "context" - "fmt" - "sync/atomic" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" -) - -// SystemDiskUsage returns information about the daemon data disk usage -func (daemon *Daemon) SystemDiskUsage(ctx context.Context) (*types.DiskUsage, error) { - if !atomic.CompareAndSwapInt32(&daemon.diskUsageRunning, 0, 1) { - return nil, fmt.Errorf("a disk usage operation is already running") - } - defer atomic.StoreInt32(&daemon.diskUsageRunning, 0) - - // Retrieve container list - allContainers, err := daemon.Containers(&types.ContainerListOptions{ - Size: true, - All: true, - }) - if err != nil { - return nil, fmt.Errorf("failed to retrieve container list: %v", err) - } - - // Get all top images with extra attributes - allImages, err := daemon.imageService.Images(filters.NewArgs(), false, true) - if err != nil { - return nil, fmt.Errorf("failed to retrieve image list: %v", err) - } - - localVolumes, err := daemon.volumes.LocalVolumesSize(ctx) - if err != nil { - return nil, err - } - - allLayersSize, err := daemon.imageService.LayerDiskUsage(ctx) - if err != nil { - return nil, err - } - - return &types.DiskUsage{ - LayersSize: allLayersSize, - Containers: allContainers, - Volumes: localVolumes, - Images: allImages, - }, nil -} diff --git a/vendor/github.com/docker/docker/daemon/errors.go b/vendor/github.com/docker/docker/daemon/errors.go deleted file mode 100644 index 6d02af3d5..000000000 --- a/vendor/github.com/docker/docker/daemon/errors.go +++ /dev/null @@ -1,155 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "fmt" - "strings" - "syscall" - - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" - "google.golang.org/grpc" -) - -func errNotRunning(id string) error { - return errdefs.Conflict(errors.Errorf("Container %s is not running", id)) -} - -func containerNotFound(id string) error { - return objNotFoundError{"container", id} -} - -type objNotFoundError struct { - object string - id string -} - -func (e objNotFoundError) Error() string { - return "No such " + e.object + ": " + e.id -} - -func (e objNotFoundError) NotFound() {} - -func errContainerIsRestarting(containerID string) error { - cause := errors.Errorf("Container %s is restarting, wait until the container is running", containerID) - return errdefs.Conflict(cause) -} - -func errExecNotFound(id string) error { - return objNotFoundError{"exec instance", id} -} - -func errExecPaused(id string) error { - cause := errors.Errorf("Container %s is paused, unpause the container before exec", id) - return errdefs.Conflict(cause) -} - -func errNotPaused(id string) error { - cause := errors.Errorf("Container %s is already paused", id) - return errdefs.Conflict(cause) -} - -type 
nameConflictError struct { - id string - name string -} - -func (e nameConflictError) Error() string { - return fmt.Sprintf("Conflict. The container name %q is already in use by container %q. You have to remove (or rename) that container to be able to reuse that name.", e.name, e.id) -} - -func (nameConflictError) Conflict() {} - -type containerNotModifiedError struct { - running bool -} - -func (e containerNotModifiedError) Error() string { - if e.running { - return "Container is already started" - } - return "Container is already stopped" -} - -func (e containerNotModifiedError) NotModified() {} - -type invalidIdentifier string - -func (e invalidIdentifier) Error() string { - return fmt.Sprintf("invalid name or ID supplied: %q", string(e)) -} - -func (invalidIdentifier) InvalidParameter() {} - -type duplicateMountPointError string - -func (e duplicateMountPointError) Error() string { - return "Duplicate mount point: " + string(e) -} -func (duplicateMountPointError) InvalidParameter() {} - -type containerFileNotFound struct { - file string - container string -} - -func (e containerFileNotFound) Error() string { - return "Could not find the file " + e.file + " in container " + e.container -} - -func (containerFileNotFound) NotFound() {} - -type invalidFilter struct { - filter string - value interface{} -} - -func (e invalidFilter) Error() string { - msg := "Invalid filter '" + e.filter - if e.value != nil { - msg += fmt.Sprintf("=%s", e.value) - } - return msg + "'" -} - -func (e invalidFilter) InvalidParameter() {} - -type startInvalidConfigError string - -func (e startInvalidConfigError) Error() string { - return string(e) -} - -func (e startInvalidConfigError) InvalidParameter() {} // Is this right??? - -func translateContainerdStartErr(cmd string, setExitCode func(int), err error) error { - errDesc := grpc.ErrorDesc(err) - contains := func(s1, s2 string) bool { - return strings.Contains(strings.ToLower(s1), s2) - } - var retErr = errdefs.Unknown(errors.New(errDesc)) - // if we receive an internal error from the initial start of a container then lets - // return it instead of entering the restart loop - // set to 127 for container cmd not found/does not exist) - if contains(errDesc, cmd) && - (contains(errDesc, "executable file not found") || - contains(errDesc, "no such file or directory") || - contains(errDesc, "system cannot find the file specified")) { - setExitCode(127) - retErr = startInvalidConfigError(errDesc) - } - // set to 126 for container cmd can't be invoked errors - if contains(errDesc, syscall.EACCES.Error()) { - setExitCode(126) - retErr = startInvalidConfigError(errDesc) - } - - // attempted to mount a file onto a directory, or a directory onto a file, maybe from user specified bind mounts - if contains(errDesc, syscall.ENOTDIR.Error()) { - errDesc += ": Are you trying to mount a directory onto a file (or vice-versa)? 
Check if the specified host path exists and is the expected type" - setExitCode(127) - retErr = startInvalidConfigError(errDesc) - } - - // TODO: it would be nice to get some better errors from containerd so we can return better errors here - return retErr -} diff --git a/vendor/github.com/docker/docker/daemon/events.go b/vendor/github.com/docker/docker/daemon/events.go deleted file mode 100644 index cf1634a19..000000000 --- a/vendor/github.com/docker/docker/daemon/events.go +++ /dev/null @@ -1,308 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "context" - "strconv" - "strings" - "time" - - "github.com/docker/docker/api/types/events" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/container" - daemonevents "github.com/docker/docker/daemon/events" - "github.com/docker/libnetwork" - swarmapi "github.com/docker/swarmkit/api" - gogotypes "github.com/gogo/protobuf/types" - "github.com/sirupsen/logrus" -) - -var ( - clusterEventAction = map[swarmapi.WatchActionKind]string{ - swarmapi.WatchActionKindCreate: "create", - swarmapi.WatchActionKindUpdate: "update", - swarmapi.WatchActionKindRemove: "remove", - } -) - -// LogContainerEvent generates an event related to a container with only the default attributes. -func (daemon *Daemon) LogContainerEvent(container *container.Container, action string) { - daemon.LogContainerEventWithAttributes(container, action, map[string]string{}) -} - -// LogContainerEventWithAttributes generates an event related to a container with specific given attributes. -func (daemon *Daemon) LogContainerEventWithAttributes(container *container.Container, action string, attributes map[string]string) { - copyAttributes(attributes, container.Config.Labels) - if container.Config.Image != "" { - attributes["image"] = container.Config.Image - } - attributes["name"] = strings.TrimLeft(container.Name, "/") - - actor := events.Actor{ - ID: container.ID, - Attributes: attributes, - } - daemon.EventsService.Log(action, events.ContainerEventType, actor) -} - -// LogPluginEvent generates an event related to a plugin with only the default attributes. -func (daemon *Daemon) LogPluginEvent(pluginID, refName, action string) { - daemon.LogPluginEventWithAttributes(pluginID, refName, action, map[string]string{}) -} - -// LogPluginEventWithAttributes generates an event related to a plugin with specific given attributes. -func (daemon *Daemon) LogPluginEventWithAttributes(pluginID, refName, action string, attributes map[string]string) { - attributes["name"] = refName - actor := events.Actor{ - ID: pluginID, - Attributes: attributes, - } - daemon.EventsService.Log(action, events.PluginEventType, actor) -} - -// LogVolumeEvent generates an event related to a volume. -func (daemon *Daemon) LogVolumeEvent(volumeID, action string, attributes map[string]string) { - actor := events.Actor{ - ID: volumeID, - Attributes: attributes, - } - daemon.EventsService.Log(action, events.VolumeEventType, actor) -} - -// LogNetworkEvent generates an event related to a network with only the default attributes. -func (daemon *Daemon) LogNetworkEvent(nw libnetwork.Network, action string) { - daemon.LogNetworkEventWithAttributes(nw, action, map[string]string{}) -} - -// LogNetworkEventWithAttributes generates an event related to a network with specific given attributes. 
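Every Log*Event helper above reduces to the same move: build an events.Actor and hand it to EventsService.Log with an action string and an event type. A minimal standalone sketch of that payload shape; the ID and attribute values here are invented for illustration.

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/events"
)

func main() {
	// Hypothetical values: a truncated container ID plus the default
	// attributes LogContainerEventWithAttributes fills in.
	actor := events.Actor{
		ID: "9f2c8e1d24ab",
		Attributes: map[string]string{
			"image": "alpine:3.9",
			"name":  "web-1",
		},
	}
	// The helpers above pass this to EventsService.Log as, e.g.:
	//   Log("start", events.ContainerEventType, actor)
	fmt.Printf("%s/%s: %v\n", events.ContainerEventType, actor.ID, actor.Attributes)
}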
-func (daemon *Daemon) LogNetworkEventWithAttributes(nw libnetwork.Network, action string, attributes map[string]string) {
- attributes["name"] = nw.Name()
- attributes["type"] = nw.Type()
- actor := events.Actor{
- ID: nw.ID(),
- Attributes: attributes,
- }
- daemon.EventsService.Log(action, events.NetworkEventType, actor)
-}
-
-// LogDaemonEventWithAttributes generates an event related to the daemon itself with specific given attributes.
-func (daemon *Daemon) LogDaemonEventWithAttributes(action string, attributes map[string]string) {
- if daemon.EventsService != nil {
- if info, err := daemon.SystemInfo(); err == nil && info.Name != "" {
- attributes["name"] = info.Name
- }
- actor := events.Actor{
- ID: daemon.ID,
- Attributes: attributes,
- }
- daemon.EventsService.Log(action, events.DaemonEventType, actor)
- }
-}
-
-// SubscribeToEvents returns the current record of events and a channel to stream new events from.
-func (daemon *Daemon) SubscribeToEvents(since, until time.Time, filter filters.Args) ([]events.Message, chan interface{}) {
- ef := daemonevents.NewFilter(filter)
- return daemon.EventsService.SubscribeTopic(since, until, ef)
-}
-
-// UnsubscribeFromEvents stops the event subscription for a client by closing the
-// channel to which the daemon sends events.
-func (daemon *Daemon) UnsubscribeFromEvents(listener chan interface{}) {
- daemon.EventsService.Evict(listener)
-}
-
-// copyAttributes guarantees that labels are not mutated by event triggers.
-func copyAttributes(attributes, labels map[string]string) {
- if labels == nil {
- return
- }
- for k, v := range labels {
- attributes[k] = v
- }
-}
-
-// ProcessClusterNotifications gets changes from the store and adds them to the event list.
-func (daemon *Daemon) ProcessClusterNotifications(ctx context.Context, watchStream chan *swarmapi.WatchMessage) {
- for {
- select {
- case <-ctx.Done():
- return
- case message, ok := <-watchStream:
- if !ok {
- logrus.Debug("cluster event channel has stopped")
- return
- }
- daemon.generateClusterEvent(message)
- }
- }
-}
-
-func (daemon *Daemon) generateClusterEvent(msg *swarmapi.WatchMessage) {
- for _, event := range msg.Events {
- if event.Object == nil {
- logrus.Errorf("event without object: %v", event)
- continue
- }
- switch v := event.Object.GetObject().(type) {
- case *swarmapi.Object_Node:
- daemon.logNodeEvent(event.Action, v.Node, event.OldObject.GetNode())
- case *swarmapi.Object_Service:
- daemon.logServiceEvent(event.Action, v.Service, event.OldObject.GetService())
- case *swarmapi.Object_Network:
- daemon.logNetworkEvent(event.Action, v.Network, event.OldObject.GetNetwork())
- case *swarmapi.Object_Secret:
- daemon.logSecretEvent(event.Action, v.Secret, event.OldObject.GetSecret())
- case *swarmapi.Object_Config:
- daemon.logConfigEvent(event.Action, v.Config, event.OldObject.GetConfig())
- default:
- logrus.Warnf("unrecognized event: %v", event)
- }
- }
-}
-
-func (daemon *Daemon) logNetworkEvent(action swarmapi.WatchActionKind, net *swarmapi.Network, oldNet *swarmapi.Network) {
- attributes := map[string]string{
- "name": net.Spec.Annotations.Name,
- }
- eventTime := eventTimestamp(net.Meta, action)
- daemon.logClusterEvent(action, net.ID, "network", attributes, eventTime)
-}
-
-func (daemon *Daemon) logSecretEvent(action swarmapi.WatchActionKind, secret *swarmapi.Secret, oldSecret *swarmapi.Secret) {
- attributes := map[string]string{
- "name": secret.Spec.Annotations.Name,
- }
- eventTime := eventTimestamp(secret.Meta, action)
- 
daemon.logClusterEvent(action, secret.ID, "secret", attributes, eventTime) -} - -func (daemon *Daemon) logConfigEvent(action swarmapi.WatchActionKind, config *swarmapi.Config, oldConfig *swarmapi.Config) { - attributes := map[string]string{ - "name": config.Spec.Annotations.Name, - } - eventTime := eventTimestamp(config.Meta, action) - daemon.logClusterEvent(action, config.ID, "config", attributes, eventTime) -} - -func (daemon *Daemon) logNodeEvent(action swarmapi.WatchActionKind, node *swarmapi.Node, oldNode *swarmapi.Node) { - name := node.Spec.Annotations.Name - if name == "" && node.Description != nil { - name = node.Description.Hostname - } - attributes := map[string]string{ - "name": name, - } - eventTime := eventTimestamp(node.Meta, action) - // In an update event, display the changes in attributes - if action == swarmapi.WatchActionKindUpdate && oldNode != nil { - if node.Spec.Availability != oldNode.Spec.Availability { - attributes["availability.old"] = strings.ToLower(oldNode.Spec.Availability.String()) - attributes["availability.new"] = strings.ToLower(node.Spec.Availability.String()) - } - if node.Role != oldNode.Role { - attributes["role.old"] = strings.ToLower(oldNode.Role.String()) - attributes["role.new"] = strings.ToLower(node.Role.String()) - } - if node.Status.State != oldNode.Status.State { - attributes["state.old"] = strings.ToLower(oldNode.Status.State.String()) - attributes["state.new"] = strings.ToLower(node.Status.State.String()) - } - // This handles change within manager role - if node.ManagerStatus != nil && oldNode.ManagerStatus != nil { - // leader change - if node.ManagerStatus.Leader != oldNode.ManagerStatus.Leader { - if node.ManagerStatus.Leader { - attributes["leader.old"] = "false" - attributes["leader.new"] = "true" - } else { - attributes["leader.old"] = "true" - attributes["leader.new"] = "false" - } - } - if node.ManagerStatus.Reachability != oldNode.ManagerStatus.Reachability { - attributes["reachability.old"] = strings.ToLower(oldNode.ManagerStatus.Reachability.String()) - attributes["reachability.new"] = strings.ToLower(node.ManagerStatus.Reachability.String()) - } - } - } - - daemon.logClusterEvent(action, node.ID, "node", attributes, eventTime) -} - -func (daemon *Daemon) logServiceEvent(action swarmapi.WatchActionKind, service *swarmapi.Service, oldService *swarmapi.Service) { - attributes := map[string]string{ - "name": service.Spec.Annotations.Name, - } - eventTime := eventTimestamp(service.Meta, action) - - if action == swarmapi.WatchActionKindUpdate && oldService != nil { - // check image - if x, ok := service.Spec.Task.GetRuntime().(*swarmapi.TaskSpec_Container); ok { - containerSpec := x.Container - if y, ok := oldService.Spec.Task.GetRuntime().(*swarmapi.TaskSpec_Container); ok { - oldContainerSpec := y.Container - if containerSpec.Image != oldContainerSpec.Image { - attributes["image.old"] = oldContainerSpec.Image - attributes["image.new"] = containerSpec.Image - } - } else { - // This should not happen. 
- logrus.Errorf("service %s runtime changed from %T to %T", service.Spec.Annotations.Name, oldService.Spec.Task.GetRuntime(), service.Spec.Task.GetRuntime()) - } - } - // check replicated count change - if x, ok := service.Spec.GetMode().(*swarmapi.ServiceSpec_Replicated); ok { - replicas := x.Replicated.Replicas - if y, ok := oldService.Spec.GetMode().(*swarmapi.ServiceSpec_Replicated); ok { - oldReplicas := y.Replicated.Replicas - if replicas != oldReplicas { - attributes["replicas.old"] = strconv.FormatUint(oldReplicas, 10) - attributes["replicas.new"] = strconv.FormatUint(replicas, 10) - } - } else { - // This should not happen. - logrus.Errorf("service %s mode changed from %T to %T", service.Spec.Annotations.Name, oldService.Spec.GetMode(), service.Spec.GetMode()) - } - } - if service.UpdateStatus != nil { - if oldService.UpdateStatus == nil { - attributes["updatestate.new"] = strings.ToLower(service.UpdateStatus.State.String()) - } else if service.UpdateStatus.State != oldService.UpdateStatus.State { - attributes["updatestate.old"] = strings.ToLower(oldService.UpdateStatus.State.String()) - attributes["updatestate.new"] = strings.ToLower(service.UpdateStatus.State.String()) - } - } - } - daemon.logClusterEvent(action, service.ID, "service", attributes, eventTime) -} - -func (daemon *Daemon) logClusterEvent(action swarmapi.WatchActionKind, id, eventType string, attributes map[string]string, eventTime time.Time) { - actor := events.Actor{ - ID: id, - Attributes: attributes, - } - - jm := events.Message{ - Action: clusterEventAction[action], - Type: eventType, - Actor: actor, - Scope: "swarm", - Time: eventTime.UTC().Unix(), - TimeNano: eventTime.UTC().UnixNano(), - } - daemon.EventsService.PublishMessage(jm) -} - -func eventTimestamp(meta swarmapi.Meta, action swarmapi.WatchActionKind) time.Time { - var eventTime time.Time - switch action { - case swarmapi.WatchActionKindCreate: - eventTime, _ = gogotypes.TimestampFromProto(meta.CreatedAt) - case swarmapi.WatchActionKindUpdate: - eventTime, _ = gogotypes.TimestampFromProto(meta.UpdatedAt) - case swarmapi.WatchActionKindRemove: - // There is no timestamp from store message for remove operations. - // Use current time. - eventTime = time.Now() - } - return eventTime -} diff --git a/vendor/github.com/docker/docker/daemon/events/events.go b/vendor/github.com/docker/docker/daemon/events/events.go deleted file mode 100644 index 31af271fe..000000000 --- a/vendor/github.com/docker/docker/daemon/events/events.go +++ /dev/null @@ -1,165 +0,0 @@ -package events // import "github.com/docker/docker/daemon/events" - -import ( - "sync" - "time" - - eventtypes "github.com/docker/docker/api/types/events" - "github.com/docker/docker/pkg/pubsub" -) - -const ( - eventsLimit = 256 - bufferSize = 1024 -) - -// Events is pubsub channel for events generated by the engine. -type Events struct { - mu sync.Mutex - events []eventtypes.Message - pub *pubsub.Publisher -} - -// New returns new *Events instance -func New() *Events { - return &Events{ - events: make([]eventtypes.Message, 0, eventsLimit), - pub: pubsub.NewPublisher(100*time.Millisecond, bufferSize), - } -} - -// Subscribe adds new listener to events, returns slice of 256 stored -// last events, a channel in which you can expect new events (in form -// of interface{}, so you need type assertion), and a function to call -// to stop the stream of events. 
-func (e *Events) Subscribe() ([]eventtypes.Message, chan interface{}, func()) { - eventSubscribers.Inc() - e.mu.Lock() - current := make([]eventtypes.Message, len(e.events)) - copy(current, e.events) - l := e.pub.Subscribe() - e.mu.Unlock() - - cancel := func() { - e.Evict(l) - } - return current, l, cancel -} - -// SubscribeTopic adds new listener to events, returns slice of 256 stored -// last events, a channel in which you can expect new events (in form -// of interface{}, so you need type assertion). -func (e *Events) SubscribeTopic(since, until time.Time, ef *Filter) ([]eventtypes.Message, chan interface{}) { - eventSubscribers.Inc() - e.mu.Lock() - - var topic func(m interface{}) bool - if ef != nil && ef.filter.Len() > 0 { - topic = func(m interface{}) bool { return ef.Include(m.(eventtypes.Message)) } - } - - buffered := e.loadBufferedEvents(since, until, topic) - - var ch chan interface{} - if topic != nil { - ch = e.pub.SubscribeTopic(topic) - } else { - // Subscribe to all events if there are no filters - ch = e.pub.Subscribe() - } - - e.mu.Unlock() - return buffered, ch -} - -// Evict evicts listener from pubsub -func (e *Events) Evict(l chan interface{}) { - eventSubscribers.Dec() - e.pub.Evict(l) -} - -// Log creates a local scope message and publishes it -func (e *Events) Log(action, eventType string, actor eventtypes.Actor) { - now := time.Now().UTC() - jm := eventtypes.Message{ - Action: action, - Type: eventType, - Actor: actor, - Scope: "local", - Time: now.Unix(), - TimeNano: now.UnixNano(), - } - - // fill deprecated fields for container and images - switch eventType { - case eventtypes.ContainerEventType: - jm.ID = actor.ID - jm.Status = action - jm.From = actor.Attributes["image"] - case eventtypes.ImageEventType: - jm.ID = actor.ID - jm.Status = action - } - - e.PublishMessage(jm) -} - -// PublishMessage broadcasts event to listeners. Each listener has 100 milliseconds to -// receive the event or it will be skipped. -func (e *Events) PublishMessage(jm eventtypes.Message) { - eventsCounter.Inc() - - e.mu.Lock() - if len(e.events) == cap(e.events) { - // discard oldest event - copy(e.events, e.events[1:]) - e.events[len(e.events)-1] = jm - } else { - e.events = append(e.events, jm) - } - e.mu.Unlock() - e.pub.Publish(jm) -} - -// SubscribersCount returns number of event listeners -func (e *Events) SubscribersCount() int { - return e.pub.Len() -} - -// loadBufferedEvents iterates over the cached events in the buffer -// and returns those that were emitted between two specific dates. -// It uses `time.Unix(seconds, nanoseconds)` to generate valid dates with those arguments. -// It filters those buffered messages with a topic function if it's not nil, otherwise it adds all messages. -func (e *Events) loadBufferedEvents(since, until time.Time, topic func(interface{}) bool) []eventtypes.Message { - var buffered []eventtypes.Message - if since.IsZero() && until.IsZero() { - return buffered - } - - var sinceNanoUnix int64 - if !since.IsZero() { - sinceNanoUnix = since.UnixNano() - } - - var untilNanoUnix int64 - if !until.IsZero() { - untilNanoUnix = until.UnixNano() - } - - for i := len(e.events) - 1; i >= 0; i-- { - ev := e.events[i] - - if ev.TimeNano < sinceNanoUnix { - break - } - - if untilNanoUnix > 0 && ev.TimeNano > untilNanoUnix { - continue - } - - if topic == nil || topic(ev) { - buffered = append([]eventtypes.Message{ev}, buffered...) 
- } - } - return buffered -} diff --git a/vendor/github.com/docker/docker/daemon/events/filter.go b/vendor/github.com/docker/docker/daemon/events/filter.go deleted file mode 100644 index da06f18b0..000000000 --- a/vendor/github.com/docker/docker/daemon/events/filter.go +++ /dev/null @@ -1,138 +0,0 @@ -package events // import "github.com/docker/docker/daemon/events" - -import ( - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types/events" - "github.com/docker/docker/api/types/filters" -) - -// Filter can filter out docker events from a stream -type Filter struct { - filter filters.Args -} - -// NewFilter creates a new Filter -func NewFilter(filter filters.Args) *Filter { - return &Filter{filter: filter} -} - -// Include returns true when the event ev is included by the filters -func (ef *Filter) Include(ev events.Message) bool { - return ef.matchEvent(ev) && - ef.filter.ExactMatch("type", ev.Type) && - ef.matchScope(ev.Scope) && - ef.matchDaemon(ev) && - ef.matchContainer(ev) && - ef.matchPlugin(ev) && - ef.matchVolume(ev) && - ef.matchNetwork(ev) && - ef.matchImage(ev) && - ef.matchNode(ev) && - ef.matchService(ev) && - ef.matchSecret(ev) && - ef.matchConfig(ev) && - ef.matchLabels(ev.Actor.Attributes) -} - -func (ef *Filter) matchEvent(ev events.Message) bool { - // #25798 if an event filter contains either health_status, exec_create or exec_start without a colon - // Let's to a FuzzyMatch instead of an ExactMatch. - if ef.filterContains("event", map[string]struct{}{"health_status": {}, "exec_create": {}, "exec_start": {}}) { - return ef.filter.FuzzyMatch("event", ev.Action) - } - return ef.filter.ExactMatch("event", ev.Action) -} - -func (ef *Filter) filterContains(field string, values map[string]struct{}) bool { - for _, v := range ef.filter.Get(field) { - if _, ok := values[v]; ok { - return true - } - } - return false -} - -func (ef *Filter) matchScope(scope string) bool { - if !ef.filter.Contains("scope") { - return true - } - return ef.filter.ExactMatch("scope", scope) -} - -func (ef *Filter) matchLabels(attributes map[string]string) bool { - if !ef.filter.Contains("label") { - return true - } - return ef.filter.MatchKVList("label", attributes) -} - -func (ef *Filter) matchDaemon(ev events.Message) bool { - return ef.fuzzyMatchName(ev, events.DaemonEventType) -} - -func (ef *Filter) matchContainer(ev events.Message) bool { - return ef.fuzzyMatchName(ev, events.ContainerEventType) -} - -func (ef *Filter) matchPlugin(ev events.Message) bool { - return ef.fuzzyMatchName(ev, events.PluginEventType) -} - -func (ef *Filter) matchVolume(ev events.Message) bool { - return ef.fuzzyMatchName(ev, events.VolumeEventType) -} - -func (ef *Filter) matchNetwork(ev events.Message) bool { - return ef.fuzzyMatchName(ev, events.NetworkEventType) -} - -func (ef *Filter) matchService(ev events.Message) bool { - return ef.fuzzyMatchName(ev, events.ServiceEventType) -} - -func (ef *Filter) matchNode(ev events.Message) bool { - return ef.fuzzyMatchName(ev, events.NodeEventType) -} - -func (ef *Filter) matchSecret(ev events.Message) bool { - return ef.fuzzyMatchName(ev, events.SecretEventType) -} - -func (ef *Filter) matchConfig(ev events.Message) bool { - return ef.fuzzyMatchName(ev, events.ConfigEventType) -} - -func (ef *Filter) fuzzyMatchName(ev events.Message, eventType string) bool { - return ef.filter.FuzzyMatch(eventType, ev.Actor.ID) || - ef.filter.FuzzyMatch(eventType, ev.Actor.Attributes["name"]) -} - -// matchImage matches against both event.Actor.ID (for image 
events) -// and event.Actor.Attributes["image"] (for container events), so that any container that was created -// from an image will be included in the image events. Also compare both -// against the stripped repo name without any tags. -func (ef *Filter) matchImage(ev events.Message) bool { - id := ev.Actor.ID - nameAttr := "image" - var imageName string - - if ev.Type == events.ImageEventType { - nameAttr = "name" - } - - if n, ok := ev.Actor.Attributes[nameAttr]; ok { - imageName = n - } - return ef.filter.ExactMatch("image", id) || - ef.filter.ExactMatch("image", imageName) || - ef.filter.ExactMatch("image", stripTag(id)) || - ef.filter.ExactMatch("image", stripTag(imageName)) -} - -func stripTag(image string) string { - ref, err := reference.ParseNormalizedNamed(image) - if err != nil { - return image - } - return reference.FamiliarName(ref) -} diff --git a/vendor/github.com/docker/docker/daemon/events/metrics.go b/vendor/github.com/docker/docker/daemon/events/metrics.go deleted file mode 100644 index 199858d6e..000000000 --- a/vendor/github.com/docker/docker/daemon/events/metrics.go +++ /dev/null @@ -1,15 +0,0 @@ -package events // import "github.com/docker/docker/daemon/events" - -import "github.com/docker/go-metrics" - -var ( - eventsCounter metrics.Counter - eventSubscribers metrics.Gauge -) - -func init() { - ns := metrics.NewNamespace("engine", "daemon", nil) - eventsCounter = ns.NewCounter("events", "The number of events logged") - eventSubscribers = ns.NewGauge("events_subscribers", "The number of current subscribers to events", metrics.Total) - metrics.Register(ns) -} diff --git a/vendor/github.com/docker/docker/daemon/events/testutils/testutils.go b/vendor/github.com/docker/docker/daemon/events/testutils/testutils.go deleted file mode 100644 index b6766adb9..000000000 --- a/vendor/github.com/docker/docker/daemon/events/testutils/testutils.go +++ /dev/null @@ -1,76 +0,0 @@ -package testutils // import "github.com/docker/docker/daemon/events/testutils" - -import ( - "fmt" - "regexp" - "strings" - "time" - - "github.com/docker/docker/api/types/events" - timetypes "github.com/docker/docker/api/types/time" -) - -var ( - reTimestamp = `(?P\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{9}(:?(:?(:?-|\+)\d{2}:\d{2})|Z))` - reEventType = `(?P\w+)` - reAction = `(?P\w+)` - reID = `(?P[^\s]+)` - reAttributes = `(\s\((?P[^\)]+)\))?` - reString = fmt.Sprintf(`\A%s\s%s\s%s\s%s%s\z`, reTimestamp, reEventType, reAction, reID, reAttributes) - - // eventCliRegexp is a regular expression that matches all possible event outputs in the cli - eventCliRegexp = regexp.MustCompile(reString) -) - -// ScanMap turns an event string like the default ones formatted in the cli output -// and turns it into map. -func ScanMap(text string) map[string]string { - matches := eventCliRegexp.FindAllStringSubmatch(text, -1) - md := map[string]string{} - if len(matches) == 0 { - return md - } - - names := eventCliRegexp.SubexpNames() - for i, n := range matches[0] { - md[names[i]] = n - } - return md -} - -// Scan turns an event string like the default ones formatted in the cli output -// and turns it into an event message. 
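A quick sketch of what stripTag above yields for a few typical references; ParseNormalizedNamed and FamiliarName are the same github.com/docker/distribution/reference calls imported by filter.go, and the image names are examples only.

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

// stripTag, as defined in filter.go: normalize the reference, then render
// the familiar (tag-free, registry-qualified only when needed) name.
func stripTag(image string) string {
	ref, err := reference.ParseNormalizedNamed(image)
	if err != nil {
		return image
	}
	return reference.FamiliarName(ref)
}

func main() {
	fmt.Println(stripTag("redis:5.0"))                  // redis
	fmt.Println(stripTag("docker.io/library/redis"))    // redis
	fmt.Println(stripTag("myregistry:5000/app:latest")) // myregistry:5000/app
}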
-func Scan(text string) (*events.Message, error) { - md := ScanMap(text) - if len(md) == 0 { - return nil, fmt.Errorf("text is not an event: %s", text) - } - - f, err := timetypes.GetTimestamp(md["timestamp"], time.Now()) - if err != nil { - return nil, err - } - - t, tn, err := timetypes.ParseTimestamps(f, -1) - if err != nil { - return nil, err - } - - attrs := make(map[string]string) - for _, a := range strings.SplitN(md["attributes"], ", ", -1) { - kv := strings.SplitN(a, "=", 2) - attrs[kv[0]] = kv[1] - } - - tu := time.Unix(t, tn) - return &events.Message{ - Time: t, - TimeNano: tu.UnixNano(), - Type: md["eventType"], - Action: md["action"], - Actor: events.Actor{ - ID: md["id"], - Attributes: attrs, - }, - }, nil -} diff --git a/vendor/github.com/docker/docker/daemon/exec.go b/vendor/github.com/docker/docker/daemon/exec.go deleted file mode 100644 index f0b43d725..000000000 --- a/vendor/github.com/docker/docker/daemon/exec.go +++ /dev/null @@ -1,324 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "context" - "fmt" - "io" - "strings" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/strslice" - "github.com/docker/docker/container" - "github.com/docker/docker/container/stream" - "github.com/docker/docker/daemon/exec" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/signal" - "github.com/docker/docker/pkg/term" - specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// Seconds to wait after sending TERM before trying KILL -const termProcessTimeout = 10 - -func (d *Daemon) registerExecCommand(container *container.Container, config *exec.Config) { - // Storing execs in container in order to kill them gracefully whenever the container is stopped or removed. - container.ExecCommands.Add(config.ID, config) - // Storing execs in daemon for easy access via Engine API. - d.execCommands.Add(config.ID, config) -} - -// ExecExists looks up the exec instance and returns a bool if it exists or not. -// It will also return the error produced by `getConfig` -func (d *Daemon) ExecExists(name string) (bool, error) { - if _, err := d.getExecConfig(name); err != nil { - return false, err - } - return true, nil -} - -// getExecConfig looks up the exec instance by name. If the container associated -// with the exec instance is stopped or paused, it will return an error. -func (d *Daemon) getExecConfig(name string) (*exec.Config, error) { - ec := d.execCommands.Get(name) - if ec == nil { - return nil, errExecNotFound(name) - } - - // If the exec is found but its container is not in the daemon's list of - // containers then it must have been deleted, in which case instead of - // saying the container isn't running, we should return a 404 so that - // the user sees the same error now that they will after the - // 5 minute clean-up loop is run which erases old/dead execs. 
- container := d.containers.Get(ec.ContainerID) - if container == nil { - return nil, containerNotFound(name) - } - if !container.IsRunning() { - return nil, fmt.Errorf("Container %s is not running: %s", container.ID, container.State.String()) - } - if container.IsPaused() { - return nil, errExecPaused(container.ID) - } - if container.IsRestarting() { - return nil, errContainerIsRestarting(container.ID) - } - return ec, nil -} - -func (d *Daemon) unregisterExecCommand(container *container.Container, execConfig *exec.Config) { - container.ExecCommands.Delete(execConfig.ID, execConfig.Pid) - d.execCommands.Delete(execConfig.ID, execConfig.Pid) -} - -func (d *Daemon) getActiveContainer(name string) (*container.Container, error) { - container, err := d.GetContainer(name) - if err != nil { - return nil, err - } - - if !container.IsRunning() { - return nil, errNotRunning(container.ID) - } - if container.IsPaused() { - return nil, errExecPaused(name) - } - if container.IsRestarting() { - return nil, errContainerIsRestarting(container.ID) - } - return container, nil -} - -// ContainerExecCreate sets up an exec in a running container. -func (d *Daemon) ContainerExecCreate(name string, config *types.ExecConfig) (string, error) { - cntr, err := d.getActiveContainer(name) - if err != nil { - return "", err - } - - cmd := strslice.StrSlice(config.Cmd) - entrypoint, args := d.getEntrypointAndArgs(strslice.StrSlice{}, cmd) - - keys := []byte{} - if config.DetachKeys != "" { - keys, err = term.ToBytes(config.DetachKeys) - if err != nil { - err = fmt.Errorf("Invalid escape keys (%s) provided", config.DetachKeys) - return "", err - } - } - - execConfig := exec.NewConfig() - execConfig.OpenStdin = config.AttachStdin - execConfig.OpenStdout = config.AttachStdout - execConfig.OpenStderr = config.AttachStderr - execConfig.ContainerID = cntr.ID - execConfig.DetachKeys = keys - execConfig.Entrypoint = entrypoint - execConfig.Args = args - execConfig.Tty = config.Tty - execConfig.Privileged = config.Privileged - execConfig.User = config.User - execConfig.WorkingDir = config.WorkingDir - - linkedEnv, err := d.setupLinkedContainers(cntr) - if err != nil { - return "", err - } - execConfig.Env = container.ReplaceOrAppendEnvValues(cntr.CreateDaemonEnvironment(config.Tty, linkedEnv), config.Env) - if len(execConfig.User) == 0 { - execConfig.User = cntr.Config.User - } - if len(execConfig.WorkingDir) == 0 { - execConfig.WorkingDir = cntr.Config.WorkingDir - } - - d.registerExecCommand(cntr, execConfig) - - attributes := map[string]string{ - "execID": execConfig.ID, - } - d.LogContainerEventWithAttributes(cntr, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " "), attributes) - - return execConfig.ID, nil -} - -// ContainerExecStart starts a previously set up exec instance. The -// std streams are set up. -// If ctx is cancelled, the process is terminated. 
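A caller-side sketch for the two exec entry points in this file: ContainerExecCreate registers the exec, ContainerExecStart wires the streams and runs it. The daemon handle and container name below are assumptions; the ExecConfig fields are exactly the ones ContainerExecCreate reads above.

package main

import (
	"context"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/daemon"
)

// runExec sets up and runs a command inside a running container.
func runExec(d *daemon.Daemon, containerName string) error {
	id, err := d.ContainerExecCreate(containerName, &types.ExecConfig{
		Cmd:          []string{"sh", "-c", "echo hello"},
		AttachStdout: true,
		AttachStderr: true,
		Env:          []string{"FOO=bar"}, // merged with the container env
	})
	if err != nil {
		return err
	}
	// Cancelling the context TERMs (then KILLs) the exec'd process,
	// per the signal handling in ContainerExecStart below.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	return d.ContainerExecStart(ctx, id, nil, os.Stdout, os.Stderr)
}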
-func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.Reader, stdout io.Writer, stderr io.Writer) (err error) { - var ( - cStdin io.ReadCloser - cStdout, cStderr io.Writer - ) - - ec, err := d.getExecConfig(name) - if err != nil { - return errExecNotFound(name) - } - - ec.Lock() - if ec.ExitCode != nil { - ec.Unlock() - err := fmt.Errorf("Error: Exec command %s has already run", ec.ID) - return errdefs.Conflict(err) - } - - if ec.Running { - ec.Unlock() - return errdefs.Conflict(fmt.Errorf("Error: Exec command %s is already running", ec.ID)) - } - ec.Running = true - ec.Unlock() - - c := d.containers.Get(ec.ContainerID) - logrus.Debugf("starting exec command %s in container %s", ec.ID, c.ID) - attributes := map[string]string{ - "execID": ec.ID, - } - d.LogContainerEventWithAttributes(c, "exec_start: "+ec.Entrypoint+" "+strings.Join(ec.Args, " "), attributes) - - defer func() { - if err != nil { - ec.Lock() - ec.Running = false - exitCode := 126 - ec.ExitCode = &exitCode - if err := ec.CloseStreams(); err != nil { - logrus.Errorf("failed to cleanup exec %s streams: %s", c.ID, err) - } - ec.Unlock() - c.ExecCommands.Delete(ec.ID, ec.Pid) - } - }() - - if ec.OpenStdin && stdin != nil { - r, w := io.Pipe() - go func() { - defer w.Close() - defer logrus.Debug("Closing buffered stdin pipe") - pools.Copy(w, stdin) - }() - cStdin = r - } - if ec.OpenStdout { - cStdout = stdout - } - if ec.OpenStderr { - cStderr = stderr - } - - if ec.OpenStdin { - ec.StreamConfig.NewInputPipes() - } else { - ec.StreamConfig.NewNopInputPipe() - } - - p := &specs.Process{ - Args: append([]string{ec.Entrypoint}, ec.Args...), - Env: ec.Env, - Terminal: ec.Tty, - Cwd: ec.WorkingDir, - } - if p.Cwd == "" { - p.Cwd = "/" - } - - if err := d.execSetPlatformOpt(c, ec, p); err != nil { - return err - } - - attachConfig := stream.AttachConfig{ - TTY: ec.Tty, - UseStdin: cStdin != nil, - UseStdout: cStdout != nil, - UseStderr: cStderr != nil, - Stdin: cStdin, - Stdout: cStdout, - Stderr: cStderr, - DetachKeys: ec.DetachKeys, - CloseStdin: true, - } - ec.StreamConfig.AttachStreams(&attachConfig) - attachErr := ec.StreamConfig.CopyStreams(ctx, &attachConfig) - - // Synchronize with libcontainerd event loop - ec.Lock() - c.ExecCommands.Lock() - systemPid, err := d.containerd.Exec(ctx, c.ID, ec.ID, p, cStdin != nil, ec.InitializeStdio) - // the exec context should be ready, or error happened. 
- // close the chan to notify readiness - close(ec.Started) - if err != nil { - c.ExecCommands.Unlock() - ec.Unlock() - return translateContainerdStartErr(ec.Entrypoint, ec.SetExitCode, err) - } - ec.Pid = systemPid - c.ExecCommands.Unlock() - ec.Unlock() - - select { - case <-ctx.Done(): - logrus.Debugf("Sending TERM signal to process %v in container %v", name, c.ID) - d.containerd.SignalProcess(ctx, c.ID, name, int(signal.SignalMap["TERM"])) - select { - case <-time.After(termProcessTimeout * time.Second): - logrus.Infof("Container %v, process %v failed to exit within %d seconds of signal TERM - using the force", c.ID, name, termProcessTimeout) - d.containerd.SignalProcess(ctx, c.ID, name, int(signal.SignalMap["KILL"])) - case <-attachErr: - // TERM signal worked - } - return ctx.Err() - case err := <-attachErr: - if err != nil { - if _, ok := err.(term.EscapeError); !ok { - return errdefs.System(errors.Wrap(err, "exec attach failed")) - } - attributes := map[string]string{ - "execID": ec.ID, - } - d.LogContainerEventWithAttributes(c, "exec_detach", attributes) - } - } - return nil -} - -// execCommandGC runs a ticker to clean up the daemon references -// of exec configs that are no longer part of the container. -func (d *Daemon) execCommandGC() { - for range time.Tick(5 * time.Minute) { - var ( - cleaned int - liveExecCommands = d.containerExecIds() - ) - for id, config := range d.execCommands.Commands() { - if config.CanRemove { - cleaned++ - d.execCommands.Delete(id, config.Pid) - } else { - if _, exists := liveExecCommands[id]; !exists { - config.CanRemove = true - } - } - } - if cleaned > 0 { - logrus.Debugf("clean %d unused exec commands", cleaned) - } - } -} - -// containerExecIds returns a list of all the current exec ids that are in use -// and running inside a container. -func (d *Daemon) containerExecIds() map[string]struct{} { - ids := map[string]struct{}{} - for _, c := range d.containers.List() { - for _, id := range c.ExecCommands.List() { - ids[id] = struct{}{} - } - } - return ids -} diff --git a/vendor/github.com/docker/docker/daemon/exec/exec.go b/vendor/github.com/docker/docker/daemon/exec/exec.go deleted file mode 100644 index c036c46a0..000000000 --- a/vendor/github.com/docker/docker/daemon/exec/exec.go +++ /dev/null @@ -1,146 +0,0 @@ -package exec // import "github.com/docker/docker/daemon/exec" - -import ( - "runtime" - "sync" - - "github.com/containerd/containerd/cio" - "github.com/docker/docker/container/stream" - "github.com/docker/docker/pkg/stringid" - "github.com/sirupsen/logrus" -) - -// Config holds the configurations for execs. The Daemon keeps -// track of both running and finished execs so that they can be -// examined both during and after completion. 
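The cancellation branch of ContainerExecStart above follows a TERM-then-KILL escalation: on context cancellation it sends TERM, waits up to termProcessTimeout seconds for the attach loop to drain, and only then sends KILL. A standalone sketch of that pattern with stubbed signal functions (everything here is illustrative, not the daemon's code):

package main

import (
	"context"
	"fmt"
	"time"
)

// Same value as termProcessTimeout in exec.go above.
const termProcessTimeout = 10

func waitOrKill(ctx context.Context, done <-chan error, term, kill func()) error {
	select {
	case <-ctx.Done():
		term() // polite request first, mirroring SignalProcess(..., "TERM")
		select {
		case <-time.After(termProcessTimeout * time.Second):
			kill() // escalate, mirroring SignalProcess(..., "KILL")
		case <-done:
			// TERM worked before the timeout
		}
		return ctx.Err()
	case err := <-done:
		return err
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan error)
	go func() {
		time.Sleep(2 * time.Second)
		close(done) // simulated process exit
	}()
	cancel() // simulate the client disconnecting
	fmt.Println(waitOrKill(ctx, done, func() { fmt.Println("TERM") }, func() { fmt.Println("KILL") }))
}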
-type Config struct { - sync.Mutex - Started chan struct{} - StreamConfig *stream.Config - ID string - Running bool - ExitCode *int - OpenStdin bool - OpenStderr bool - OpenStdout bool - CanRemove bool - ContainerID string - DetachKeys []byte - Entrypoint string - Args []string - Tty bool - Privileged bool - User string - WorkingDir string - Env []string - Pid int -} - -// NewConfig initializes the a new exec configuration -func NewConfig() *Config { - return &Config{ - ID: stringid.GenerateNonCryptoID(), - StreamConfig: stream.NewConfig(), - Started: make(chan struct{}), - } -} - -type rio struct { - cio.IO - - sc *stream.Config -} - -func (i *rio) Close() error { - i.IO.Close() - - return i.sc.CloseStreams() -} - -func (i *rio) Wait() { - i.sc.Wait() - - i.IO.Wait() -} - -// InitializeStdio is called by libcontainerd to connect the stdio. -func (c *Config) InitializeStdio(iop *cio.DirectIO) (cio.IO, error) { - c.StreamConfig.CopyToPipe(iop) - - if c.StreamConfig.Stdin() == nil && !c.Tty && runtime.GOOS == "windows" { - if iop.Stdin != nil { - if err := iop.Stdin.Close(); err != nil { - logrus.Errorf("error closing exec stdin: %+v", err) - } - } - } - - return &rio{IO: iop, sc: c.StreamConfig}, nil -} - -// CloseStreams closes the stdio streams for the exec -func (c *Config) CloseStreams() error { - return c.StreamConfig.CloseStreams() -} - -// SetExitCode sets the exec config's exit code -func (c *Config) SetExitCode(code int) { - c.ExitCode = &code -} - -// Store keeps track of the exec configurations. -type Store struct { - byID map[string]*Config - sync.RWMutex -} - -// NewStore initializes a new exec store. -func NewStore() *Store { - return &Store{ - byID: make(map[string]*Config), - } -} - -// Commands returns the exec configurations in the store. -func (e *Store) Commands() map[string]*Config { - e.RLock() - byID := make(map[string]*Config, len(e.byID)) - for id, config := range e.byID { - byID[id] = config - } - e.RUnlock() - return byID -} - -// Add adds a new exec configuration to the store. -func (e *Store) Add(id string, Config *Config) { - e.Lock() - e.byID[id] = Config - e.Unlock() -} - -// Get returns an exec configuration by its id. -func (e *Store) Get(id string) *Config { - e.RLock() - res := e.byID[id] - e.RUnlock() - return res -} - -// Delete removes an exec configuration from the store. -func (e *Store) Delete(id string, pid int) { - e.Lock() - delete(e.byID, id) - e.Unlock() -} - -// List returns the list of exec ids in the store. 
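The Store defined below this note is a mutex-guarded map keyed by exec ID. A minimal round trip through its API, with values invented for illustration:

package main

import (
	"fmt"

	"github.com/docker/docker/daemon/exec"
)

func main() {
	s := exec.NewStore()

	c := exec.NewConfig() // generates a random ID and a fresh stream config
	s.Add(c.ID, c)

	if got := s.Get(c.ID); got != nil {
		fmt.Println("registered exec:", got.ID)
	}

	fmt.Println("all ids:", s.List())

	// Delete accepts a pid, but the implementation below ignores it.
	s.Delete(c.ID, c.Pid)
}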
-func (e *Store) List() []string { - var IDs []string - e.RLock() - for id := range e.byID { - IDs = append(IDs, id) - } - e.RUnlock() - return IDs -} diff --git a/vendor/github.com/docker/docker/daemon/exec_linux.go b/vendor/github.com/docker/docker/daemon/exec_linux.go deleted file mode 100644 index cd52f4886..000000000 --- a/vendor/github.com/docker/docker/daemon/exec_linux.go +++ /dev/null @@ -1,59 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/caps" - "github.com/docker/docker/daemon/exec" - "github.com/opencontainers/runc/libcontainer/apparmor" - "github.com/opencontainers/runtime-spec/specs-go" -) - -func (daemon *Daemon) execSetPlatformOpt(c *container.Container, ec *exec.Config, p *specs.Process) error { - if len(ec.User) > 0 { - uid, gid, additionalGids, err := getUser(c, ec.User) - if err != nil { - return err - } - p.User = specs.User{ - UID: uid, - GID: gid, - AdditionalGids: additionalGids, - } - } - if ec.Privileged { - if p.Capabilities == nil { - p.Capabilities = &specs.LinuxCapabilities{} - } - p.Capabilities.Bounding = caps.GetAllCapabilities() - p.Capabilities.Permitted = p.Capabilities.Bounding - p.Capabilities.Inheritable = p.Capabilities.Bounding - p.Capabilities.Effective = p.Capabilities.Bounding - } - if apparmor.IsEnabled() { - var appArmorProfile string - if c.AppArmorProfile != "" { - appArmorProfile = c.AppArmorProfile - } else if c.HostConfig.Privileged { - // `docker exec --privileged` does not currently disable AppArmor - // profiles. Privileged configuration of the container is inherited - appArmorProfile = "unconfined" - } else { - appArmorProfile = "docker-default" - } - - if appArmorProfile == "docker-default" { - // Unattended upgrades and other fun services can unload AppArmor - // profiles inadvertently. Since we cannot store our profile in - // /etc/apparmor.d, nor can we practically add other ways of - // telling the system to keep our profile loaded, in order to make - // sure that we keep the default profile enabled we dynamically - // reload it if necessary. - if err := ensureDefaultAppArmorProfile(); err != nil { - return err - } - } - p.ApparmorProfile = appArmorProfile - } - daemon.setRlimits(&specs.Spec{Process: p}, c) - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/exec_windows.go b/vendor/github.com/docker/docker/daemon/exec_windows.go deleted file mode 100644 index c37ea9f31..000000000 --- a/vendor/github.com/docker/docker/daemon/exec_windows.go +++ /dev/null @@ -1,16 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/exec" - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -func (daemon *Daemon) execSetPlatformOpt(c *container.Container, ec *exec.Config, p *specs.Process) error { - // Process arguments need to be escaped before sending to OCI. 
- if c.OS == "windows" { - p.Args = escapeArgs(p.Args) - p.User.Username = ec.User - } - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/export.go b/vendor/github.com/docker/docker/daemon/export.go deleted file mode 100644 index 737e161ed..000000000 --- a/vendor/github.com/docker/docker/daemon/export.go +++ /dev/null @@ -1,86 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "fmt" - "io" - "runtime" - - "github.com/docker/docker/container" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/system" -) - -// ContainerExport writes the contents of the container to the given -// writer. An error is returned if the container cannot be found. -func (daemon *Daemon) ContainerExport(name string, out io.Writer) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - - if runtime.GOOS == "windows" && container.OS == "windows" { - return fmt.Errorf("the daemon on this operating system does not support exporting Windows containers") - } - - if container.IsDead() { - err := fmt.Errorf("You cannot export container %s which is Dead", container.ID) - return errdefs.Conflict(err) - } - - if container.IsRemovalInProgress() { - err := fmt.Errorf("You cannot export container %s which is being removed", container.ID) - return errdefs.Conflict(err) - } - - data, err := daemon.containerExport(container) - if err != nil { - return fmt.Errorf("Error exporting container %s: %v", name, err) - } - defer data.Close() - - // Stream the entire contents of the container (basically a volatile snapshot) - if _, err := io.Copy(out, data); err != nil { - return fmt.Errorf("Error exporting container %s: %v", name, err) - } - return nil -} - -func (daemon *Daemon) containerExport(container *container.Container) (arch io.ReadCloser, err error) { - if !system.IsOSSupported(container.OS) { - return nil, fmt.Errorf("cannot export %s: %s ", container.ID, system.ErrNotSupportedOperatingSystem) - } - rwlayer, err := daemon.imageService.GetLayerByID(container.ID, container.OS) - if err != nil { - return nil, err - } - defer func() { - if err != nil { - daemon.imageService.ReleaseLayer(rwlayer, container.OS) - } - }() - - basefs, err := rwlayer.Mount(container.GetMountLabel()) - if err != nil { - return nil, err - } - - archive, err := archivePath(basefs, basefs.Path(), &archive.TarOptions{ - Compression: archive.Uncompressed, - UIDMaps: daemon.idMappings.UIDs(), - GIDMaps: daemon.idMappings.GIDs(), - }) - if err != nil { - rwlayer.Unmount() - return nil, err - } - arch = ioutils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - rwlayer.Unmount() - daemon.imageService.ReleaseLayer(rwlayer, container.OS) - return err - }) - daemon.LogContainerEvent(container, "export") - return arch, err -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs.go b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs.go deleted file mode 100644 index 915225277..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/aufs.go +++ /dev/null @@ -1,678 +0,0 @@ -// +build linux - -/* - -aufs driver directory structure - - . 
- ├── layers // Metadata of layers - │ ├── 1 - │ ├── 2 - │ └── 3 - ├── diff // Content of the layer - │ ├── 1 // Contains layers that need to be mounted for the id - │ ├── 2 - │ └── 3 - └── mnt // Mount points for the rw layers to be mounted - ├── 1 - ├── 2 - └── 3 - -*/ - -package aufs // import "github.com/docker/docker/daemon/graphdriver/aufs" - -import ( - "bufio" - "context" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/containerfs" - "github.com/docker/docker/pkg/directory" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/locker" - mountpk "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/pkg/system" - rsystem "github.com/opencontainers/runc/libcontainer/system" - "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/vbatts/tar-split/tar/storage" - "golang.org/x/sys/unix" -) - -var ( - // ErrAufsNotSupported is returned if aufs is not supported by the host. - ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems") - // ErrAufsNested means aufs cannot be used bc we are in a user namespace - ErrAufsNested = fmt.Errorf("AUFS cannot be used in non-init user namespace") - backingFs = "" - - enableDirpermLock sync.Once - enableDirperm bool - - logger = logrus.WithField("storage-driver", "aufs") -) - -func init() { - graphdriver.Register("aufs", Init) -} - -// Driver contains information about the filesystem mounted. -type Driver struct { - sync.Mutex - root string - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - ctr *graphdriver.RefCounter - pathCacheLock sync.Mutex - pathCache map[string]string - naiveDiff graphdriver.DiffDriver - locker *locker.Locker -} - -// Init returns a new AUFS driver. -// An error is returned if AUFS is not supported. -func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - // Try to load the aufs kernel module - if err := supportsAufs(); err != nil { - logger.Error(err) - return nil, graphdriver.ErrNotSupported - } - - // Perform feature detection on /var/lib/docker/aufs if it's an existing directory. - // This covers situations where /var/lib/docker/aufs is a mount, and on a different - // filesystem than /var/lib/docker. - // If the path does not exist, fall back to using /var/lib/docker for feature detection. 
- testdir := root - if _, err := os.Stat(testdir); os.IsNotExist(err) { - testdir = filepath.Dir(testdir) - } - - fsMagic, err := graphdriver.GetFSMagic(testdir) - if err != nil { - return nil, err - } - if fsName, ok := graphdriver.FsNames[fsMagic]; ok { - backingFs = fsName - } - - switch fsMagic { - case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs: - logger.Errorf("AUFS is not supported over %s", backingFs) - return nil, graphdriver.ErrIncompatibleFS - } - - paths := []string{ - "mnt", - "diff", - "layers", - } - - a := &Driver{ - root: root, - uidMaps: uidMaps, - gidMaps: gidMaps, - pathCache: make(map[string]string), - ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)), - locker: locker.New(), - } - - rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) - if err != nil { - return nil, err - } - // Create the root aufs driver dir - if err := idtools.MkdirAllAndChown(root, 0700, idtools.IDPair{UID: rootUID, GID: rootGID}); err != nil { - return nil, err - } - - // Populate the dir structure - for _, p := range paths { - if err := idtools.MkdirAllAndChown(path.Join(root, p), 0700, idtools.IDPair{UID: rootUID, GID: rootGID}); err != nil { - return nil, err - } - } - - for _, path := range []string{"mnt", "diff"} { - p := filepath.Join(root, path) - entries, err := ioutil.ReadDir(p) - if err != nil { - logger.WithError(err).WithField("dir", p).Error("error reading dir entries") - continue - } - for _, entry := range entries { - if !entry.IsDir() { - continue - } - if strings.HasSuffix(entry.Name(), "-removing") { - logger.WithField("dir", entry.Name()).Debug("Cleaning up stale layer dir") - if err := system.EnsureRemoveAll(filepath.Join(p, entry.Name())); err != nil { - logger.WithField("dir", entry.Name()).WithError(err).Error("Error removing stale layer dir") - } - } - } - } - - a.naiveDiff = graphdriver.NewNaiveDiffDriver(a, uidMaps, gidMaps) - return a, nil -} - -// Return a nil error if the kernel supports aufs -// We cannot modprobe because inside dind modprobe fails -// to run -func supportsAufs() error { - // We can try to modprobe aufs first before looking at - // proc/filesystems for when aufs is supported - exec.Command("modprobe", "aufs").Run() - - if rsystem.RunningInUserNS() { - return ErrAufsNested - } - - f, err := os.Open("/proc/filesystems") - if err != nil { - return err - } - defer f.Close() - - s := bufio.NewScanner(f) - for s.Scan() { - if strings.Contains(s.Text(), "aufs") { - return nil - } - } - return ErrAufsNotSupported -} - -func (a *Driver) rootPath() string { - return a.root -} - -func (*Driver) String() string { - return "aufs" -} - -// Status returns current information about the filesystem such as root directory, number of directories mounted, etc. -func (a *Driver) Status() [][2]string { - ids, _ := loadIds(path.Join(a.rootPath(), "layers")) - return [][2]string{ - {"Root Dir", a.rootPath()}, - {"Backing Filesystem", backingFs}, - {"Dirs", fmt.Sprintf("%d", len(ids))}, - {"Dirperm1 Supported", fmt.Sprintf("%v", useDirperm())}, - } -} - -// GetMetadata not implemented -func (a *Driver) GetMetadata(id string) (map[string]string, error) { - return nil, nil -} - -// Exists returns true if the given id is registered with -// this driver -func (a *Driver) Exists(id string) bool { - if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil { - return false - } - return true -} - -// CreateReadWrite creates a layer that is writable for use as a container -// file system. 
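The "layers" metadata written by Create below is a plain text file: one parent ID per line, direct parent first. A minimal reader equivalent to what the driver's getParentIDs helper has to do; the root path and layer ID here are assumptions for illustration.

package main

import (
	"bufio"
	"fmt"
	"os"
	"path"
)

// readParentIDs returns the ancestor chain recorded for a layer id,
// direct parent first, as written by Create.
func readParentIDs(root, id string) ([]string, error) {
	f, err := os.Open(path.Join(root, "layers", id))
	if err != nil {
		return nil, err
	}
	defer f.Close()

	var ids []string
	s := bufio.NewScanner(f)
	for s.Scan() {
		if line := s.Text(); line != "" {
			ids = append(ids, line)
		}
	}
	return ids, s.Err()
}

func main() {
	ids, err := readParentIDs("/var/lib/docker/aufs", "3") // illustrative id
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("parent chain:", ids)
}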
-func (a *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - return a.Create(id, parent, opts) -} - -// Create three folders for each id -// mnt, layers, and diff -func (a *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { - - if opts != nil && len(opts.StorageOpt) != 0 { - return fmt.Errorf("--storage-opt is not supported for aufs") - } - - if err := a.createDirsFor(id); err != nil { - return err - } - // Write the layers metadata - f, err := os.Create(path.Join(a.rootPath(), "layers", id)) - if err != nil { - return err - } - defer f.Close() - - if parent != "" { - ids, err := getParentIDs(a.rootPath(), parent) - if err != nil { - return err - } - - if _, err := fmt.Fprintln(f, parent); err != nil { - return err - } - for _, i := range ids { - if _, err := fmt.Fprintln(f, i); err != nil { - return err - } - } - } - - return nil -} - -// createDirsFor creates two directories for the given id. -// mnt and diff -func (a *Driver) createDirsFor(id string) error { - paths := []string{ - "mnt", - "diff", - } - - rootUID, rootGID, err := idtools.GetRootUIDGID(a.uidMaps, a.gidMaps) - if err != nil { - return err - } - // Directory permission is 0755. - // The path of directories are /mnt/ - // and /diff/ - for _, p := range paths { - if err := idtools.MkdirAllAndChown(path.Join(a.rootPath(), p, id), 0755, idtools.IDPair{UID: rootUID, GID: rootGID}); err != nil { - return err - } - } - return nil -} - -// Remove will unmount and remove the given id. -func (a *Driver) Remove(id string) error { - a.locker.Lock(id) - defer a.locker.Unlock(id) - a.pathCacheLock.Lock() - mountpoint, exists := a.pathCache[id] - a.pathCacheLock.Unlock() - if !exists { - mountpoint = a.getMountpoint(id) - } - - logger := logger.WithField("layer", id) - - var retries int - for { - mounted, err := a.mounted(mountpoint) - if err != nil { - if os.IsNotExist(err) { - break - } - return err - } - if !mounted { - break - } - - err = a.unmount(mountpoint) - if err == nil { - break - } - - if err != unix.EBUSY { - return errors.Wrapf(err, "aufs: unmount error: %s", mountpoint) - } - if retries >= 5 { - return errors.Wrapf(err, "aufs: unmount error after retries: %s", mountpoint) - } - // If unmount returns EBUSY, it could be a transient error. Sleep and retry. - retries++ - logger.Warnf("unmount failed due to EBUSY: retry count: %d", retries) - time.Sleep(100 * time.Millisecond) - } - - // Remove the layers file for the id - if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { - return errors.Wrapf(err, "error removing layers dir for %s", id) - } - - if err := atomicRemove(a.getDiffPath(id)); err != nil { - return errors.Wrapf(err, "could not remove diff path for id %s", id) - } - - // Atomically remove each directory in turn by first moving it out of the - // way (so that docker doesn't find it anymore) before doing removal of - // the whole tree. 
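// The atomic removal mentioned above renames the directory out of the way
// first, so concurrent lookups stop resolving the path before the slow
// recursive delete runs; a crash in between leaves a "-removing" directory
// that the Init cleanup pass sweeps up on the next start. Stripped of the
// driver's EBUSY retry loop, the pattern is just (sketch):
package main

import (
	"fmt"
	"os"
)

func renameThenRemove(source string) error {
	target := source + "-removing"
	// After the rename the original path no longer resolves.
	if err := os.Rename(source, target); err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("preparing atomic delete: %v", err)
	}
	return os.RemoveAll(target)
}

func main() {
	if err := os.MkdirAll("/tmp/layer-demo/sub", 0755); err != nil {
		panic(err)
	}
	fmt.Println(renameThenRemove("/tmp/layer-demo"))
}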
- if err := atomicRemove(mountpoint); err != nil { - if errors.Cause(err) == unix.EBUSY { - logger.WithField("dir", mountpoint).WithError(err).Warn("error performing atomic remove due to EBUSY") - } - return errors.Wrapf(err, "could not remove mountpoint for id %s", id) - } - - a.pathCacheLock.Lock() - delete(a.pathCache, id) - a.pathCacheLock.Unlock() - return nil -} - -func atomicRemove(source string) error { - target := source + "-removing" - - err := os.Rename(source, target) - switch { - case err == nil, os.IsNotExist(err): - case os.IsExist(err): - // Got error saying the target dir already exists, maybe the source doesn't exist due to a previous (failed) remove - if _, e := os.Stat(source); !os.IsNotExist(e) { - return errors.Wrapf(err, "target rename dir '%s' exists but should not, this needs to be manually cleaned up") - } - default: - return errors.Wrapf(err, "error preparing atomic delete") - } - - return system.EnsureRemoveAll(target) -} - -// Get returns the rootfs path for the id. -// This will mount the dir at its given path -func (a *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) { - a.locker.Lock(id) - defer a.locker.Unlock(id) - parents, err := a.getParentLayerPaths(id) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - - a.pathCacheLock.Lock() - m, exists := a.pathCache[id] - a.pathCacheLock.Unlock() - - if !exists { - m = a.getDiffPath(id) - if len(parents) > 0 { - m = a.getMountpoint(id) - } - } - if count := a.ctr.Increment(m); count > 1 { - return containerfs.NewLocalContainerFS(m), nil - } - - // If a dir does not have a parent ( no layers )do not try to mount - // just return the diff path to the data - if len(parents) > 0 { - if err := a.mount(id, m, mountLabel, parents); err != nil { - return nil, err - } - } - - a.pathCacheLock.Lock() - a.pathCache[id] = m - a.pathCacheLock.Unlock() - return containerfs.NewLocalContainerFS(m), nil -} - -// Put unmounts and updates list of active mounts. -func (a *Driver) Put(id string) error { - a.locker.Lock(id) - defer a.locker.Unlock(id) - a.pathCacheLock.Lock() - m, exists := a.pathCache[id] - if !exists { - m = a.getMountpoint(id) - a.pathCache[id] = m - } - a.pathCacheLock.Unlock() - if count := a.ctr.Decrement(m); count > 0 { - return nil - } - - err := a.unmount(m) - if err != nil { - logger.Debugf("Failed to unmount %s aufs: %v", id, err) - } - return err -} - -// isParent returns if the passed in parent is the direct parent of the passed in layer -func (a *Driver) isParent(id, parent string) bool { - parents, _ := getParentIDs(a.rootPath(), id) - if parent == "" && len(parents) > 0 { - return false - } - return !(len(parents) > 0 && parent != parents[0]) -} - -// Diff produces an archive of the changes between the specified -// layer and its parent layer which may be "". -func (a *Driver) Diff(id, parent string) (io.ReadCloser, error) { - if !a.isParent(id, parent) { - return a.naiveDiff.Diff(id, parent) - } - - // AUFS doesn't need the parent layer to produce a diff. - return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ - Compression: archive.Uncompressed, - ExcludePatterns: []string{archive.WhiteoutMetaPrefix + "*", "!" + archive.WhiteoutOpaqueDir}, - UIDMaps: a.uidMaps, - GIDMaps: a.gidMaps, - }) -} - -type fileGetNilCloser struct { - storage.FileGetter -} - -func (f fileGetNilCloser) Close() error { - return nil -} - -// DiffGetter returns a FileGetCloser that can read files from the directory that -// contains files for the layer differences. 
Used for direct access for tar-split. -func (a *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { - p := path.Join(a.rootPath(), "diff", id) - return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil -} - -func (a *Driver) applyDiff(id string, diff io.Reader) error { - return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ - UIDMaps: a.uidMaps, - GIDMaps: a.gidMaps, - }) -} - -// DiffSize calculates the changes between the specified id -// and its parent and returns the size in bytes of the changes -// relative to its base filesystem directory. -func (a *Driver) DiffSize(id, parent string) (size int64, err error) { - if !a.isParent(id, parent) { - return a.naiveDiff.DiffSize(id, parent) - } - // AUFS doesn't need the parent layer to calculate the diff size. - return directory.Size(context.TODO(), path.Join(a.rootPath(), "diff", id)) -} - -// ApplyDiff extracts the changeset from the given diff into the -// layer with the specified id and parent, returning the size of the -// new layer in bytes. -func (a *Driver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) { - if !a.isParent(id, parent) { - return a.naiveDiff.ApplyDiff(id, parent, diff) - } - - // AUFS doesn't need the parent id to apply the diff if it is the direct parent. - if err = a.applyDiff(id, diff); err != nil { - return - } - - return a.DiffSize(id, parent) -} - -// Changes produces a list of changes between the specified layer -// and its parent layer. If parent is "", then all changes will be ADD changes. -func (a *Driver) Changes(id, parent string) ([]archive.Change, error) { - if !a.isParent(id, parent) { - return a.naiveDiff.Changes(id, parent) - } - - // AUFS doesn't have snapshots, so we need to get changes from all parent - // layers. 
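// getParentLayerPaths (below) resolves a layer's read-only branches from
// the same "layers" metadata file that Create writes: ancestor IDs, one
// per line, direct parent first. Reading it standalone (hypothetical
// helper mirroring getParentIDs):
package main

import (
	"bufio"
	"fmt"
	"os"
	"path/filepath"
)

func readParentIDs(root, id string) ([]string, error) {
	f, err := os.Open(filepath.Join(root, "layers", id))
	if err != nil {
		return nil, err
	}
	defer f.Close()

	var ids []string
	s := bufio.NewScanner(f)
	for s.Scan() {
		if t := s.Text(); t != "" {
			ids = append(ids, t)
		}
	}
	// An empty result means the layer is a base layer with no parent.
	return ids, s.Err()
}

func main() {
	ids, err := readParentIDs("/tmp/aufs-demo", "child")
	fmt.Println(ids, err)
}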
- layers, err := a.getParentLayerPaths(id) - if err != nil { - return nil, err - } - return archive.Changes(layers, path.Join(a.rootPath(), "diff", id)) -} - -func (a *Driver) getParentLayerPaths(id string) ([]string, error) { - parentIds, err := getParentIDs(a.rootPath(), id) - if err != nil { - return nil, err - } - layers := make([]string, len(parentIds)) - - // Get the diff paths for all the parent ids - for i, p := range parentIds { - layers[i] = path.Join(a.rootPath(), "diff", p) - } - return layers, nil -} - -func (a *Driver) mount(id string, target string, mountLabel string, layers []string) error { - a.Lock() - defer a.Unlock() - - // If the id is mounted or we get an error return - if mounted, err := a.mounted(target); err != nil || mounted { - return err - } - - rw := a.getDiffPath(id) - - if err := a.aufsMount(layers, rw, target, mountLabel); err != nil { - return fmt.Errorf("error creating aufs mount to %s: %v", target, err) - } - return nil -} - -func (a *Driver) unmount(mountPath string) error { - a.Lock() - defer a.Unlock() - - if mounted, err := a.mounted(mountPath); err != nil || !mounted { - return err - } - return Unmount(mountPath) -} - -func (a *Driver) mounted(mountpoint string) (bool, error) { - return graphdriver.Mounted(graphdriver.FsMagicAufs, mountpoint) -} - -// Cleanup aufs and unmount all mountpoints -func (a *Driver) Cleanup() error { - var dirs []string - if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() { - return nil - } - dirs = append(dirs, path) - return nil - }); err != nil { - return err - } - - for _, m := range dirs { - if err := a.unmount(m); err != nil { - logger.Debugf("error unmounting %s: %s", m, err) - } - } - return mountpk.RecursiveUnmount(a.root) -} - -func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) { - defer func() { - if err != nil { - Unmount(target) - } - }() - - // Mount options are clipped to page size(4096 bytes). If there are more - // layers then these are remounted individually using append. - - offset := 54 - if useDirperm() { - offset += len(",dirperm1") - } - b := make([]byte, unix.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel - bp := copy(b, fmt.Sprintf("br:%s=rw", rw)) - - index := 0 - for ; index < len(ro); index++ { - layer := fmt.Sprintf(":%s=ro+wh", ro[index]) - if bp+len(layer) > len(b) { - break - } - bp += copy(b[bp:], layer) - } - - opts := "dio,xino=/dev/shm/aufs.xino" - if useDirperm() { - opts += ",dirperm1" - } - data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel) - if err = mount("none", target, "aufs", 0, data); err != nil { - return - } - - for ; index < len(ro); index++ { - layer := fmt.Sprintf(":%s=ro+wh", ro[index]) - data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel) - if err = mount("none", target, "aufs", unix.MS_REMOUNT, data); err != nil { - return - } - } - - return -} - -// useDirperm checks dirperm1 mount option can be used with the current -// version of aufs. 
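// aufsMount (above) assembles the aufs "br:" branch option: the writable
// diff directory first as "=rw", then every parent as "=ro+wh" (read-only
// with whiteout support), clipping the string to one page and remounting
// with "append:" for any overflow. The string-building step in isolation
// (sketch):
package main

import "fmt"

func branchOption(rw string, ro []string) string {
	opt := fmt.Sprintf("br:%s=rw", rw)
	for _, layer := range ro {
		opt += fmt.Sprintf(":%s=ro+wh", layer)
	}
	return opt
}

func main() {
	fmt.Println(branchOption("/aufs/diff/top", []string{"/aufs/diff/p1", "/aufs/diff/p2"}))
	// Output: br:/aufs/diff/top=rw:/aufs/diff/p1=ro+wh:/aufs/diff/p2=ro+wh
}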
-func useDirperm() bool { - enableDirpermLock.Do(func() { - base, err := ioutil.TempDir("", "docker-aufs-base") - if err != nil { - logger.Errorf("error checking dirperm1: %v", err) - return - } - defer os.RemoveAll(base) - - union, err := ioutil.TempDir("", "docker-aufs-union") - if err != nil { - logger.Errorf("error checking dirperm1: %v", err) - return - } - defer os.RemoveAll(union) - - opts := fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", base) - if err := mount("none", union, "aufs", 0, opts); err != nil { - return - } - enableDirperm = true - if err := Unmount(union); err != nil { - logger.Errorf("error checking dirperm1: failed to unmount %v", err) - } - }) - return enableDirperm -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/dirs.go b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/dirs.go deleted file mode 100644 index e60be5e3c..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/dirs.go +++ /dev/null @@ -1,64 +0,0 @@ -// +build linux - -package aufs // import "github.com/docker/docker/daemon/graphdriver/aufs" - -import ( - "bufio" - "io/ioutil" - "os" - "path" -) - -// Return all the directories -func loadIds(root string) ([]string, error) { - dirs, err := ioutil.ReadDir(root) - if err != nil { - return nil, err - } - var out []string - for _, d := range dirs { - if !d.IsDir() { - out = append(out, d.Name()) - } - } - return out, nil -} - -// Read the layers file for the current id and return all the -// layers represented by new lines in the file -// -// If there are no lines in the file then the id has no parent -// and an empty slice is returned. -func getParentIDs(root, id string) ([]string, error) { - f, err := os.Open(path.Join(root, "layers", id)) - if err != nil { - return nil, err - } - defer f.Close() - - var out []string - s := bufio.NewScanner(f) - - for s.Scan() { - if t := s.Text(); t != "" { - out = append(out, s.Text()) - } - } - return out, s.Err() -} - -func (a *Driver) getMountpoint(id string) string { - return path.Join(a.mntPath(), id) -} - -func (a *Driver) mntPath() string { - return path.Join(a.rootPath(), "mnt") -} - -func (a *Driver) getDiffPath(id string) string { - return path.Join(a.diffPath(), id) -} - -func (a *Driver) diffPath() string { - return path.Join(a.rootPath(), "diff") -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount.go b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount.go deleted file mode 100644 index 9f5510380..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build linux - -package aufs // import "github.com/docker/docker/daemon/graphdriver/aufs" - -import ( - "os/exec" - - "golang.org/x/sys/unix" -) - -// Unmount the target specified. 
-func Unmount(target string) error {
-	if err := exec.Command("auplink", target, "flush").Run(); err != nil {
-		logger.WithError(err).Warnf("Couldn't run auplink before unmount %s", target)
-	}
-	return unix.Unmount(target, 0)
-}
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_linux.go b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_linux.go
deleted file mode 100644
index 8d5ad8f32..000000000
--- a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_linux.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package aufs // import "github.com/docker/docker/daemon/graphdriver/aufs"
-
-import "golang.org/x/sys/unix"
-
-func mount(source string, target string, fstype string, flags uintptr, data string) error {
-	return unix.Mount(source, target, fstype, flags, data)
-}
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_unsupported.go
deleted file mode 100644
index cf7f58c29..000000000
--- a/vendor/github.com/docker/docker/daemon/graphdriver/aufs/mount_unsupported.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build !linux
-
-package aufs // import "github.com/docker/docker/daemon/graphdriver/aufs"
-
-import "errors"
-
-// MsRemount declared to specify a non-linux system mount.
-const MsRemount = 0
-
-func mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
-	return errors.New("mount is not implemented on this platform")
-}
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go
deleted file mode 100644
index cac624030..000000000
--- a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go
+++ /dev/null
@@ -1,663 +0,0 @@
-// +build linux
-
-package btrfs // import "github.com/docker/docker/daemon/graphdriver/btrfs"
-
-/*
-#include <stdlib.h>
-#include <dirent.h>
-#include <btrfs/ioctl.h>
-#include <btrfs/ctree.h>
-
-static void set_name_btrfs_ioctl_vol_args_v2(struct btrfs_ioctl_vol_args_v2* btrfs_struct, const char* value) {
-	snprintf(btrfs_struct->name, BTRFS_SUBVOL_NAME_MAX, "%s", value);
-}
-*/
-import "C"
-
-import (
-	"fmt"
-	"io/ioutil"
-	"math"
-	"os"
-	"path"
-	"path/filepath"
-	"strconv"
-	"strings"
-	"sync"
-	"unsafe"
-
-	"github.com/docker/docker/daemon/graphdriver"
-	"github.com/docker/docker/pkg/containerfs"
-	"github.com/docker/docker/pkg/idtools"
-	"github.com/docker/docker/pkg/parsers"
-	"github.com/docker/docker/pkg/system"
-	"github.com/docker/go-units"
-	"github.com/opencontainers/selinux/go-selinux/label"
-	"github.com/sirupsen/logrus"
-	"golang.org/x/sys/unix"
-)
-
-func init() {
-	graphdriver.Register("btrfs", Init)
-}
-
-type btrfsOptions struct {
-	minSpace uint64
-	size     uint64
-}
-
-// Init returns a new BTRFS driver.
-// An error is returned if BTRFS is not supported.
-func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
-
-	// Perform feature detection on /var/lib/docker/btrfs if it's an existing directory.
-	// This covers situations where /var/lib/docker/btrfs is a mount, and on a different
-	// filesystem than /var/lib/docker.
-	// If the path does not exist, fall back to using /var/lib/docker for feature detection.
- testdir := home - if _, err := os.Stat(testdir); os.IsNotExist(err) { - testdir = filepath.Dir(testdir) - } - - fsMagic, err := graphdriver.GetFSMagic(testdir) - if err != nil { - return nil, err - } - - if fsMagic != graphdriver.FsMagicBtrfs { - return nil, graphdriver.ErrPrerequisites - } - - rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) - if err != nil { - return nil, err - } - if err := idtools.MkdirAllAndChown(home, 0700, idtools.IDPair{UID: rootUID, GID: rootGID}); err != nil { - return nil, err - } - - opt, userDiskQuota, err := parseOptions(options) - if err != nil { - return nil, err - } - - driver := &Driver{ - home: home, - uidMaps: uidMaps, - gidMaps: gidMaps, - options: opt, - } - - if userDiskQuota { - if err := driver.subvolEnableQuota(); err != nil { - return nil, err - } - } - - return graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), nil -} - -func parseOptions(opt []string) (btrfsOptions, bool, error) { - var options btrfsOptions - userDiskQuota := false - for _, option := range opt { - key, val, err := parsers.ParseKeyValueOpt(option) - if err != nil { - return options, userDiskQuota, err - } - key = strings.ToLower(key) - switch key { - case "btrfs.min_space": - minSpace, err := units.RAMInBytes(val) - if err != nil { - return options, userDiskQuota, err - } - userDiskQuota = true - options.minSpace = uint64(minSpace) - default: - return options, userDiskQuota, fmt.Errorf("Unknown option %s", key) - } - } - return options, userDiskQuota, nil -} - -// Driver contains information about the filesystem mounted. -type Driver struct { - //root of the file system - home string - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - options btrfsOptions - quotaEnabled bool - once sync.Once -} - -// String prints the name of the driver (btrfs). -func (d *Driver) String() string { - return "btrfs" -} - -// Status returns current driver information in a two dimensional string array. -// Output contains "Build Version" and "Library Version" of the btrfs libraries used. -// Version information can be used to check compatibility with your kernel. -func (d *Driver) Status() [][2]string { - status := [][2]string{} - if bv := btrfsBuildVersion(); bv != "-" { - status = append(status, [2]string{"Build Version", bv}) - } - if lv := btrfsLibVersion(); lv != -1 { - status = append(status, [2]string{"Library Version", fmt.Sprintf("%d", lv)}) - } - return status -} - -// GetMetadata returns empty metadata for this driver. -func (d *Driver) GetMetadata(id string) (map[string]string, error) { - return nil, nil -} - -// Cleanup unmounts the home directory. 
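// parseOptions (above) converts human-readable sizes such as
// "btrfs.min_space=10G" with units.RAMInBytes, which interprets suffixes
// as binary units (10G => 10 GiB). Demonstrating just that conversion
// (github.com/docker/go-units is already a dependency of this tree):
package main

import (
	"fmt"
	"strings"

	units "github.com/docker/go-units"
)

func main() {
	opt := "btrfs.min_space=10G"
	kv := strings.SplitN(opt, "=", 2)
	n, err := units.RAMInBytes(kv[1])
	fmt.Println(kv[0], n, err) // btrfs.min_space 10737418240 <nil>
}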
-func (d *Driver) Cleanup() error { - return d.subvolDisableQuota() -} - -func free(p *C.char) { - C.free(unsafe.Pointer(p)) -} - -func openDir(path string) (*C.DIR, error) { - Cpath := C.CString(path) - defer free(Cpath) - - dir := C.opendir(Cpath) - if dir == nil { - return nil, fmt.Errorf("Can't open dir") - } - return dir, nil -} - -func closeDir(dir *C.DIR) { - if dir != nil { - C.closedir(dir) - } -} - -func getDirFd(dir *C.DIR) uintptr { - return uintptr(C.dirfd(dir)) -} - -func subvolCreate(path, name string) error { - dir, err := openDir(path) - if err != nil { - return err - } - defer closeDir(dir) - - var args C.struct_btrfs_ioctl_vol_args - for i, c := range []byte(name) { - args.name[i] = C.char(c) - } - - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) - } - return nil -} - -func subvolSnapshot(src, dest, name string) error { - srcDir, err := openDir(src) - if err != nil { - return err - } - defer closeDir(srcDir) - - destDir, err := openDir(dest) - if err != nil { - return err - } - defer closeDir(destDir) - - var args C.struct_btrfs_ioctl_vol_args_v2 - args.fd = C.__s64(getDirFd(srcDir)) - - var cs = C.CString(name) - C.set_name_btrfs_ioctl_vol_args_v2(&args, cs) - C.free(unsafe.Pointer(cs)) - - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) - } - return nil -} - -func isSubvolume(p string) (bool, error) { - var bufStat unix.Stat_t - if err := unix.Lstat(p, &bufStat); err != nil { - return false, err - } - - // return true if it is a btrfs subvolume - return bufStat.Ino == C.BTRFS_FIRST_FREE_OBJECTID, nil -} - -func subvolDelete(dirpath, name string, quotaEnabled bool) error { - dir, err := openDir(dirpath) - if err != nil { - return err - } - defer closeDir(dir) - fullPath := path.Join(dirpath, name) - - var args C.struct_btrfs_ioctl_vol_args - - // walk the btrfs subvolumes - walkSubvolumes := func(p string, f os.FileInfo, err error) error { - if err != nil { - if os.IsNotExist(err) && p != fullPath { - // missing most likely because the path was a subvolume that got removed in the previous iteration - // since it's gone anyway, we don't care - return nil - } - return fmt.Errorf("error walking subvolumes: %v", err) - } - // we want to check children only so skip itself - // it will be removed after the filepath walk anyways - if f.IsDir() && p != fullPath { - sv, err := isSubvolume(p) - if err != nil { - return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err) - } - if sv { - if err := subvolDelete(path.Dir(p), f.Name(), quotaEnabled); err != nil { - return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err) - } - } - } - return nil - } - if err := filepath.Walk(path.Join(dirpath, name), walkSubvolumes); err != nil { - return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err) - } - - if quotaEnabled { - if qgroupid, err := subvolLookupQgroup(fullPath); err == nil { - var args C.struct_btrfs_ioctl_qgroup_create_args - args.qgroupid = C.__u64(qgroupid) - - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_CREATE, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - logrus.WithField("storage-driver", "btrfs").Errorf("Failed to 
delete btrfs qgroup %v for %s: %v", qgroupid, fullPath, errno.Error()) - } - } else { - logrus.WithField("storage-driver", "btrfs").Errorf("Failed to lookup btrfs qgroup for %s: %v", fullPath, err.Error()) - } - } - - // all subvolumes have been removed - // now remove the one originally passed in - for i, c := range []byte(name) { - args.name[i] = C.char(c) - } - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error()) - } - return nil -} - -func (d *Driver) updateQuotaStatus() { - d.once.Do(func() { - if !d.quotaEnabled { - // In case quotaEnabled is not set, check qgroup and update quotaEnabled as needed - if err := subvolQgroupStatus(d.home); err != nil { - // quota is still not enabled - return - } - d.quotaEnabled = true - } - }) -} - -func (d *Driver) subvolEnableQuota() error { - d.updateQuotaStatus() - - if d.quotaEnabled { - return nil - } - - dir, err := openDir(d.home) - if err != nil { - return err - } - defer closeDir(dir) - - var args C.struct_btrfs_ioctl_quota_ctl_args - args.cmd = C.BTRFS_QUOTA_CTL_ENABLE - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to enable btrfs quota for %s: %v", dir, errno.Error()) - } - - d.quotaEnabled = true - - return nil -} - -func (d *Driver) subvolDisableQuota() error { - d.updateQuotaStatus() - - if !d.quotaEnabled { - return nil - } - - dir, err := openDir(d.home) - if err != nil { - return err - } - defer closeDir(dir) - - var args C.struct_btrfs_ioctl_quota_ctl_args - args.cmd = C.BTRFS_QUOTA_CTL_DISABLE - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to disable btrfs quota for %s: %v", dir, errno.Error()) - } - - d.quotaEnabled = false - - return nil -} - -func (d *Driver) subvolRescanQuota() error { - d.updateQuotaStatus() - - if !d.quotaEnabled { - return nil - } - - dir, err := openDir(d.home) - if err != nil { - return err - } - defer closeDir(dir) - - var args C.struct_btrfs_ioctl_quota_rescan_args - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to rescan btrfs quota for %s: %v", dir, errno.Error()) - } - - return nil -} - -func subvolLimitQgroup(path string, size uint64) error { - dir, err := openDir(path) - if err != nil { - return err - } - defer closeDir(dir) - - var args C.struct_btrfs_ioctl_qgroup_limit_args - args.lim.max_referenced = C.__u64(size) - args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to limit qgroup for %s: %v", dir, errno.Error()) - } - - return nil -} - -// subvolQgroupStatus performs a BTRFS_IOC_TREE_SEARCH on the root path -// with search key of BTRFS_QGROUP_STATUS_KEY. -// In case qgroup is enabled, the retuned key type will match BTRFS_QGROUP_STATUS_KEY. 
-// For more details please see https://github.com/kdave/btrfs-progs/blob/v4.9/qgroup.c#L1035 -func subvolQgroupStatus(path string) error { - dir, err := openDir(path) - if err != nil { - return err - } - defer closeDir(dir) - - var args C.struct_btrfs_ioctl_search_args - args.key.tree_id = C.BTRFS_QUOTA_TREE_OBJECTID - args.key.min_type = C.BTRFS_QGROUP_STATUS_KEY - args.key.max_type = C.BTRFS_QGROUP_STATUS_KEY - args.key.max_objectid = C.__u64(math.MaxUint64) - args.key.max_offset = C.__u64(math.MaxUint64) - args.key.max_transid = C.__u64(math.MaxUint64) - args.key.nr_items = 4096 - - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_TREE_SEARCH, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to search qgroup for %s: %v", path, errno.Error()) - } - sh := (*C.struct_btrfs_ioctl_search_header)(unsafe.Pointer(&args.buf)) - if sh._type != C.BTRFS_QGROUP_STATUS_KEY { - return fmt.Errorf("Invalid qgroup search header type for %s: %v", path, sh._type) - } - return nil -} - -func subvolLookupQgroup(path string) (uint64, error) { - dir, err := openDir(path) - if err != nil { - return 0, err - } - defer closeDir(dir) - - var args C.struct_btrfs_ioctl_ino_lookup_args - args.objectid = C.BTRFS_FIRST_FREE_OBJECTID - - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_INO_LOOKUP, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return 0, fmt.Errorf("Failed to lookup qgroup for %s: %v", dir, errno.Error()) - } - if args.treeid == 0 { - return 0, fmt.Errorf("Invalid qgroup id for %s: 0", dir) - } - - return uint64(args.treeid), nil -} - -func (d *Driver) subvolumesDir() string { - return path.Join(d.home, "subvolumes") -} - -func (d *Driver) subvolumesDirID(id string) string { - return path.Join(d.subvolumesDir(), id) -} - -func (d *Driver) quotasDir() string { - return path.Join(d.home, "quotas") -} - -func (d *Driver) quotasDirID(id string) string { - return path.Join(d.quotasDir(), id) -} - -// CreateReadWrite creates a layer that is writable for use as a container -// file system. -func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - return d.Create(id, parent, opts) -} - -// Create the filesystem with given id. 
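// Create (below) issues BTRFS_IOC_SUBVOL_CREATE for base layers and
// BTRFS_IOC_SNAP_CREATE_V2 for layers with a parent, via cgo. The same
// two operations expressed with the btrfs userspace CLI, for illustration
// only (the driver does not shell out, and these paths are examples):
package main

import (
	"fmt"
	"os/exec"
)

func btrfsCmd(args ...string) error {
	out, err := exec.Command("btrfs", args...).CombinedOutput()
	if err != nil {
		return fmt.Errorf("btrfs %v: %v: %s", args, err, out)
	}
	return nil
}

func main() {
	// Base layer: a fresh, empty subvolume.
	fmt.Println(btrfsCmd("subvolume", "create", "/var/lib/docker/btrfs/subvolumes/base"))
	// Child layer: a writable snapshot of its parent subvolume.
	fmt.Println(btrfsCmd("subvolume", "snapshot",
		"/var/lib/docker/btrfs/subvolumes/base",
		"/var/lib/docker/btrfs/subvolumes/child"))
}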
-func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { - quotas := path.Join(d.home, "quotas") - subvolumes := path.Join(d.home, "subvolumes") - rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { - return err - } - if err := idtools.MkdirAllAndChown(subvolumes, 0700, idtools.IDPair{UID: rootUID, GID: rootGID}); err != nil { - return err - } - if parent == "" { - if err := subvolCreate(subvolumes, id); err != nil { - return err - } - } else { - parentDir := d.subvolumesDirID(parent) - st, err := os.Stat(parentDir) - if err != nil { - return err - } - if !st.IsDir() { - return fmt.Errorf("%s: not a directory", parentDir) - } - if err := subvolSnapshot(parentDir, subvolumes, id); err != nil { - return err - } - } - - var storageOpt map[string]string - if opts != nil { - storageOpt = opts.StorageOpt - } - - if _, ok := storageOpt["size"]; ok { - driver := &Driver{} - if err := d.parseStorageOpt(storageOpt, driver); err != nil { - return err - } - - if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil { - return err - } - if err := idtools.MkdirAllAndChown(quotas, 0700, idtools.IDPair{UID: rootUID, GID: rootGID}); err != nil { - return err - } - if err := ioutil.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil { - return err - } - } - - // if we have a remapped root (user namespaces enabled), change the created snapshot - // dir ownership to match - if rootUID != 0 || rootGID != 0 { - if err := os.Chown(path.Join(subvolumes, id), rootUID, rootGID); err != nil { - return err - } - } - - mountLabel := "" - if opts != nil { - mountLabel = opts.MountLabel - } - - return label.Relabel(path.Join(subvolumes, id), mountLabel, false) -} - -// Parse btrfs storage options -func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { - // Read size to change the subvolume disk quota per container - for key, val := range storageOpt { - key := strings.ToLower(key) - switch key { - case "size": - size, err := units.RAMInBytes(val) - if err != nil { - return err - } - driver.options.size = uint64(size) - default: - return fmt.Errorf("Unknown option %s", key) - } - } - - return nil -} - -// Set btrfs storage size -func (d *Driver) setStorageSize(dir string, driver *Driver) error { - if driver.options.size <= 0 { - return fmt.Errorf("btrfs: invalid storage size: %s", units.HumanSize(float64(driver.options.size))) - } - if d.options.minSpace > 0 && driver.options.size < d.options.minSpace { - return fmt.Errorf("btrfs: storage size cannot be less than %s", units.HumanSize(float64(d.options.minSpace))) - } - if err := d.subvolEnableQuota(); err != nil { - return err - } - return subvolLimitQgroup(dir, driver.options.size) -} - -// Remove the filesystem with given id. -func (d *Driver) Remove(id string) error { - dir := d.subvolumesDirID(id) - if _, err := os.Stat(dir); err != nil { - return err - } - quotasDir := d.quotasDirID(id) - if _, err := os.Stat(quotasDir); err == nil { - if err := os.Remove(quotasDir); err != nil { - return err - } - } else if !os.IsNotExist(err) { - return err - } - - // Call updateQuotaStatus() to invoke status update - d.updateQuotaStatus() - - if err := subvolDelete(d.subvolumesDir(), id, d.quotaEnabled); err != nil { - return err - } - if err := system.EnsureRemoveAll(dir); err != nil { - return err - } - return d.subvolRescanQuota() -} - -// Get the requested filesystem id. 
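// The per-layer quota that Create (above) stores under "quotas/<id>" is
// nothing more than the size in bytes printed as decimal text; Get
// (below) re-reads it with strconv.ParseUint before re-applying the
// qgroup limit. That round trip in isolation:
package main

import (
	"fmt"
	"io/ioutil"
	"strconv"
)

func main() {
	path := "/tmp/quota-demo"
	size := uint64(20 * 1024 * 1024 * 1024) // 20 GiB

	if err := ioutil.WriteFile(path, []byte(fmt.Sprint(size)), 0644); err != nil {
		panic(err)
	}
	b, err := ioutil.ReadFile(path)
	if err != nil {
		panic(err)
	}
	parsed, err := strconv.ParseUint(string(b), 10, 64)
	fmt.Println(parsed == size, err) // true <nil>
}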
-func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) { - dir := d.subvolumesDirID(id) - st, err := os.Stat(dir) - if err != nil { - return nil, err - } - - if !st.IsDir() { - return nil, fmt.Errorf("%s: not a directory", dir) - } - - if quota, err := ioutil.ReadFile(d.quotasDirID(id)); err == nil { - if size, err := strconv.ParseUint(string(quota), 10, 64); err == nil && size >= d.options.minSpace { - if err := d.subvolEnableQuota(); err != nil { - return nil, err - } - if err := subvolLimitQgroup(dir, size); err != nil { - return nil, err - } - } - } - - return containerfs.NewLocalContainerFS(dir), nil -} - -// Put is not implemented for BTRFS as there is no cleanup required for the id. -func (d *Driver) Put(id string) error { - // Get() creates no runtime resources (like e.g. mounts) - // so this doesn't need to do anything. - return nil -} - -// Exists checks if the id exists in the filesystem. -func (d *Driver) Exists(id string) bool { - dir := d.subvolumesDirID(id) - _, err := os.Stat(dir) - return err == nil -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/dummy_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/dummy_unsupported.go deleted file mode 100644 index d7793f879..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/dummy_unsupported.go +++ /dev/null @@ -1,3 +0,0 @@ -// +build !linux !cgo - -package btrfs // import "github.com/docker/docker/daemon/graphdriver/btrfs" diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version.go b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version.go deleted file mode 100644 index 2fb5c7355..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build linux,!btrfs_noversion - -package btrfs // import "github.com/docker/docker/daemon/graphdriver/btrfs" - -/* -#include - -// around version 3.16, they did not define lib version yet -#ifndef BTRFS_LIB_VERSION -#define BTRFS_LIB_VERSION -1 -#endif - -// upstream had removed it, but now it will be coming back -#ifndef BTRFS_BUILD_VERSION -#define BTRFS_BUILD_VERSION "-" -#endif -*/ -import "C" - -func btrfsBuildVersion() string { - return string(C.BTRFS_BUILD_VERSION) -} - -func btrfsLibVersion() int { - return int(C.BTRFS_LIB_VERSION) -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version_none.go b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version_none.go deleted file mode 100644 index 5c755f817..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/version_none.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build linux,btrfs_noversion - -package btrfs // import "github.com/docker/docker/daemon/graphdriver/btrfs" - -// TODO(vbatts) remove this work-around once supported linux distros are on -// btrfs utilities of >= 3.16.1 - -func btrfsBuildVersion() string { - return "-" -} - -func btrfsLibVersion() int { - return -1 -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/copy/copy.go b/vendor/github.com/docker/docker/daemon/graphdriver/copy/copy.go deleted file mode 100644 index 86316fdfe..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/copy/copy.go +++ /dev/null @@ -1,277 +0,0 @@ -// +build linux - -package copy // import "github.com/docker/docker/daemon/graphdriver/copy" - -/* -#include - -#ifndef FICLONE -#define FICLONE _IOW(0x94, 9, int) -#endif -*/ -import "C" -import ( - "container/list" - "fmt" - "io" - "os" - 
"path/filepath" - "syscall" - "time" - - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/system" - rsystem "github.com/opencontainers/runc/libcontainer/system" - "golang.org/x/sys/unix" -) - -// Mode indicates whether to use hardlink or copy content -type Mode int - -const ( - // Content creates a new file, and copies the content of the file - Content Mode = iota - // Hardlink creates a new hardlink to the existing file - Hardlink -) - -func copyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error { - srcFile, err := os.Open(srcPath) - if err != nil { - return err - } - defer srcFile.Close() - - // If the destination file already exists, we shouldn't blow it away - dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, fileinfo.Mode()) - if err != nil { - return err - } - defer dstFile.Close() - - if *copyWithFileClone { - _, _, err = unix.Syscall(unix.SYS_IOCTL, dstFile.Fd(), C.FICLONE, srcFile.Fd()) - if err == nil { - return nil - } - - *copyWithFileClone = false - if err == unix.EXDEV { - *copyWithFileRange = false - } - } - if *copyWithFileRange { - err = doCopyWithFileRange(srcFile, dstFile, fileinfo) - // Trying the file_clone may not have caught the exdev case - // as the ioctl may not have been available (therefore EINVAL) - if err == unix.EXDEV || err == unix.ENOSYS { - *copyWithFileRange = false - } else { - return err - } - } - return legacyCopy(srcFile, dstFile) -} - -func doCopyWithFileRange(srcFile, dstFile *os.File, fileinfo os.FileInfo) error { - amountLeftToCopy := fileinfo.Size() - - for amountLeftToCopy > 0 { - n, err := unix.CopyFileRange(int(srcFile.Fd()), nil, int(dstFile.Fd()), nil, int(amountLeftToCopy), 0) - if err != nil { - return err - } - - amountLeftToCopy = amountLeftToCopy - int64(n) - } - - return nil -} - -func legacyCopy(srcFile io.Reader, dstFile io.Writer) error { - _, err := pools.Copy(dstFile, srcFile) - - return err -} - -func copyXattr(srcPath, dstPath, attr string) error { - data, err := system.Lgetxattr(srcPath, attr) - if err != nil { - return err - } - if data != nil { - if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil { - return err - } - } - return nil -} - -type fileID struct { - dev uint64 - ino uint64 -} - -type dirMtimeInfo struct { - dstPath *string - stat *syscall.Stat_t -} - -// DirCopy copies or hardlinks the contents of one directory to another, -// properly handling xattrs, and soft links -// -// Copying xattrs can be opted out of by passing false for copyXattrs. 
-func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error { - copyWithFileRange := true - copyWithFileClone := true - - // This is a map of source file inodes to dst file paths - copiedFiles := make(map[fileID]string) - - dirsToSetMtimes := list.New() - err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - relPath, err := filepath.Rel(srcDir, srcPath) - if err != nil { - return err - } - - dstPath := filepath.Join(dstDir, relPath) - if err != nil { - return err - } - - stat, ok := f.Sys().(*syscall.Stat_t) - if !ok { - return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath) - } - - isHardlink := false - - switch f.Mode() & os.ModeType { - case 0: // Regular file - id := fileID{dev: stat.Dev, ino: stat.Ino} - if copyMode == Hardlink { - isHardlink = true - if err2 := os.Link(srcPath, dstPath); err2 != nil { - return err2 - } - } else if hardLinkDstPath, ok := copiedFiles[id]; ok { - if err2 := os.Link(hardLinkDstPath, dstPath); err2 != nil { - return err2 - } - } else { - if err2 := copyRegular(srcPath, dstPath, f, ©WithFileRange, ©WithFileClone); err2 != nil { - return err2 - } - copiedFiles[id] = dstPath - } - - case os.ModeDir: - if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) { - return err - } - - case os.ModeSymlink: - link, err := os.Readlink(srcPath) - if err != nil { - return err - } - - if err := os.Symlink(link, dstPath); err != nil { - return err - } - - case os.ModeNamedPipe: - fallthrough - case os.ModeSocket: - if err := unix.Mkfifo(dstPath, stat.Mode); err != nil { - return err - } - - case os.ModeDevice: - if rsystem.RunningInUserNS() { - // cannot create a device if running in user namespace - return nil - } - if err := unix.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil { - return err - } - - default: - return fmt.Errorf("unknown file type for %s", srcPath) - } - - // Everything below is copying metadata from src to dst. All this metadata - // already shares an inode for hardlinks. - if isHardlink { - return nil - } - - if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil { - return err - } - - if copyXattrs { - if err := doCopyXattrs(srcPath, dstPath); err != nil { - return err - } - } - - isSymlink := f.Mode()&os.ModeSymlink != 0 - - // There is no LChmod, so ignore mode for symlink. 
Also, this - // must happen after chown, as that can modify the file mode - if !isSymlink { - if err := os.Chmod(dstPath, f.Mode()); err != nil { - return err - } - } - - // system.Chtimes doesn't support a NOFOLLOW flag atm - // nolint: unconvert - if f.IsDir() { - dirsToSetMtimes.PushFront(&dirMtimeInfo{dstPath: &dstPath, stat: stat}) - } else if !isSymlink { - aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec)) - mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec)) - if err := system.Chtimes(dstPath, aTime, mTime); err != nil { - return err - } - } else { - ts := []syscall.Timespec{stat.Atim, stat.Mtim} - if err := system.LUtimesNano(dstPath, ts); err != nil { - return err - } - } - return nil - }) - if err != nil { - return err - } - for e := dirsToSetMtimes.Front(); e != nil; e = e.Next() { - mtimeInfo := e.Value.(*dirMtimeInfo) - ts := []syscall.Timespec{mtimeInfo.stat.Atim, mtimeInfo.stat.Mtim} - if err := system.LUtimesNano(*mtimeInfo.dstPath, ts); err != nil { - return err - } - } - - return nil -} - -func doCopyXattrs(srcPath, dstPath string) error { - if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil { - return err - } - - // We need to copy this attribute if it appears in an overlay upper layer, as - // this function is used to copy those. It is set by overlay if a directory - // is removed and then re-created and should not inherit anything from the - // same dir in the lower dir. - return copyXattr(srcPath, dstPath, "trusted.overlay.opaque") -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/counter.go b/vendor/github.com/docker/docker/daemon/graphdriver/counter.go deleted file mode 100644 index 2772bd247..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/counter.go +++ /dev/null @@ -1,62 +0,0 @@ -package graphdriver // import "github.com/docker/docker/daemon/graphdriver" - -import "sync" - -type minfo struct { - check bool - count int -} - -// RefCounter is a generic counter for use by graphdriver Get/Put calls -type RefCounter struct { - counts map[string]*minfo - mu sync.Mutex - checker Checker -} - -// NewRefCounter returns a new RefCounter -func NewRefCounter(c Checker) *RefCounter { - return &RefCounter{ - checker: c, - counts: make(map[string]*minfo), - } -} - -// Increment increases the ref count for the given id and returns the current count -func (c *RefCounter) Increment(path string) int { - return c.incdec(path, func(minfo *minfo) { - minfo.count++ - }) -} - -// Decrement decreases the ref count for the given id and returns the current count -func (c *RefCounter) Decrement(path string) int { - return c.incdec(path, func(minfo *minfo) { - minfo.count-- - }) -} - -func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int { - c.mu.Lock() - m := c.counts[path] - if m == nil { - m = &minfo{} - c.counts[path] = m - } - // if we are checking this path for the first time check to make sure - // if it was already mounted on the system and make sure we have a correct ref - // count if it is mounted as it is in use. 
- if !m.check { - m.check = true - if c.checker.IsMounted(path) { - m.count++ - } - } - infoOp(m) - count := m.count - if count <= 0 { - delete(c.counts, path) - } - c.mu.Unlock() - return count -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/device_setup.go b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/device_setup.go deleted file mode 100644 index 4d7f35c40..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/device_setup.go +++ /dev/null @@ -1,231 +0,0 @@ -package devmapper // import "github.com/docker/docker/daemon/graphdriver/devmapper" - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "reflect" - "strings" - - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -type directLVMConfig struct { - Device string - ThinpPercent uint64 - ThinpMetaPercent uint64 - AutoExtendPercent uint64 - AutoExtendThreshold uint64 -} - -var ( - errThinpPercentMissing = errors.New("must set both `dm.thinp_percent` and `dm.thinp_metapercent` if either is specified") - errThinpPercentTooBig = errors.New("combined `dm.thinp_percent` and `dm.thinp_metapercent` must not be greater than 100") - errMissingSetupDevice = errors.New("must provide device path in `dm.setup_device` in order to configure direct-lvm") -) - -func validateLVMConfig(cfg directLVMConfig) error { - if reflect.DeepEqual(cfg, directLVMConfig{}) { - return nil - } - if cfg.Device == "" { - return errMissingSetupDevice - } - if (cfg.ThinpPercent > 0 && cfg.ThinpMetaPercent == 0) || cfg.ThinpMetaPercent > 0 && cfg.ThinpPercent == 0 { - return errThinpPercentMissing - } - - if cfg.ThinpPercent+cfg.ThinpMetaPercent > 100 { - return errThinpPercentTooBig - } - return nil -} - -func checkDevAvailable(dev string) error { - lvmScan, err := exec.LookPath("lvmdiskscan") - if err != nil { - logrus.Debug("could not find lvmdiskscan") - return nil - } - - out, err := exec.Command(lvmScan).CombinedOutput() - if err != nil { - logrus.WithError(err).Error(string(out)) - return nil - } - - if !bytes.Contains(out, []byte(dev)) { - return errors.Errorf("%s is not available for use with devicemapper", dev) - } - return nil -} - -func checkDevInVG(dev string) error { - pvDisplay, err := exec.LookPath("pvdisplay") - if err != nil { - logrus.Debug("could not find pvdisplay") - return nil - } - - out, err := exec.Command(pvDisplay, dev).CombinedOutput() - if err != nil { - logrus.WithError(err).Error(string(out)) - return nil - } - - scanner := bufio.NewScanner(bytes.NewReader(bytes.TrimSpace(out))) - for scanner.Scan() { - fields := strings.SplitAfter(strings.TrimSpace(scanner.Text()), "VG Name") - if len(fields) > 1 { - // got "VG Name" line" - vg := strings.TrimSpace(fields[1]) - if len(vg) > 0 { - return errors.Errorf("%s is already part of a volume group %q: must remove this device from any volume group or provide a different device", dev, vg) - } - logrus.Error(fields) - break - } - } - return nil -} - -func checkDevHasFS(dev string) error { - blkid, err := exec.LookPath("blkid") - if err != nil { - logrus.Debug("could not find blkid") - return nil - } - - out, err := exec.Command(blkid, dev).CombinedOutput() - if err != nil { - logrus.WithError(err).Error(string(out)) - return nil - } - - fields := bytes.Fields(out) - for _, f := range fields { - kv := bytes.Split(f, []byte{'='}) - if bytes.Equal(kv[0], []byte("TYPE")) { - v := bytes.Trim(kv[1], "\"") - if len(v) > 0 { - return errors.Errorf("%s has a filesystem already, use 
dm.directlvm_device_force=true if you want to wipe the device", dev) - } - return nil - } - } - return nil -} - -func verifyBlockDevice(dev string, force bool) error { - if err := checkDevAvailable(dev); err != nil { - return err - } - if err := checkDevInVG(dev); err != nil { - return err - } - if force { - return nil - } - return checkDevHasFS(dev) -} - -func readLVMConfig(root string) (directLVMConfig, error) { - var cfg directLVMConfig - - p := filepath.Join(root, "setup-config.json") - b, err := ioutil.ReadFile(p) - if err != nil { - if os.IsNotExist(err) { - return cfg, nil - } - return cfg, errors.Wrap(err, "error reading existing setup config") - } - - // check if this is just an empty file, no need to produce a json error later if so - if len(b) == 0 { - return cfg, nil - } - - err = json.Unmarshal(b, &cfg) - return cfg, errors.Wrap(err, "error unmarshaling previous device setup config") -} - -func writeLVMConfig(root string, cfg directLVMConfig) error { - p := filepath.Join(root, "setup-config.json") - b, err := json.Marshal(cfg) - if err != nil { - return errors.Wrap(err, "error marshalling direct lvm config") - } - err = ioutil.WriteFile(p, b, 0600) - return errors.Wrap(err, "error writing direct lvm config to file") -} - -func setupDirectLVM(cfg directLVMConfig) error { - lvmProfileDir := "/etc/lvm/profile" - binaries := []string{"pvcreate", "vgcreate", "lvcreate", "lvconvert", "lvchange", "thin_check"} - - for _, bin := range binaries { - if _, err := exec.LookPath(bin); err != nil { - return errors.Wrap(err, "error looking up command `"+bin+"` while setting up direct lvm") - } - } - - err := os.MkdirAll(lvmProfileDir, 0755) - if err != nil { - return errors.Wrap(err, "error creating lvm profile directory") - } - - if cfg.AutoExtendPercent == 0 { - cfg.AutoExtendPercent = 20 - } - - if cfg.AutoExtendThreshold == 0 { - cfg.AutoExtendThreshold = 80 - } - - if cfg.ThinpPercent == 0 { - cfg.ThinpPercent = 95 - } - if cfg.ThinpMetaPercent == 0 { - cfg.ThinpMetaPercent = 1 - } - - out, err := exec.Command("pvcreate", "-f", cfg.Device).CombinedOutput() - if err != nil { - return errors.Wrap(err, string(out)) - } - - out, err = exec.Command("vgcreate", "docker", cfg.Device).CombinedOutput() - if err != nil { - return errors.Wrap(err, string(out)) - } - - out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpool", "docker", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpPercent)).CombinedOutput() - if err != nil { - return errors.Wrap(err, string(out)) - } - out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpoolmeta", "docker", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpMetaPercent)).CombinedOutput() - if err != nil { - return errors.Wrap(err, string(out)) - } - - out, err = exec.Command("lvconvert", "-y", "--zero", "n", "-c", "512K", "--thinpool", "docker/thinpool", "--poolmetadata", "docker/thinpoolmeta").CombinedOutput() - if err != nil { - return errors.Wrap(err, string(out)) - } - - profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent) - err = ioutil.WriteFile(lvmProfileDir+"/docker-thinpool.profile", []byte(profile), 0600) - if err != nil { - return errors.Wrap(err, "error writing docker thinp autoextend profile") - } - - out, err = exec.Command("lvchange", "--metadataprofile", "docker-thinpool", "docker/thinpool").CombinedOutput() - return errors.Wrap(err, string(out)) -} diff --git 
a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go deleted file mode 100644 index 2bfbf05a2..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/deviceset.go +++ /dev/null @@ -1,2824 +0,0 @@ -// +build linux - -package devmapper // import "github.com/docker/docker/daemon/graphdriver/devmapper" - -import ( - "bufio" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - "reflect" - "strconv" - "strings" - "sync" - "time" - - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/pkg/devicemapper" - "github.com/docker/docker/pkg/dmesg" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/loopback" - "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/pkg/parsers" - "github.com/docker/docker/pkg/parsers/kernel" - "github.com/docker/go-units" - "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -var ( - defaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 - defaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 - defaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 - defaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors - defaultUdevSyncOverride = false - maxDeviceID = 0xffffff // 24 bit, pool limit - deviceIDMapSz = (maxDeviceID + 1) / 8 - driverDeferredRemovalSupport = false - enableDeferredRemoval = false - enableDeferredDeletion = false - userBaseSize = false - defaultMinFreeSpacePercent uint32 = 10 - lvmSetupConfigForce bool -) - -const deviceSetMetaFile = "deviceset-metadata" -const transactionMetaFile = "transaction-metadata" - -type transaction struct { - OpenTransactionID uint64 `json:"open_transaction_id"` - DeviceIDHash string `json:"device_hash"` - DeviceID int `json:"device_id"` -} - -type devInfo struct { - Hash string `json:"-"` - DeviceID int `json:"device_id"` - Size uint64 `json:"size"` - TransactionID uint64 `json:"transaction_id"` - Initialized bool `json:"initialized"` - Deleted bool `json:"deleted"` - devices *DeviceSet - - // The global DeviceSet lock guarantees that we serialize all - // the calls to libdevmapper (which is not threadsafe), but we - // sometimes release that lock while sleeping. In that case - // this per-device lock is still held, protecting against - // other accesses to the device that we're doing the wait on. - // - // WARNING: In order to avoid AB-BA deadlocks when releasing - // the global lock while holding the per-device locks all - // device locks must be acquired *before* the device lock, and - // multiple device locks should be acquired parent before child. 
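// The lock-ordering rule documented above can be read as: take per-device
// locks (parent before child) before the global DeviceSet lock, so that
// releasing and re-acquiring the global lock while a device lock is held
// can never deadlock against a goroutine locking in the same order. A toy
// illustration of that discipline (the types here are hypothetical):
package main

import (
	"fmt"
	"sync"
)

type device struct{ mu sync.Mutex }
type deviceSet struct{ mu sync.Mutex }

func withLocks(set *deviceSet, parent, child *device, op func()) {
	parent.mu.Lock() // device locks first, parent before child
	defer parent.mu.Unlock()
	child.mu.Lock()
	defer child.mu.Unlock()
	set.mu.Lock() // global lock last
	defer set.mu.Unlock()
	op()
}

func main() {
	var s deviceSet
	var p, c device
	withLocks(&s, &p, &c, func() { fmt.Println("serialized libdevmapper call") })
}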
- lock sync.Mutex -} - -type metaData struct { - Devices map[string]*devInfo `json:"Devices"` -} - -// DeviceSet holds information about list of devices -type DeviceSet struct { - metaData `json:"-"` - sync.Mutex `json:"-"` // Protects all fields of DeviceSet and serializes calls into libdevmapper - root string - devicePrefix string - TransactionID uint64 `json:"-"` - NextDeviceID int `json:"next_device_id"` - deviceIDMap []byte - - // Options - dataLoopbackSize int64 - metaDataLoopbackSize int64 - baseFsSize uint64 - filesystem string - mountOptions string - mkfsArgs []string - dataDevice string // block or loop dev - dataLoopFile string // loopback file, if used - metadataDevice string // block or loop dev - metadataLoopFile string // loopback file, if used - doBlkDiscard bool - thinpBlockSize uint32 - thinPoolDevice string - transaction `json:"-"` - overrideUdevSyncCheck bool - deferredRemove bool // use deferred removal - deferredDelete bool // use deferred deletion - BaseDeviceUUID string // save UUID of base device - BaseDeviceFilesystem string // save filesystem of base device - nrDeletedDevices uint // number of deleted devices - deletionWorkerTicker *time.Ticker - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - minFreeSpacePercent uint32 //min free space percentage in thinpool - xfsNospaceRetries string // max retries when xfs receives ENOSPC - lvmSetupConfig directLVMConfig -} - -// DiskUsage contains information about disk usage and is used when reporting Status of a device. -type DiskUsage struct { - // Used bytes on the disk. - Used uint64 - // Total bytes on the disk. - Total uint64 - // Available bytes on the disk. - Available uint64 -} - -// Status returns the information about the device. -type Status struct { - // PoolName is the name of the data pool. - PoolName string - // DataFile is the actual block device for data. - DataFile string - // DataLoopback loopback file, if used. - DataLoopback string - // MetadataFile is the actual block device for metadata. - MetadataFile string - // MetadataLoopback is the loopback file, if used. - MetadataLoopback string - // Data is the disk used for data. - Data DiskUsage - // Metadata is the disk used for meta data. - Metadata DiskUsage - // BaseDeviceSize is base size of container and image - BaseDeviceSize uint64 - // BaseDeviceFS is backing filesystem. - BaseDeviceFS string - // SectorSize size of the vector. - SectorSize uint64 - // UdevSyncSupported is true if sync is supported. - UdevSyncSupported bool - // DeferredRemoveEnabled is true then the device is not unmounted. - DeferredRemoveEnabled bool - // True if deferred deletion is enabled. This is different from - // deferred removal. "removal" means that device mapper device is - // deactivated. Thin device is still in thin pool and can be activated - // again. But "deletion" means that thin device will be deleted from - // thin pool and it can't be activated again. - DeferredDeleteEnabled bool - DeferredDeletedDeviceCount uint - MinFreeSpace uint64 -} - -// Structure used to export image/container metadata in docker inspect. -type deviceMetadata struct { - deviceID int - deviceSize uint64 // size in bytes - deviceName string // Device name as used during activation -} - -// DevStatus returns information about device mounted containing its id, size and sector information. -type DevStatus struct { - // DeviceID is the id of the device. - DeviceID int - // Size is the size of the filesystem. 
- Size uint64 - // TransactionID is a unique integer per device set used to identify an operation on the file system, this number is incremental. - TransactionID uint64 - // SizeInSectors indicates the size of the sectors allocated. - SizeInSectors uint64 - // MappedSectors indicates number of mapped sectors. - MappedSectors uint64 - // HighestMappedSector is the pointer to the highest mapped sector. - HighestMappedSector uint64 -} - -func getDevName(name string) string { - return "/dev/mapper/" + name -} - -func (info *devInfo) Name() string { - hash := info.Hash - if hash == "" { - hash = "base" - } - return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash) -} - -func (info *devInfo) DevName() string { - return getDevName(info.Name()) -} - -func (devices *DeviceSet) loopbackDir() string { - return path.Join(devices.root, "devicemapper") -} - -func (devices *DeviceSet) metadataDir() string { - return path.Join(devices.root, "metadata") -} - -func (devices *DeviceSet) metadataFile(info *devInfo) string { - file := info.Hash - if file == "" { - file = "base" - } - return path.Join(devices.metadataDir(), file) -} - -func (devices *DeviceSet) transactionMetaFile() string { - return path.Join(devices.metadataDir(), transactionMetaFile) -} - -func (devices *DeviceSet) deviceSetMetaFile() string { - return path.Join(devices.metadataDir(), deviceSetMetaFile) -} - -func (devices *DeviceSet) oldMetadataFile() string { - return path.Join(devices.loopbackDir(), "json") -} - -func (devices *DeviceSet) getPoolName() string { - if devices.thinPoolDevice == "" { - return devices.devicePrefix + "-pool" - } - return devices.thinPoolDevice -} - -func (devices *DeviceSet) getPoolDevName() string { - return getDevName(devices.getPoolName()) -} - -func (devices *DeviceSet) hasImage(name string) bool { - dirname := devices.loopbackDir() - filename := path.Join(dirname, name) - - _, err := os.Stat(filename) - return err == nil -} - -// ensureImage creates a sparse file of bytes at the path -// /devicemapper/. -// If the file already exists and new size is larger than its current size, it grows to the new size. -// Either way it returns the full path. 
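// [Editorial sketch, not part of the deleted file.] The sparse-file
// technique that ensureImage (below) relies on: Truncate extends the
// file without allocating blocks, so a multi-gigabyte loopback image
// initially consumes almost no disk space. Helper name is invented.
func ensureSparseFile(filename string, size int64) error {
	f, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		return err
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		return err
	}
	if fi.Size() >= size {
		return nil // grow only; shrinking is refused upstream
	}
	return f.Truncate(size)
}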
-func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { - dirname := devices.loopbackDir() - filename := path.Join(dirname, name) - - uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) - if err != nil { - return "", err - } - if err := idtools.MkdirAllAndChown(dirname, 0700, idtools.IDPair{UID: uid, GID: gid}); err != nil { - return "", err - } - - if fi, err := os.Stat(filename); err != nil { - if !os.IsNotExist(err) { - return "", err - } - logrus.WithField("storage-driver", "devicemapper").Debugf("Creating loopback file %s for device-manage use", filename) - file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) - if err != nil { - return "", err - } - defer file.Close() - - if err := file.Truncate(size); err != nil { - return "", err - } - } else { - if fi.Size() < size { - file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) - if err != nil { - return "", err - } - defer file.Close() - if err := file.Truncate(size); err != nil { - return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %v", filename, err) - } - } else if fi.Size() > size { - logrus.WithField("storage-driver", "devicemapper").Warnf("Can't shrink loopback file %s", filename) - } - } - return filename, nil -} - -func (devices *DeviceSet) allocateTransactionID() uint64 { - devices.OpenTransactionID = devices.TransactionID + 1 - return devices.OpenTransactionID -} - -func (devices *DeviceSet) updatePoolTransactionID() error { - if err := devicemapper.SetTransactionID(devices.getPoolDevName(), devices.TransactionID, devices.OpenTransactionID); err != nil { - return fmt.Errorf("devmapper: Error setting devmapper transaction ID: %s", err) - } - devices.TransactionID = devices.OpenTransactionID - return nil -} - -func (devices *DeviceSet) removeMetadata(info *devInfo) error { - if err := os.RemoveAll(devices.metadataFile(info)); err != nil { - return fmt.Errorf("devmapper: Error removing metadata file %s: %s", devices.metadataFile(info), err) - } - return nil -} - -// Given json data and file path, write it to disk -func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error { - tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp") - if err != nil { - return fmt.Errorf("devmapper: Error creating metadata file: %s", err) - } - - n, err := tmpFile.Write(jsonData) - if err != nil { - return fmt.Errorf("devmapper: Error writing metadata to %s: %s", tmpFile.Name(), err) - } - if n < len(jsonData) { - return io.ErrShortWrite - } - if err := tmpFile.Sync(); err != nil { - return fmt.Errorf("devmapper: Error syncing metadata file %s: %s", tmpFile.Name(), err) - } - if err := tmpFile.Close(); err != nil { - return fmt.Errorf("devmapper: Error closing metadata file %s: %s", tmpFile.Name(), err) - } - if err := os.Rename(tmpFile.Name(), filePath); err != nil { - return fmt.Errorf("devmapper: Error committing metadata file %s: %s", tmpFile.Name(), err) - } - - return nil -} - -func (devices *DeviceSet) saveMetadata(info *devInfo) error { - jsonData, err := json.Marshal(info) - if err != nil { - return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) - } - return devices.writeMetaFile(jsonData, devices.metadataFile(info)) -} - -func (devices *DeviceSet) markDeviceIDUsed(deviceID int) { - var mask byte - i := deviceID % 8 - mask = 1 << uint(i) - devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] | mask -} - -func (devices *DeviceSet) markDeviceIDFree(deviceID int) { - var mask byte - i := deviceID % 8 - 
mask = ^(1 << uint(i)) - devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] & mask -} - -func (devices *DeviceSet) isDeviceIDFree(deviceID int) bool { - var mask byte - i := deviceID % 8 - mask = (1 << uint(i)) - return (devices.deviceIDMap[deviceID/8] & mask) == 0 -} - -// Should be called with devices.Lock() held. -func (devices *DeviceSet) lookupDevice(hash string) (*devInfo, error) { - info := devices.Devices[hash] - if info == nil { - info = devices.loadMetadata(hash) - if info == nil { - return nil, fmt.Errorf("devmapper: Unknown device %s", hash) - } - - devices.Devices[hash] = info - } - return info, nil -} - -func (devices *DeviceSet) lookupDeviceWithLock(hash string) (*devInfo, error) { - devices.Lock() - defer devices.Unlock() - info, err := devices.lookupDevice(hash) - return info, err -} - -// This function relies on that device hash map has been loaded in advance. -// Should be called with devices.Lock() held. -func (devices *DeviceSet) constructDeviceIDMap() { - logrus.WithField("storage-driver", "devicemapper").Debug("constructDeviceIDMap()") - defer logrus.WithField("storage-driver", "devicemapper").Debug("constructDeviceIDMap() END") - - for _, info := range devices.Devices { - devices.markDeviceIDUsed(info.DeviceID) - logrus.WithField("storage-driver", "devicemapper").Debugf("Added deviceId=%d to DeviceIdMap", info.DeviceID) - } -} - -func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) error { - logger := logrus.WithField("storage-driver", "devicemapper") - - // Skip some of the meta files which are not device files. - if strings.HasSuffix(finfo.Name(), ".migrated") { - logger.Debugf("Skipping file %s", path) - return nil - } - - if strings.HasPrefix(finfo.Name(), ".") { - logger.Debugf("Skipping file %s", path) - return nil - } - - if finfo.Name() == deviceSetMetaFile { - logger.Debugf("Skipping file %s", path) - return nil - } - - if finfo.Name() == transactionMetaFile { - logger.Debugf("Skipping file %s", path) - return nil - } - - logger.Debugf("Loading data for file %s", path) - - hash := finfo.Name() - if hash == "base" { - hash = "" - } - - // Include deleted devices also as cleanup delete device logic - // will go through it and see if there are any deleted devices. - if _, err := devices.lookupDevice(hash); err != nil { - return fmt.Errorf("devmapper: Error looking up device %s:%v", hash, err) - } - - return nil -} - -func (devices *DeviceSet) loadDeviceFilesOnStart() error { - logrus.WithField("storage-driver", "devicemapper").Debug("loadDeviceFilesOnStart()") - defer logrus.WithField("storage-driver", "devicemapper").Debug("loadDeviceFilesOnStart() END") - - var scan = func(path string, info os.FileInfo, err error) error { - if err != nil { - logrus.WithField("storage-driver", "devicemapper").Debugf("Can't walk the file %s", path) - return nil - } - - // Skip any directories - if info.IsDir() { - return nil - } - - return devices.deviceFileWalkFunction(path, info) - } - - return filepath.Walk(devices.metadataDir(), scan) -} - -// Should be called with devices.Lock() held. 
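// [Editorial sketch, not part of the deleted file.] The device-ID
// bitmap behind markDeviceIDUsed/markDeviceIDFree/isDeviceIDFree
// above, restated as a standalone type: one bit per 24-bit device ID,
// eight IDs packed per byte (hence deviceIDMapSz = (maxDeviceID+1)/8).
// Type and method names are invented.
type idBitmap []byte

func (b idBitmap) markUsed(id int) { b[id/8] |= 1 << uint(id%8) }
func (b idBitmap) markFree(id int) { b[id/8] &^= 1 << uint(id%8) }
func (b idBitmap) isFree(id int) bool { return b[id/8]&(1<<uint(id%8)) == 0 }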
-func (devices *DeviceSet) unregisterDevice(hash string) error { - logrus.WithField("storage-driver", "devicemapper").Debugf("unregisterDevice(%v)", hash) - info := &devInfo{ - Hash: hash, - } - - delete(devices.Devices, hash) - - if err := devices.removeMetadata(info); err != nil { - logrus.WithField("storage-driver", "devicemapper").Debugf("Error removing metadata: %s", err) - return err - } - - return nil -} - -// Should be called with devices.Lock() held. -func (devices *DeviceSet) registerDevice(id int, hash string, size uint64, transactionID uint64) (*devInfo, error) { - logrus.WithField("storage-driver", "devicemapper").Debugf("registerDevice(%v, %v)", id, hash) - info := &devInfo{ - Hash: hash, - DeviceID: id, - Size: size, - TransactionID: transactionID, - Initialized: false, - devices: devices, - } - - devices.Devices[hash] = info - - if err := devices.saveMetadata(info); err != nil { - // Try to remove unused device - delete(devices.Devices, hash) - return nil, err - } - - return info, nil -} - -func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bool) error { - logrus.WithField("storage-driver", "devicemapper").Debugf("activateDeviceIfNeeded(%v)", info.Hash) - - if info.Deleted && !ignoreDeleted { - return fmt.Errorf("devmapper: Can't activate device %v as it is marked for deletion", info.Hash) - } - - // Make sure deferred removal on device is canceled, if one was - // scheduled. - if err := devices.cancelDeferredRemovalIfNeeded(info); err != nil { - return fmt.Errorf("devmapper: Device Deferred Removal Cancellation Failed: %s", err) - } - - if devinfo, _ := devicemapper.GetInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { - return nil - } - - return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size) -} - -// xfsSupported checks if xfs is supported, returns nil if it is, otherwise an error -func xfsSupported() error { - // Make sure mkfs.xfs is available - if _, err := exec.LookPath("mkfs.xfs"); err != nil { - return err // error text is descriptive enough - } - - // Check if kernel supports xfs filesystem or not. - exec.Command("modprobe", "xfs").Run() - - f, err := os.Open("/proc/filesystems") - if err != nil { - return errors.Wrapf(err, "error checking for xfs support") - } - defer f.Close() - - s := bufio.NewScanner(f) - for s.Scan() { - if strings.HasSuffix(s.Text(), "\txfs") { - return nil - } - } - - if err := s.Err(); err != nil { - return errors.Wrapf(err, "error checking for xfs support") - } - - return errors.New(`kernel does not support xfs, or "modprobe xfs" failed`) -} - -func determineDefaultFS() string { - err := xfsSupported() - if err == nil { - return "xfs" - } - - logrus.WithField("storage-driver", "devicemapper").Warnf("XFS is not supported in your system (%v). Defaulting to ext4 filesystem", err) - return "ext4" -} - -// mkfsOptions tries to figure out whether some additional mkfs options are required -func mkfsOptions(fs string) []string { - if fs == "xfs" && !kernel.CheckKernelVersion(3, 16, 0) { - // For kernels earlier than 3.16 (and newer xfsutils), - // some xfs features need to be explicitly disabled. 
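// [Editorial sketch, not part of the deleted file.] The probe that
// xfsSupported (above) performs against /proc/filesystems, factored
// into a generic helper; the name and signature are invented.
func kernelSupportsFS(fstype string) (bool, error) {
	f, err := os.Open("/proc/filesystems")
	if err != nil {
		return false, err
	}
	defer f.Close()
	s := bufio.NewScanner(f)
	for s.Scan() {
		// Entries look like "nodev\tsysfs" or "\txfs"; match the last field.
		if strings.HasSuffix(s.Text(), "\t"+fstype) {
			return true, nil
		}
	}
	return false, s.Err()
}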
- return []string{"-m", "crc=0,finobt=0"} - } - - return []string{} -} - -func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) { - devname := info.DevName() - - if devices.filesystem == "" { - devices.filesystem = determineDefaultFS() - } - if err := devices.saveBaseDeviceFilesystem(devices.filesystem); err != nil { - return err - } - - args := mkfsOptions(devices.filesystem) - args = append(args, devices.mkfsArgs...) - args = append(args, devname) - - logrus.WithField("storage-driver", "devicemapper").Infof("Creating filesystem %s on device %s, mkfs args: %v", devices.filesystem, info.Name(), args) - defer func() { - if err != nil { - logrus.WithField("storage-driver", "devicemapper").Infof("Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err) - } else { - logrus.WithField("storage-driver", "devicemapper").Infof("Successfully created filesystem %s on device %s", devices.filesystem, info.Name()) - } - }() - - switch devices.filesystem { - case "xfs": - err = exec.Command("mkfs.xfs", args...).Run() - case "ext4": - err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0"}, args...)...).Run() - if err != nil { - err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0"}, args...)...).Run() - } - if err != nil { - return err - } - err = exec.Command("tune2fs", append([]string{"-c", "-1", "-i", "0"}, devname)...).Run() - default: - err = fmt.Errorf("devmapper: Unsupported filesystem type %s", devices.filesystem) - } - return -} - -func (devices *DeviceSet) migrateOldMetaData() error { - // Migrate old metadata file - jsonData, err := ioutil.ReadFile(devices.oldMetadataFile()) - if err != nil && !os.IsNotExist(err) { - return err - } - - if jsonData != nil { - m := metaData{Devices: make(map[string]*devInfo)} - - if err := json.Unmarshal(jsonData, &m); err != nil { - return err - } - - for hash, info := range m.Devices { - info.Hash = hash - devices.saveMetadata(info) - } - if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil { - return err - } - - } - - return nil -} - -// Cleanup deleted devices. It assumes that all the devices have been -// loaded in the hash table. -func (devices *DeviceSet) cleanupDeletedDevices() error { - devices.Lock() - - // If there are no deleted devices, there is nothing to do. - if devices.nrDeletedDevices == 0 { - devices.Unlock() - return nil - } - - var deletedDevices []*devInfo - - for _, info := range devices.Devices { - if !info.Deleted { - continue - } - logrus.WithField("storage-driver", "devicemapper").Debugf("Found deleted device %s.", info.Hash) - deletedDevices = append(deletedDevices, info) - } - - // Delete the deleted devices. DeleteDevice() first takes the info lock - // and then devices.Lock(). So drop it to avoid deadlock. - devices.Unlock() - - for _, info := range deletedDevices { - // This will again try deferred deletion. - if err := devices.DeleteDevice(info.Hash, false); err != nil { - logrus.WithField("storage-driver", "devicemapper").Warnf("Deletion of device %s, device_id=%v failed:%v", info.Hash, info.DeviceID, err) - } - } - - return nil -} - -func (devices *DeviceSet) countDeletedDevices() { - for _, info := range devices.Devices { - if !info.Deleted { - continue - } - devices.nrDeletedDevices++ - } -} - -func (devices *DeviceSet) startDeviceDeletionWorker() { - // Deferred deletion is not enabled. Don't do anything. 
- if !devices.deferredDelete { - return - } - - logrus.WithField("storage-driver", "devicemapper").Debug("Worker to cleanup deleted devices started") - for range devices.deletionWorkerTicker.C { - devices.cleanupDeletedDevices() - } -} - -func (devices *DeviceSet) initMetaData() error { - devices.Lock() - defer devices.Unlock() - - if err := devices.migrateOldMetaData(); err != nil { - return err - } - - _, transactionID, _, _, _, _, err := devices.poolStatus() - if err != nil { - return err - } - - devices.TransactionID = transactionID - - if err := devices.loadDeviceFilesOnStart(); err != nil { - return fmt.Errorf("devmapper: Failed to load device files:%v", err) - } - - devices.constructDeviceIDMap() - devices.countDeletedDevices() - - if err := devices.processPendingTransaction(); err != nil { - return err - } - - // Start a goroutine to cleanup Deleted Devices - go devices.startDeviceDeletionWorker() - return nil -} - -func (devices *DeviceSet) incNextDeviceID() { - // IDs are 24bit, so wrap around - devices.NextDeviceID = (devices.NextDeviceID + 1) & maxDeviceID -} - -func (devices *DeviceSet) getNextFreeDeviceID() (int, error) { - devices.incNextDeviceID() - for i := 0; i <= maxDeviceID; i++ { - if devices.isDeviceIDFree(devices.NextDeviceID) { - devices.markDeviceIDUsed(devices.NextDeviceID) - return devices.NextDeviceID, nil - } - devices.incNextDeviceID() - } - - return 0, fmt.Errorf("devmapper: Unable to find a free device ID") -} - -func (devices *DeviceSet) poolHasFreeSpace() error { - if devices.minFreeSpacePercent == 0 { - return nil - } - - _, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() - if err != nil { - return err - } - - minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100 - if minFreeData < 1 { - minFreeData = 1 - } - dataFree := dataTotal - dataUsed - if dataFree < minFreeData { - return fmt.Errorf("devmapper: Thin Pool has %v free data blocks which is less than minimum required %v free data blocks. Create more free space in thin pool or use dm.min_free_space option to change behavior", (dataTotal - dataUsed), minFreeData) - } - - minFreeMetadata := (metadataTotal * uint64(devices.minFreeSpacePercent)) / 100 - if minFreeMetadata < 1 { - minFreeMetadata = 1 - } - - metadataFree := metadataTotal - metadataUsed - if metadataFree < minFreeMetadata { - return fmt.Errorf("devmapper: Thin Pool has %v free metadata blocks which is less than minimum required %v free metadata blocks. Create more free metadata space in thin pool or use dm.min_free_space option to change behavior", (metadataTotal - metadataUsed), minFreeMetadata) - } - - return nil -} - -func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) { - devices.Lock() - defer devices.Unlock() - - deviceID, err := devices.getNextFreeDeviceID() - if err != nil { - return nil, err - } - - logger := logrus.WithField("storage-driver", "devicemapper") - - if err := devices.openTransaction(hash, deviceID); err != nil { - logger.Debugf("Error opening transaction hash = %s deviceID = %d", hash, deviceID) - devices.markDeviceIDFree(deviceID) - return nil, err - } - - for { - if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceID); err != nil { - if devicemapper.DeviceIDExists(err) { - // Device ID already exists. This should not - // happen. Now we have a mechanism to find - // a free device ID. So something is not right. - // Give a warning and continue. 
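// [Editorial sketch, not part of the deleted file.] The wrap-around
// scan behind getNextFreeDeviceID above: a 24-bit cursor advances
// modulo maxDeviceID+1 and probes the bitmap until a free ID turns
// up, keeping allocation cheap while the pool is sparsely used.
// Helper signature is invented.
func nextFreeID(cursor *int, isFree func(int) bool, markUsed func(int)) (int, error) {
	for i := 0; i <= maxDeviceID; i++ {
		*cursor = (*cursor + 1) & maxDeviceID // wrap at 2^24
		if isFree(*cursor) {
			markUsed(*cursor)
			return *cursor, nil
		}
	}
	return 0, fmt.Errorf("no free device ID in pool")
}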
- logger.Errorf("Device ID %d exists in pool but it is supposed to be unused", deviceID) - deviceID, err = devices.getNextFreeDeviceID() - if err != nil { - return nil, err - } - // Save new device id into transaction - devices.refreshTransaction(deviceID) - continue - } - logger.Debugf("Error creating device: %s", err) - devices.markDeviceIDFree(deviceID) - return nil, err - } - break - } - - logger.Debugf("Registering device (id %v) with FS size %v", deviceID, devices.baseFsSize) - info, err := devices.registerDevice(deviceID, hash, devices.baseFsSize, devices.OpenTransactionID) - if err != nil { - _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) - devices.markDeviceIDFree(deviceID) - return nil, err - } - - if err := devices.closeTransaction(); err != nil { - devices.unregisterDevice(hash) - devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) - devices.markDeviceIDFree(deviceID) - return nil, err - } - return info, nil -} - -func (devices *DeviceSet) takeSnapshot(hash string, baseInfo *devInfo, size uint64) error { - var ( - devinfo *devicemapper.Info - err error - ) - - if err = devices.poolHasFreeSpace(); err != nil { - return err - } - - if devices.deferredRemove { - devinfo, err = devicemapper.GetInfoWithDeferred(baseInfo.Name()) - if err != nil { - return err - } - if devinfo != nil && devinfo.DeferredRemove != 0 { - err = devices.cancelDeferredRemoval(baseInfo) - if err != nil { - // If Error is ErrEnxio. Device is probably already gone. Continue. - if err != devicemapper.ErrEnxio { - return err - } - devinfo = nil - } else { - defer devices.deactivateDevice(baseInfo) - } - } - } else { - devinfo, err = devicemapper.GetInfo(baseInfo.Name()) - if err != nil { - return err - } - } - - doSuspend := devinfo != nil && devinfo.Exists != 0 - - if doSuspend { - if err = devicemapper.SuspendDevice(baseInfo.Name()); err != nil { - return err - } - defer devicemapper.ResumeDevice(baseInfo.Name()) - } - - return devices.createRegisterSnapDevice(hash, baseInfo, size) -} - -func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo, size uint64) error { - deviceID, err := devices.getNextFreeDeviceID() - if err != nil { - return err - } - - logger := logrus.WithField("storage-driver", "devicemapper") - - if err := devices.openTransaction(hash, deviceID); err != nil { - logger.Debugf("Error opening transaction hash = %s deviceID = %d", hash, deviceID) - devices.markDeviceIDFree(deviceID) - return err - } - - for { - if err := devicemapper.CreateSnapDeviceRaw(devices.getPoolDevName(), deviceID, baseInfo.DeviceID); err != nil { - if devicemapper.DeviceIDExists(err) { - // Device ID already exists. This should not - // happen. Now we have a mechanism to find - // a free device ID. So something is not right. - // Give a warning and continue. 
- logger.Errorf("Device ID %d exists in pool but it is supposed to be unused", deviceID) - deviceID, err = devices.getNextFreeDeviceID() - if err != nil { - return err - } - // Save new device id into transaction - devices.refreshTransaction(deviceID) - continue - } - logger.Debugf("Error creating snap device: %s", err) - devices.markDeviceIDFree(deviceID) - return err - } - break - } - - if _, err := devices.registerDevice(deviceID, hash, size, devices.OpenTransactionID); err != nil { - devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) - devices.markDeviceIDFree(deviceID) - logger.Debugf("Error registering device: %s", err) - return err - } - - if err := devices.closeTransaction(); err != nil { - devices.unregisterDevice(hash) - devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) - devices.markDeviceIDFree(deviceID) - return err - } - return nil -} - -func (devices *DeviceSet) loadMetadata(hash string) *devInfo { - info := &devInfo{Hash: hash, devices: devices} - logger := logrus.WithField("storage-driver", "devicemapper") - - jsonData, err := ioutil.ReadFile(devices.metadataFile(info)) - if err != nil { - logger.Debugf("Failed to read %s with err: %v", devices.metadataFile(info), err) - return nil - } - - if err := json.Unmarshal(jsonData, &info); err != nil { - logger.Debugf("Failed to unmarshal devInfo from %s with err: %v", devices.metadataFile(info), err) - return nil - } - - if info.DeviceID > maxDeviceID { - logger.Errorf("Ignoring Invalid DeviceId=%d", info.DeviceID) - return nil - } - - return info -} - -func getDeviceUUID(device string) (string, error) { - out, err := exec.Command("blkid", "-s", "UUID", "-o", "value", device).Output() - if err != nil { - return "", fmt.Errorf("devmapper: Failed to find uuid for device %s:%v", device, err) - } - - uuid := strings.TrimSuffix(string(out), "\n") - uuid = strings.TrimSpace(uuid) - logrus.WithField("storage-driver", "devicemapper").Debugf("UUID for device: %s is:%s", device, uuid) - return uuid, nil -} - -func (devices *DeviceSet) getBaseDeviceSize() uint64 { - info, _ := devices.lookupDevice("") - if info == nil { - return 0 - } - return info.Size -} - -func (devices *DeviceSet) getBaseDeviceFS() string { - return devices.BaseDeviceFilesystem -} - -func (devices *DeviceSet) verifyBaseDeviceUUIDFS(baseInfo *devInfo) error { - devices.Lock() - defer devices.Unlock() - - if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { - return err - } - defer devices.deactivateDevice(baseInfo) - - uuid, err := getDeviceUUID(baseInfo.DevName()) - if err != nil { - return err - } - - if devices.BaseDeviceUUID != uuid { - return fmt.Errorf("devmapper: Current Base Device UUID:%s does not match with stored UUID:%s. Possibly using a different thin pool than last invocation", uuid, devices.BaseDeviceUUID) - } - - if devices.BaseDeviceFilesystem == "" { - fsType, err := ProbeFsType(baseInfo.DevName()) - if err != nil { - return err - } - if err := devices.saveBaseDeviceFilesystem(fsType); err != nil { - return err - } - } - - // If user specified a filesystem using dm.fs option and current - // file system of base image is not same, warn user that dm.fs - // will be ignored. - if devices.BaseDeviceFilesystem != devices.filesystem { - logrus.WithField("storage-driver", "devicemapper").Warnf("Base device already exists and has filesystem %s on it. 
User specified filesystem %s will be ignored.", devices.BaseDeviceFilesystem, devices.filesystem) - devices.filesystem = devices.BaseDeviceFilesystem - } - return nil -} - -func (devices *DeviceSet) saveBaseDeviceFilesystem(fs string) error { - devices.BaseDeviceFilesystem = fs - return devices.saveDeviceSetMetaData() -} - -func (devices *DeviceSet) saveBaseDeviceUUID(baseInfo *devInfo) error { - devices.Lock() - defer devices.Unlock() - - if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { - return err - } - defer devices.deactivateDevice(baseInfo) - - uuid, err := getDeviceUUID(baseInfo.DevName()) - if err != nil { - return err - } - - devices.BaseDeviceUUID = uuid - return devices.saveDeviceSetMetaData() -} - -func (devices *DeviceSet) createBaseImage() error { - logrus.WithField("storage-driver", "devicemapper").Debug("Initializing base device-mapper thin volume") - - // Create initial device - info, err := devices.createRegisterDevice("") - if err != nil { - return err - } - - logrus.WithField("storage-driver", "devicemapper").Debug("Creating filesystem on base device-mapper thin volume") - - if err := devices.activateDeviceIfNeeded(info, false); err != nil { - return err - } - - if err := devices.createFilesystem(info); err != nil { - return err - } - - info.Initialized = true - if err := devices.saveMetadata(info); err != nil { - info.Initialized = false - return err - } - - if err := devices.saveBaseDeviceUUID(info); err != nil { - return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) - } - - return nil -} - -// Returns if thin pool device exists or not. If device exists, also makes -// sure it is a thin pool device and not some other type of device. -func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) { - logrus.WithField("storage-driver", "devicemapper").Debugf("Checking for existence of the pool %s", thinPoolDevice) - - info, err := devicemapper.GetInfo(thinPoolDevice) - if err != nil { - return false, fmt.Errorf("devmapper: GetInfo() on device %s failed: %v", thinPoolDevice, err) - } - - // Device does not exist. - if info.Exists == 0 { - return false, nil - } - - _, _, deviceType, _, err := devicemapper.GetStatus(thinPoolDevice) - if err != nil { - return false, fmt.Errorf("devmapper: GetStatus() on device %s failed: %v", thinPoolDevice, err) - } - - if deviceType != "thin-pool" { - return false, fmt.Errorf("devmapper: Device %s is not a thin pool", thinPoolDevice) - } - - return true, nil -} - -func (devices *DeviceSet) checkThinPool() error { - _, transactionID, dataUsed, _, _, _, err := devices.poolStatus() - if err != nil { - return err - } - if dataUsed != 0 { - return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) that already has used data blocks", - devices.thinPoolDevice) - } - if transactionID != 0 { - return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) with non-zero transaction ID", - devices.thinPoolDevice) - } - return nil -} - -// Base image is initialized properly. Either save UUID for first time (for -// upgrade case or verify UUID. -func (devices *DeviceSet) setupVerifyBaseImageUUIDFS(baseInfo *devInfo) error { - // If BaseDeviceUUID is nil (upgrade case), save it and return success. 
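// [Editorial sketch, not part of the deleted file.] The identity
// check behind verifyBaseDeviceUUIDFS above: ask blkid for the
// filesystem UUID and compare it with the UUID recorded in the
// driver's metadata, catching a thin pool that was swapped between
// daemon runs. Helper name is invented.
func verifyStoredUUID(device, stored string) error {
	out, err := exec.Command("blkid", "-s", "UUID", "-o", "value", device).Output()
	if err != nil {
		return fmt.Errorf("failed to find uuid for device %s: %v", device, err)
	}
	if got := strings.TrimSpace(string(out)); got != stored {
		return fmt.Errorf("base device UUID %s does not match stored UUID %s", got, stored)
	}
	return nil
}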
- if devices.BaseDeviceUUID == "" { - if err := devices.saveBaseDeviceUUID(baseInfo); err != nil { - return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) - } - return nil - } - - if err := devices.verifyBaseDeviceUUIDFS(baseInfo); err != nil { - return fmt.Errorf("devmapper: Base Device UUID and Filesystem verification failed: %v", err) - } - - return nil -} - -func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error { - - if !userBaseSize { - return nil - } - - if devices.baseFsSize < devices.getBaseDeviceSize() { - return fmt.Errorf("devmapper: Base device size cannot be smaller than %s", units.HumanSize(float64(devices.getBaseDeviceSize()))) - } - - if devices.baseFsSize == devices.getBaseDeviceSize() { - return nil - } - - info.lock.Lock() - defer info.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - info.Size = devices.baseFsSize - - if err := devices.saveMetadata(info); err != nil { - // Try to remove unused device - delete(devices.Devices, info.Hash) - return err - } - - return devices.growFS(info) -} - -func (devices *DeviceSet) growFS(info *devInfo) error { - if err := devices.activateDeviceIfNeeded(info, false); err != nil { - return fmt.Errorf("Error activating devmapper device: %s", err) - } - - defer devices.deactivateDevice(info) - - fsMountPoint := "/run/docker/mnt" - if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) { - if err := os.MkdirAll(fsMountPoint, 0700); err != nil { - return err - } - defer os.RemoveAll(fsMountPoint) - } - - options := "" - if devices.BaseDeviceFilesystem == "xfs" { - // XFS needs nouuid or it can't mount filesystems with the same fs - options = joinMountOptions(options, "nouuid") - } - options = joinMountOptions(options, devices.mountOptions) - - if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil { - return fmt.Errorf("Error mounting '%s' on '%s' (fstype='%s' options='%s'): %s\n%v", info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options, err, string(dmesg.Dmesg(256))) - } - - defer unix.Unmount(fsMountPoint, unix.MNT_DETACH) - - switch devices.BaseDeviceFilesystem { - case "ext4": - if out, err := exec.Command("resize2fs", info.DevName()).CombinedOutput(); err != nil { - return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) - } - case "xfs": - if out, err := exec.Command("xfs_growfs", info.DevName()).CombinedOutput(); err != nil { - return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) - } - default: - return fmt.Errorf("Unsupported filesystem type %s", devices.BaseDeviceFilesystem) - } - return nil -} - -func (devices *DeviceSet) setupBaseImage() error { - oldInfo, _ := devices.lookupDeviceWithLock("") - - // base image already exists. If it is initialized properly, do UUID - // verification and return. Otherwise remove image and set it up - // fresh. - - if oldInfo != nil { - if oldInfo.Initialized && !oldInfo.Deleted { - if err := devices.setupVerifyBaseImageUUIDFS(oldInfo); err != nil { - return err - } - return devices.checkGrowBaseDeviceFS(oldInfo) - } - - logrus.WithField("storage-driver", "devicemapper").Debug("Removing uninitialized base image") - // If previous base device is in deferred delete state, - // that needs to be cleaned up first. So don't try - // deferred deletion. - if err := devices.DeleteDevice("", true); err != nil { - return err - } - } - - // If we are setting up base image for the first time, make sure - // thin pool is empty. 
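// [Editorial sketch, not part of the deleted file.] The grow dispatch
// inside growFS above, isolated: ext4 grows online via resize2fs and
// xfs via xfs_growfs, both invoked on the device node as the original
// does. The mount/unmount bracketing is omitted here.
func growMountedFS(fstype, devname string) error {
	switch fstype {
	case "ext4":
		if out, err := exec.Command("resize2fs", devname).CombinedOutput(); err != nil {
			return fmt.Errorf("resize2fs failed: %v: %s", err, out)
		}
	case "xfs":
		if out, err := exec.Command("xfs_growfs", devname).CombinedOutput(); err != nil {
			return fmt.Errorf("xfs_growfs failed: %v: %s", err, out)
		}
	default:
		return fmt.Errorf("unsupported filesystem type %s", fstype)
	}
	return nil
}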
- if devices.thinPoolDevice != "" && oldInfo == nil { - if err := devices.checkThinPool(); err != nil { - return err - } - } - - // Create new base image device - return devices.createBaseImage() -} - -func setCloseOnExec(name string) { - fileInfos, _ := ioutil.ReadDir("/proc/self/fd") - for _, i := range fileInfos { - link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) - if link == name { - fd, err := strconv.Atoi(i.Name()) - if err == nil { - unix.CloseOnExec(fd) - } - } - } -} - -func major(device uint64) uint64 { - return (device >> 8) & 0xfff -} - -func minor(device uint64) uint64 { - return (device & 0xff) | ((device >> 12) & 0xfff00) -} - -// ResizePool increases the size of the pool. -func (devices *DeviceSet) ResizePool(size int64) error { - dirname := devices.loopbackDir() - datafilename := path.Join(dirname, "data") - if len(devices.dataDevice) > 0 { - datafilename = devices.dataDevice - } - metadatafilename := path.Join(dirname, "metadata") - if len(devices.metadataDevice) > 0 { - metadatafilename = devices.metadataDevice - } - - datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0) - if datafile == nil { - return err - } - defer datafile.Close() - - fi, err := datafile.Stat() - if fi == nil { - return err - } - - if fi.Size() > size { - return fmt.Errorf("devmapper: Can't shrink file") - } - - dataloopback := loopback.FindLoopDeviceFor(datafile) - if dataloopback == nil { - return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", datafilename) - } - defer dataloopback.Close() - - metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0) - if metadatafile == nil { - return err - } - defer metadatafile.Close() - - metadataloopback := loopback.FindLoopDeviceFor(metadatafile) - if metadataloopback == nil { - return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", metadatafilename) - } - defer metadataloopback.Close() - - // Grow loopback file - if err := datafile.Truncate(size); err != nil { - return fmt.Errorf("devmapper: Unable to grow loopback file: %s", err) - } - - // Reload size for loopback device - if err := loopback.SetCapacity(dataloopback); err != nil { - return fmt.Errorf("Unable to update loopback capacity: %s", err) - } - - // Suspend the pool - if err := devicemapper.SuspendDevice(devices.getPoolName()); err != nil { - return fmt.Errorf("devmapper: Unable to suspend pool: %s", err) - } - - // Reload with the new block sizes - if err := devicemapper.ReloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil { - return fmt.Errorf("devmapper: Unable to reload pool: %s", err) - } - - // Resume the pool - if err := devicemapper.ResumeDevice(devices.getPoolName()); err != nil { - return fmt.Errorf("devmapper: Unable to resume pool: %s", err) - } - - return nil -} - -func (devices *DeviceSet) loadTransactionMetaData() error { - jsonData, err := ioutil.ReadFile(devices.transactionMetaFile()) - if err != nil { - // There is no active transaction. This will be the case - // during upgrade. 
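// [Editorial sketch, not part of the deleted file.] ResizePool above,
// reduced to its essential sequence with error wrapping elided: grow
// the backing file, tell the loop driver to re-read its size, then
// suspend / reload / resume the thin pool so device-mapper picks up
// the new capacity. Function name and parameters are invented.
func resizePoolSketch(devices *DeviceSet, backing, dataLoop, metaLoop *os.File, size int64) error {
	if err := backing.Truncate(size); err != nil {
		return err
	}
	if err := loopback.SetCapacity(dataLoop); err != nil {
		return err
	}
	pool := devices.getPoolName()
	if err := devicemapper.SuspendDevice(pool); err != nil {
		return err
	}
	if err := devicemapper.ReloadPool(pool, dataLoop, metaLoop, devices.thinpBlockSize); err != nil {
		return err
	}
	return devicemapper.ResumeDevice(pool)
}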
- if os.IsNotExist(err) { - devices.OpenTransactionID = devices.TransactionID - return nil - } - return err - } - - json.Unmarshal(jsonData, &devices.transaction) - return nil -} - -func (devices *DeviceSet) saveTransactionMetaData() error { - jsonData, err := json.Marshal(&devices.transaction) - if err != nil { - return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) - } - - return devices.writeMetaFile(jsonData, devices.transactionMetaFile()) -} - -func (devices *DeviceSet) removeTransactionMetaData() error { - return os.RemoveAll(devices.transactionMetaFile()) -} - -func (devices *DeviceSet) rollbackTransaction() error { - logger := logrus.WithField("storage-driver", "devicemapper") - - logger.Debugf("Rolling back open transaction: TransactionID=%d hash=%s device_id=%d", devices.OpenTransactionID, devices.DeviceIDHash, devices.DeviceID) - - // A device id might have already been deleted before transaction - // closed. In that case this call will fail. Just leave a message - // in case of failure. - if err := devicemapper.DeleteDevice(devices.getPoolDevName(), devices.DeviceID); err != nil { - logger.Errorf("Unable to delete device: %s", err) - } - - dinfo := &devInfo{Hash: devices.DeviceIDHash} - if err := devices.removeMetadata(dinfo); err != nil { - logger.Errorf("Unable to remove metadata: %s", err) - } else { - devices.markDeviceIDFree(devices.DeviceID) - } - - if err := devices.removeTransactionMetaData(); err != nil { - logger.Errorf("Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err) - } - - return nil -} - -func (devices *DeviceSet) processPendingTransaction() error { - if err := devices.loadTransactionMetaData(); err != nil { - return err - } - - // If there was open transaction but pool transaction ID is same - // as open transaction ID, nothing to roll back. - if devices.TransactionID == devices.OpenTransactionID { - return nil - } - - // If open transaction ID is less than pool transaction ID, something - // is wrong. Bail out. - if devices.OpenTransactionID < devices.TransactionID { - logrus.WithField("storage-driver", "devicemapper").Errorf("Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionID, devices.TransactionID) - return nil - } - - // Pool transaction ID is not same as open transaction. There is - // a transaction which was not completed. - if err := devices.rollbackTransaction(); err != nil { - return fmt.Errorf("devmapper: Rolling back open transaction failed: %s", err) - } - - devices.OpenTransactionID = devices.TransactionID - return nil -} - -func (devices *DeviceSet) loadDeviceSetMetaData() error { - jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile()) - if err != nil { - // For backward compatibility return success if file does - // not exist. 
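// [Editorial sketch, not part of the deleted file.] The recovery
// decision encoded in processPendingTransaction above, as a pure
// function (name invented): equal IDs mean the last transaction
// committed; a larger on-disk open ID means a device create was
// interrupted and must be rolled back; a smaller one is inconsistent
// metadata, which the driver only logs.
func needsRollback(poolTxID, openTxID uint64) bool {
	return openTxID > poolTxID
}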
- if os.IsNotExist(err) { - return nil - } - return err - } - - return json.Unmarshal(jsonData, devices) -} - -func (devices *DeviceSet) saveDeviceSetMetaData() error { - jsonData, err := json.Marshal(devices) - if err != nil { - return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) - } - - return devices.writeMetaFile(jsonData, devices.deviceSetMetaFile()) -} - -func (devices *DeviceSet) openTransaction(hash string, DeviceID int) error { - devices.allocateTransactionID() - devices.DeviceIDHash = hash - devices.DeviceID = DeviceID - if err := devices.saveTransactionMetaData(); err != nil { - return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) - } - return nil -} - -func (devices *DeviceSet) refreshTransaction(DeviceID int) error { - devices.DeviceID = DeviceID - if err := devices.saveTransactionMetaData(); err != nil { - return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) - } - return nil -} - -func (devices *DeviceSet) closeTransaction() error { - if err := devices.updatePoolTransactionID(); err != nil { - logrus.WithField("storage-driver", "devicemapper").Debug("Failed to close Transaction") - return err - } - return nil -} - -func determineDriverCapabilities(version string) error { - // Kernel driver version >= 4.27.0 support deferred removal - - logrus.WithField("storage-driver", "devicemapper").Debugf("kernel dm driver version is %s", version) - - versionSplit := strings.Split(version, ".") - major, err := strconv.Atoi(versionSplit[0]) - if err != nil { - return graphdriver.ErrNotSupported - } - - if major > 4 { - driverDeferredRemovalSupport = true - return nil - } - - if major < 4 { - return nil - } - - minor, err := strconv.Atoi(versionSplit[1]) - if err != nil { - return graphdriver.ErrNotSupported - } - - /* - * If major is 4 and minor is 27, then there is no need to - * check for patch level as it can not be less than 0. - */ - if minor >= 27 { - driverDeferredRemovalSupport = true - return nil - } - - return nil -} - -// Determine the major and minor number of loopback device -func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) { - var stat unix.Stat_t - err := unix.Stat(file.Name(), &stat) - if err != nil { - return 0, 0, err - } - - dev := stat.Rdev - majorNum := major(dev) - minorNum := minor(dev) - - logrus.WithField("storage-driver", "devicemapper").Debugf("Major:Minor for device: %s is:%v:%v", file.Name(), majorNum, minorNum) - return majorNum, minorNum, nil -} - -// Given a file which is backing file of a loop back device, find the -// loopback device name and its major/minor number. 
-func getLoopFileDeviceMajMin(filename string) (string, uint64, uint64, error) { - file, err := os.Open(filename) - if err != nil { - logrus.WithField("storage-driver", "devicemapper").Debugf("Failed to open file %s", filename) - return "", 0, 0, err - } - - defer file.Close() - loopbackDevice := loopback.FindLoopDeviceFor(file) - if loopbackDevice == nil { - return "", 0, 0, fmt.Errorf("devmapper: Unable to find loopback mount for: %s", filename) - } - defer loopbackDevice.Close() - - Major, Minor, err := getDeviceMajorMinor(loopbackDevice) - if err != nil { - return "", 0, 0, err - } - return loopbackDevice.Name(), Major, Minor, nil -} - -// Get the major/minor numbers of thin pool data and metadata devices -func (devices *DeviceSet) getThinPoolDataMetaMajMin() (uint64, uint64, uint64, uint64, error) { - var params, poolDataMajMin, poolMetadataMajMin string - - _, _, _, params, err := devicemapper.GetTable(devices.getPoolName()) - if err != nil { - return 0, 0, 0, 0, err - } - - if _, err = fmt.Sscanf(params, "%s %s", &poolMetadataMajMin, &poolDataMajMin); err != nil { - return 0, 0, 0, 0, err - } - - logrus.WithField("storage-driver", "devicemapper").Debugf("poolDataMajMin=%s poolMetaMajMin=%s\n", poolDataMajMin, poolMetadataMajMin) - - poolDataMajMinorSplit := strings.Split(poolDataMajMin, ":") - poolDataMajor, err := strconv.ParseUint(poolDataMajMinorSplit[0], 10, 32) - if err != nil { - return 0, 0, 0, 0, err - } - - poolDataMinor, err := strconv.ParseUint(poolDataMajMinorSplit[1], 10, 32) - if err != nil { - return 0, 0, 0, 0, err - } - - poolMetadataMajMinorSplit := strings.Split(poolMetadataMajMin, ":") - poolMetadataMajor, err := strconv.ParseUint(poolMetadataMajMinorSplit[0], 10, 32) - if err != nil { - return 0, 0, 0, 0, err - } - - poolMetadataMinor, err := strconv.ParseUint(poolMetadataMajMinorSplit[1], 10, 32) - if err != nil { - return 0, 0, 0, 0, err - } - - return poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, nil -} - -func (devices *DeviceSet) loadThinPoolLoopBackInfo() error { - poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, err := devices.getThinPoolDataMetaMajMin() - if err != nil { - return err - } - - dirname := devices.loopbackDir() - - // data device has not been passed in. So there should be a data file - // which is being mounted as loop device. - if devices.dataDevice == "" { - datafilename := path.Join(dirname, "data") - dataLoopDevice, dataMajor, dataMinor, err := getLoopFileDeviceMajMin(datafilename) - if err != nil { - return err - } - - // Compare the two - if poolDataMajor == dataMajor && poolDataMinor == dataMinor { - devices.dataDevice = dataLoopDevice - devices.dataLoopFile = datafilename - } - - } - - // metadata device has not been passed in. So there should be a - // metadata file which is being mounted as loop device. - if devices.metadataDevice == "" { - metadatafilename := path.Join(dirname, "metadata") - metadataLoopDevice, metadataMajor, metadataMinor, err := getLoopFileDeviceMajMin(metadatafilename) - if err != nil { - return err - } - if poolMetadataMajor == metadataMajor && poolMetadataMinor == metadataMinor { - devices.metadataDevice = metadataLoopDevice - devices.metadataLoopFile = metadatafilename - } - } - - return nil -} - -func (devices *DeviceSet) enableDeferredRemovalDeletion() error { - - // If user asked for deferred removal then check both libdm library - // and kernel driver support deferred removal otherwise error out. 
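// [Editorial sketch, not part of the deleted file.] The "major:minor"
// parsing that getThinPoolDataMetaMajMin above performs twice, with
// the length check the original omits. Helper name is invented.
func parseMajMin(s string) (uint64, uint64, error) {
	parts := strings.Split(s, ":")
	if len(parts) != 2 {
		return 0, 0, fmt.Errorf("malformed major:minor pair %q", s)
	}
	maj, err := strconv.ParseUint(parts[0], 10, 32)
	if err != nil {
		return 0, 0, err
	}
	min, err := strconv.ParseUint(parts[1], 10, 32)
	if err != nil {
		return 0, 0, err
	}
	return maj, min, nil
}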
- if enableDeferredRemoval { - if !driverDeferredRemovalSupport { - return fmt.Errorf("devmapper: Deferred removal can not be enabled as kernel does not support it") - } - if !devicemapper.LibraryDeferredRemovalSupport { - return fmt.Errorf("devmapper: Deferred removal can not be enabled as libdm does not support it") - } - logrus.WithField("storage-driver", "devicemapper").Debug("Deferred removal support enabled.") - devices.deferredRemove = true - } - - if enableDeferredDeletion { - if !devices.deferredRemove { - return fmt.Errorf("devmapper: Deferred deletion can not be enabled as deferred removal is not enabled. Enable deferred removal using --storage-opt dm.use_deferred_removal=true parameter") - } - logrus.WithField("storage-driver", "devicemapper").Debug("Deferred deletion support enabled.") - devices.deferredDelete = true - } - return nil -} - -func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) { - if err := devices.enableDeferredRemovalDeletion(); err != nil { - return err - } - - logger := logrus.WithField("storage-driver", "devicemapper") - - // https://github.com/docker/docker/issues/4036 - if supported := devicemapper.UdevSetSyncSupport(true); !supported { - if dockerversion.IAmStatic == "true" { - logger.Error("Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a dynamic binary to use devicemapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/dockerd/#storage-driver-options") - } else { - logger.Error("Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/dockerd/#storage-driver-options") - } - - if !devices.overrideUdevSyncCheck { - return graphdriver.ErrNotSupported - } - } - - //create the root dir of the devmapper driver ownership to match this - //daemon's remapped root uid/gid so containers can start properly - uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) - if err != nil { - return err - } - if err := idtools.MkdirAndChown(devices.root, 0700, idtools.IDPair{UID: uid, GID: gid}); err != nil { - return err - } - if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil { - return err - } - - prevSetupConfig, err := readLVMConfig(devices.root) - if err != nil { - return err - } - - if !reflect.DeepEqual(devices.lvmSetupConfig, directLVMConfig{}) { - if devices.thinPoolDevice != "" { - return errors.New("cannot setup direct-lvm when `dm.thinpooldev` is also specified") - } - - if !reflect.DeepEqual(prevSetupConfig, devices.lvmSetupConfig) { - if !reflect.DeepEqual(prevSetupConfig, directLVMConfig{}) { - return errors.New("changing direct-lvm config is not supported") - } - logger.WithField("direct-lvm-config", devices.lvmSetupConfig).Debugf("Setting up direct lvm mode") - if err := verifyBlockDevice(devices.lvmSetupConfig.Device, lvmSetupConfigForce); err != nil { - return err - } - if err := setupDirectLVM(devices.lvmSetupConfig); err != nil { - return err - } - if err := writeLVMConfig(devices.root, devices.lvmSetupConfig); err != nil { - return err - } - } - devices.thinPoolDevice = "docker-thinpool" - logger.Debugf("Setting dm.thinpooldev to %q", devices.thinPoolDevice) - } - - // Set the device prefix from the device id and inode of the docker root dir - var st unix.Stat_t - if err := unix.Stat(devices.root, &st); err != nil { 
- return fmt.Errorf("devmapper: Error looking up dir %s: %s", devices.root, err) - } - // "reg-" stands for "regular file". - // In the future we might use "dev-" for "device file", etc. - // docker-maj,min[-inode] stands for: - // - Managed by docker - // - The target of this device is at major and minor - // - If is defined, use that file inside the device as a loopback image. Otherwise use the device itself. - devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(st.Dev), minor(st.Dev), st.Ino) - logger.Debugf("Generated prefix: %s", devices.devicePrefix) - - // Check for the existence of the thin-pool device - poolExists, err := devices.thinPoolExists(devices.getPoolName()) - if err != nil { - return err - } - - // It seems libdevmapper opens this without O_CLOEXEC, and go exec will not close files - // that are not Close-on-exec, - // so we add this badhack to make sure it closes itself - setCloseOnExec("/dev/mapper/control") - - // Make sure the sparse images exist in /devicemapper/data and - // /devicemapper/metadata - - createdLoopback := false - - // If the pool doesn't exist, create it - if !poolExists && devices.thinPoolDevice == "" { - logger.Debug("Pool doesn't exist. Creating it.") - - var ( - dataFile *os.File - metadataFile *os.File - ) - - if devices.dataDevice == "" { - // Make sure the sparse images exist in /devicemapper/data - - hasData := devices.hasImage("data") - - if !doInit && !hasData { - return errors.New("loopback data file not found") - } - - if !hasData { - createdLoopback = true - } - - data, err := devices.ensureImage("data", devices.dataLoopbackSize) - if err != nil { - logger.Debugf("Error device ensureImage (data): %s", err) - return err - } - - dataFile, err = loopback.AttachLoopDevice(data) - if err != nil { - return err - } - devices.dataLoopFile = data - devices.dataDevice = dataFile.Name() - } else { - dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600) - if err != nil { - return err - } - } - defer dataFile.Close() - - if devices.metadataDevice == "" { - // Make sure the sparse images exist in /devicemapper/metadata - - hasMetadata := devices.hasImage("metadata") - - if !doInit && !hasMetadata { - return errors.New("loopback metadata file not found") - } - - if !hasMetadata { - createdLoopback = true - } - - metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize) - if err != nil { - logger.Debugf("Error device ensureImage (metadata): %s", err) - return err - } - - metadataFile, err = loopback.AttachLoopDevice(metadata) - if err != nil { - return err - } - devices.metadataLoopFile = metadata - devices.metadataDevice = metadataFile.Name() - } else { - metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600) - if err != nil { - return err - } - } - defer metadataFile.Close() - - if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil { - return err - } - defer func() { - if retErr != nil { - err = devices.deactivatePool() - if err != nil { - logger.Warnf("Failed to deactivatePool: %v", err) - } - } - }() - } - - // Pool already exists and caller did not pass us a pool. That means - // we probably created pool earlier and could not remove it as some - // containers were still using it. Detect some of the properties of - // pool, like is it using loop devices. 
- if poolExists && devices.thinPoolDevice == "" { - if err := devices.loadThinPoolLoopBackInfo(); err != nil { - logger.Debugf("Failed to load thin pool loopback device information:%v", err) - return err - } - } - - // If we didn't just create the data or metadata image, we need to - // load the transaction id and migrate old metadata - if !createdLoopback { - if err := devices.initMetaData(); err != nil { - return err - } - } - - if devices.thinPoolDevice == "" { - if devices.metadataLoopFile != "" || devices.dataLoopFile != "" { - logger.Warn("Usage of loopback devices is strongly discouraged for production use. Please use `--storage-opt dm.thinpooldev` or use `man dockerd` to refer to dm.thinpooldev section.") - } - } - - // Right now this loads only NextDeviceID. If there is more metadata - // down the line, we might have to move it earlier. - if err := devices.loadDeviceSetMetaData(); err != nil { - return err - } - - // Setup the base image - if doInit { - if err := devices.setupBaseImage(); err != nil { - logger.Debugf("Error device setupBaseImage: %s", err) - return err - } - } - - return nil -} - -// AddDevice adds a device and registers in the hash. -func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string]string) error { - logrus.WithField("storage-driver", "devicemapper").Debugf("AddDevice START(hash=%s basehash=%s)", hash, baseHash) - defer logrus.WithField("storage-driver", "devicemapper").Debugf("AddDevice END(hash=%s basehash=%s)", hash, baseHash) - - // If a deleted device exists, return error. - baseInfo, err := devices.lookupDeviceWithLock(baseHash) - if err != nil { - return err - } - - if baseInfo.Deleted { - return fmt.Errorf("devmapper: Base device %v has been marked for deferred deletion", baseInfo.Hash) - } - - baseInfo.lock.Lock() - defer baseInfo.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - // Also include deleted devices in case hash of new device is - // same as one of the deleted devices. - if info, _ := devices.lookupDevice(hash); info != nil { - return fmt.Errorf("devmapper: device %s already exists. Deleted=%v", hash, info.Deleted) - } - - size, err := devices.parseStorageOpt(storageOpt) - if err != nil { - return err - } - - if size == 0 { - size = baseInfo.Size - } - - if size < baseInfo.Size { - return fmt.Errorf("devmapper: Container size cannot be smaller than %s", units.HumanSize(float64(baseInfo.Size))) - } - - if err := devices.takeSnapshot(hash, baseInfo, size); err != nil { - return err - } - - // Grow the container rootfs. - if size > baseInfo.Size { - info, err := devices.lookupDevice(hash) - if err != nil { - return err - } - - if err := devices.growFS(info); err != nil { - return err - } - } - - return nil -} - -func (devices *DeviceSet) parseStorageOpt(storageOpt map[string]string) (uint64, error) { - - // Read size to change the block device size per container. - for key, val := range storageOpt { - key := strings.ToLower(key) - switch key { - case "size": - size, err := units.RAMInBytes(val) - if err != nil { - return 0, err - } - return uint64(size), nil - default: - return 0, fmt.Errorf("Unknown option %s", key) - } - } - - return 0, nil -} - -func (devices *DeviceSet) markForDeferredDeletion(info *devInfo) error { - // If device is already in deleted state, there is nothing to be done. 
- if info.Deleted { - return nil - } - - logrus.WithField("storage-driver", "devicemapper").Debugf("Marking device %s for deferred deletion.", info.Hash) - - info.Deleted = true - - // save device metadata to reflect deleted state. - if err := devices.saveMetadata(info); err != nil { - info.Deleted = false - return err - } - - devices.nrDeletedDevices++ - return nil -} - -// Should be called with devices.Lock() held. -func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) error { - if err := devices.openTransaction(info.Hash, info.DeviceID); err != nil { - logrus.WithField("storage-driver", "devicemapper").Debugf("Error opening transaction hash = %s deviceId = %d", "", info.DeviceID) - return err - } - - defer devices.closeTransaction() - - err := devicemapper.DeleteDevice(devices.getPoolDevName(), info.DeviceID) - if err != nil { - // If syncDelete is true, we want to return error. If deferred - // deletion is not enabled, we return an error. If error is - // something other then EBUSY, return an error. - if syncDelete || !devices.deferredDelete || err != devicemapper.ErrBusy { - logrus.WithField("storage-driver", "devicemapper").Debugf("Error deleting device: %s", err) - return err - } - } - - if err == nil { - if err := devices.unregisterDevice(info.Hash); err != nil { - return err - } - // If device was already in deferred delete state that means - // deletion was being tried again later. Reduce the deleted - // device count. - if info.Deleted { - devices.nrDeletedDevices-- - } - devices.markDeviceIDFree(info.DeviceID) - } else { - if err := devices.markForDeferredDeletion(info); err != nil { - return err - } - } - - return nil -} - -// Issue discard only if device open count is zero. -func (devices *DeviceSet) issueDiscard(info *devInfo) error { - logger := logrus.WithField("storage-driver", "devicemapper") - logger.Debugf("issueDiscard START(device: %s).", info.Hash) - defer logger.Debugf("issueDiscard END(device: %s).", info.Hash) - // This is a workaround for the kernel not discarding block so - // on the thin pool when we remove a thinp device, so we do it - // manually. - // Even if device is deferred deleted, activate it and issue - // discards. - if err := devices.activateDeviceIfNeeded(info, true); err != nil { - return err - } - - devinfo, err := devicemapper.GetInfo(info.Name()) - if err != nil { - return err - } - - if devinfo.OpenCount != 0 { - logger.Debugf("Device: %s is in use. OpenCount=%d. Not issuing discards.", info.Hash, devinfo.OpenCount) - return nil - } - - if err := devicemapper.BlockDeviceDiscard(info.DevName()); err != nil { - logger.Debugf("Error discarding block on device: %s (ignoring)", err) - } - return nil -} - -// Should be called with devices.Lock() held. -func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error { - if devices.doBlkDiscard { - devices.issueDiscard(info) - } - - // Try to deactivate device in case it is active. - // If deferred removal is enabled and deferred deletion is disabled - // then make sure device is removed synchronously. There have been - // some cases of device being busy for short duration and we would - // rather busy wait for device removal to take care of these cases. 
-	deferredRemove := devices.deferredRemove
-	if !devices.deferredDelete {
-		deferredRemove = false
-	}
-
-	if err := devices.deactivateDeviceMode(info, deferredRemove); err != nil {
-		logrus.WithField("storage-driver", "devicemapper").Debugf("Error deactivating device: %s", err)
-		return err
-	}
-
-	return devices.deleteTransaction(info, syncDelete)
-}
-
-// DeleteDevice will return success if the device has been marked for deferred
-// removal. If one wants to override that and wants DeleteDevice() to fail if
-// the device was busy and could not be deleted, set syncDelete=true.
-func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error {
-	logrus.WithField("storage-driver", "devicemapper").Debugf("DeleteDevice START(hash=%v syncDelete=%v)", hash, syncDelete)
-	defer logrus.WithField("storage-driver", "devicemapper").Debugf("DeleteDevice END(hash=%v syncDelete=%v)", hash, syncDelete)
-	info, err := devices.lookupDeviceWithLock(hash)
-	if err != nil {
-		return err
-	}
-
-	info.lock.Lock()
-	defer info.lock.Unlock()
-
-	devices.Lock()
-	defer devices.Unlock()
-
-	return devices.deleteDevice(info, syncDelete)
-}
-
-func (devices *DeviceSet) deactivatePool() error {
-	logrus.WithField("storage-driver", "devicemapper").Debug("deactivatePool() START")
-	defer logrus.WithField("storage-driver", "devicemapper").Debug("deactivatePool() END")
-	devname := devices.getPoolDevName()
-
-	devinfo, err := devicemapper.GetInfo(devname)
-	if err != nil {
-		return err
-	}
-
-	if devinfo.Exists == 0 {
-		return nil
-	}
-	if err := devicemapper.RemoveDevice(devname); err != nil {
-		return err
-	}
-
-	if d, err := devicemapper.GetDeps(devname); err == nil {
-		logrus.WithField("storage-driver", "devicemapper").Warnf("device %s still has %d active dependents", devname, d.Count)
-	}
-
-	return nil
-}
-
-func (devices *DeviceSet) deactivateDevice(info *devInfo) error {
-	return devices.deactivateDeviceMode(info, devices.deferredRemove)
-}
-
-func (devices *DeviceSet) deactivateDeviceMode(info *devInfo, deferredRemove bool) error {
-	var err error
-	logrus.WithField("storage-driver", "devicemapper").Debugf("deactivateDevice START(%s)", info.Hash)
-	defer logrus.WithField("storage-driver", "devicemapper").Debugf("deactivateDevice END(%s)", info.Hash)
-
-	devinfo, err := devicemapper.GetInfo(info.Name())
-	if err != nil {
-		return err
-	}
-
-	if devinfo.Exists == 0 {
-		return nil
-	}
-
-	if deferredRemove {
-		err = devicemapper.RemoveDeviceDeferred(info.Name())
-	} else {
-		err = devices.removeDevice(info.Name())
-	}
-
-	// This function's semantics are such that it does not return an
-	// error if the device does not exist. So if the device went away by
-	// the time we actually tried to remove it, do not return an error.
-	if err != devicemapper.ErrEnxio {
-		return err
-	}
-	return nil
-}
-
-// Issues the underlying dm remove operation.
-func (devices *DeviceSet) removeDevice(devname string) error {
-	var err error
-
-	logrus.WithField("storage-driver", "devicemapper").Debugf("removeDevice START(%s)", devname)
-	defer logrus.WithField("storage-driver", "devicemapper").Debugf("removeDevice END(%s)", devname)
-
-	for i := 0; i < 200; i++ {
-		err = devicemapper.RemoveDevice(devname)
-		if err == nil {
-			break
-		}
-		if err != devicemapper.ErrBusy {
-			return err
-		}
-
-		// If we see EBUSY it may be a transient error,
-		// sleep a bit and retry a few times.
-		devices.Unlock()
-		time.Sleep(100 * time.Millisecond)
-		devices.Lock()
-	}
-
-	return err
-}
-
-func (devices *DeviceSet) cancelDeferredRemovalIfNeeded(info *devInfo) error {
-	if !devices.deferredRemove {
-		return nil
-	}
-
-	logrus.WithField("storage-driver", "devicemapper").Debugf("cancelDeferredRemovalIfNeeded START(%s)", info.Name())
-	defer logrus.WithField("storage-driver", "devicemapper").Debugf("cancelDeferredRemovalIfNeeded END(%s)", info.Name())
-
-	devinfo, err := devicemapper.GetInfoWithDeferred(info.Name())
-	if err != nil {
-		return err
-	}
-
-	if devinfo != nil && devinfo.DeferredRemove == 0 {
-		return nil
-	}
-
-	// Cancel deferred remove
-	if err := devices.cancelDeferredRemoval(info); err != nil {
-		// If the error is ErrEnxio, the device is probably already gone; continue.
-		if err != devicemapper.ErrEnxio {
-			return err
-		}
-	}
-	return nil
-}
-
-func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error {
-	logrus.WithField("storage-driver", "devicemapper").Debugf("cancelDeferredRemoval START(%s)", info.Name())
-	defer logrus.WithField("storage-driver", "devicemapper").Debugf("cancelDeferredRemoval END(%s)", info.Name())
-
-	var err error
-
-	// Cancel deferred remove
-	for i := 0; i < 100; i++ {
-		err = devicemapper.CancelDeferredRemove(info.Name())
-		if err != nil {
-			if err == devicemapper.ErrBusy {
-				// If we see EBUSY it may be a transient error,
-				// sleep a bit and retry a few times.
-				devices.Unlock()
-				time.Sleep(100 * time.Millisecond)
-				devices.Lock()
-				continue
-			}
-		}
-		break
-	}
-	return err
-}
-
-func (devices *DeviceSet) unmountAndDeactivateAll(dir string) {
-	logger := logrus.WithField("storage-driver", "devicemapper")
-
-	files, err := ioutil.ReadDir(dir)
-	if err != nil {
-		logger.Warnf("unmountAndDeactivate: %s", err)
-		return
-	}
-
-	for _, d := range files {
-		if !d.IsDir() {
-			continue
-		}
-
-		name := d.Name()
-		fullname := path.Join(dir, name)
-
-		// We use MNT_DETACH here in case it is still busy in some running
-		// container. This means it'll go away from the global scope directly,
-		// and the device will be released when that container dies.
-		if err := unix.Unmount(fullname, unix.MNT_DETACH); err != nil && err != unix.EINVAL {
-			logger.Warnf("Shutdown unmounting %s, error: %s", fullname, err)
-		}
-
-		if devInfo, err := devices.lookupDevice(name); err != nil {
-			logger.Debugf("Shutdown lookup device %s, error: %s", name, err)
-		} else {
-			if err := devices.deactivateDevice(devInfo); err != nil {
-				logger.Debugf("Shutdown deactivate %s, error: %s", devInfo.Hash, err)
-			}
-		}
-	}
-}
-
-// Shutdown shuts down the device by unmounting the root.
-func (devices *DeviceSet) Shutdown(home string) error {
-	logger := logrus.WithField("storage-driver", "devicemapper")
-
-	logger.Debugf("[deviceset %s] Shutdown()", devices.devicePrefix)
-	logger.Debugf("Shutting down DeviceSet: %s", devices.root)
-	defer logger.Debugf("[deviceset %s] Shutdown() END", devices.devicePrefix)
-
-	// Stop the deletion worker. This should stop delivering new events to
-	// the ticker channel, which means no new instance of cleanupDeletedDevice()
-	// will run after this call. If one instance is already running at
-	// the time of the call, it must be holding devices.Lock() and
-	// we will block on this lock till the cleanup function exits.
-	devices.deletionWorkerTicker.Stop()
-
-	devices.Lock()
-	// Save DeviceSet metadata first. Docker kills all threads if they
-	// don't finish in a certain time. It is possible that the Shutdown()
-	// routine does not finish in time as we loop trying to deactivate
-	// some devices while they are busy. In that case the Shutdown() routine
-	// will be killed and we will not get a chance to save the deviceset
-	// metadata. Hence save it early, before trying to deactivate devices.
-	devices.saveDeviceSetMetaData()
-	devices.unmountAndDeactivateAll(path.Join(home, "mnt"))
-	devices.Unlock()
-
-	info, _ := devices.lookupDeviceWithLock("")
-	if info != nil {
-		info.lock.Lock()
-		devices.Lock()
-		if err := devices.deactivateDevice(info); err != nil {
-			logger.Debugf("Shutdown deactivate base, error: %s", err)
-		}
-		devices.Unlock()
-		info.lock.Unlock()
-	}
-
-	devices.Lock()
-	if devices.thinPoolDevice == "" {
-		if err := devices.deactivatePool(); err != nil {
-			logger.Debugf("Shutdown deactivate pool, error: %s", err)
-		}
-	}
-	devices.Unlock()
-
-	return nil
-}
-
-// Recent XFS changes allow changing the behavior of the filesystem on errors.
-// When the thin pool gets full and XFS gets an ENOSPC error, it currently
-// retries the IO indefinitely, which can block the container process so that
-// it cannot be killed. With a value of 0, XFS will not retry upon error and
-// will instead shut down the filesystem.
-
-func (devices *DeviceSet) xfsSetNospaceRetries(info *devInfo) error {
-	dmDevicePath, err := os.Readlink(info.DevName())
-	if err != nil {
-		return fmt.Errorf("devmapper: readlink failed for device %v: %v", info.DevName(), err)
-	}
-
-	dmDeviceName := path.Base(dmDevicePath)
-	filePath := "/sys/fs/xfs/" + dmDeviceName + "/error/metadata/ENOSPC/max_retries"
-	maxRetriesFile, err := os.OpenFile(filePath, os.O_WRONLY, 0)
-	if err != nil {
-		return fmt.Errorf("devmapper: user specified daemon option dm.xfs_nospace_max_retries but it does not seem to be supported on this system: %v", err)
-	}
-	defer maxRetriesFile.Close()
-
-	// Write the user-specified retry count (typically "0").
-	_, err = maxRetriesFile.WriteString(devices.xfsNospaceRetries)
-	if err != nil {
-		return fmt.Errorf("devmapper: failed to write string %v to file %v: %v", devices.xfsNospaceRetries, filePath, err)
-	}
-	return nil
-}
-
-// MountDevice mounts the device if not already mounted.
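
As an aside before the MountDevice implementation below: the sysfs knob that xfsSetNospaceRetries writes can be exercised on its own. The following is a minimal, self-contained sketch of that interaction, not the vendored code; the /dev/mapper path is hypothetical, and the error-config files only exist on kernels with configurable XFS error behavior.

```go
// Standalone sketch of the XFS ENOSPC max_retries knob; device path is made up.
package main

import (
	"fmt"
	"os"
	"path"
)

// setXFSMaxRetries writes maxRetries to the XFS ENOSPC error-config knob for
// the given device-mapper device.
func setXFSMaxRetries(dmDevice, maxRetries string) error {
	// /dev/mapper entries are symlinks to /dev/dm-N; sysfs keys off dm-N.
	target, err := os.Readlink(dmDevice)
	if err != nil {
		return fmt.Errorf("readlink %s: %v", dmDevice, err)
	}
	knob := path.Join("/sys/fs/xfs", path.Base(target), "error/metadata/ENOSPC/max_retries")

	f, err := os.OpenFile(knob, os.O_WRONLY, 0)
	if err != nil {
		return fmt.Errorf("kernel does not expose %s: %v", knob, err)
	}
	defer f.Close()

	// "0" means: do not retry on ENOSPC; shut the filesystem down instead.
	_, err = f.WriteString(maxRetries)
	return err
}

func main() {
	if err := setXFSMaxRetries("/dev/mapper/example-dm-device", "0"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```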
-func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { - info, err := devices.lookupDeviceWithLock(hash) - if err != nil { - return err - } - - if info.Deleted { - return fmt.Errorf("devmapper: Can't mount device %v as it has been marked for deferred deletion", info.Hash) - } - - info.lock.Lock() - defer info.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - if err := devices.activateDeviceIfNeeded(info, false); err != nil { - return fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) - } - - fstype, err := ProbeFsType(info.DevName()) - if err != nil { - return err - } - - options := "" - - if fstype == "xfs" { - // XFS needs nouuid or it can't mount filesystems with the same fs - options = joinMountOptions(options, "nouuid") - } - - options = joinMountOptions(options, devices.mountOptions) - options = joinMountOptions(options, label.FormatMountLabel("", mountLabel)) - - if err := mount.Mount(info.DevName(), path, fstype, options); err != nil { - return fmt.Errorf("devmapper: Error mounting '%s' on '%s' (fstype='%s' options='%s'): %s\n%v", info.DevName(), path, fstype, options, err, string(dmesg.Dmesg(256))) - } - - if fstype == "xfs" && devices.xfsNospaceRetries != "" { - if err := devices.xfsSetNospaceRetries(info); err != nil { - unix.Unmount(path, unix.MNT_DETACH) - devices.deactivateDevice(info) - return err - } - } - - return nil -} - -// UnmountDevice unmounts the device and removes it from hash. -func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error { - logger := logrus.WithField("storage-driver", "devicemapper") - - logger.Debugf("UnmountDevice START(hash=%s)", hash) - defer logger.Debugf("UnmountDevice END(hash=%s)", hash) - - info, err := devices.lookupDeviceWithLock(hash) - if err != nil { - return err - } - - info.lock.Lock() - defer info.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - logger.Debugf("Unmount(%s)", mountPath) - if err := unix.Unmount(mountPath, unix.MNT_DETACH); err != nil { - return err - } - logger.Debug("Unmount done") - - // Remove the mountpoint here. Removing the mountpoint (in newer kernels) - // will cause all other instances of this mount in other mount namespaces - // to be killed (this is an anti-DoS measure that is necessary for things - // like devicemapper). This is necessary to avoid cases where a libdm mount - // that is present in another namespace will cause subsequent RemoveDevice - // operations to fail. We ignore any errors here because this may fail on - // older kernels which don't have - // torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied. - if err := os.Remove(mountPath); err != nil { - logger.Debugf("error doing a remove on unmounted device %s: %v", mountPath, err) - } - - return devices.deactivateDevice(info) -} - -// HasDevice returns true if the device metadata exists. -func (devices *DeviceSet) HasDevice(hash string) bool { - info, _ := devices.lookupDeviceWithLock(hash) - return info != nil -} - -// List returns a list of device ids. 
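
UnmountDevice above leans on two tricks worth calling out: a lazy unmount via MNT_DETACH, and removing the mountpoint to defeat mounts leaked into other namespaces. Below is a reduced, Linux-only sketch of that pattern with a hypothetical mount path, not the vendored implementation; the List implementation continues after it.

```go
// Detach-then-remove pattern for a mountpoint; path is illustrative.
package main

import (
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func lazyUnmount(mountPath string) error {
	// Detach from the global namespace immediately; the kernel frees the
	// mount once the last user in any namespace is gone.
	if err := unix.Unmount(mountPath, unix.MNT_DETACH); err != nil {
		return err
	}
	// Removing the mountpoint kills leaked copies of this mount in other
	// namespaces on newer kernels; ignore failures on older ones.
	if err := os.Remove(mountPath); err != nil {
		log.Printf("remove %s: %v (ignored)", mountPath, err)
	}
	return nil
}

func main() {
	if err := lazyUnmount("/var/lib/docker/devicemapper/mnt/example"); err != nil {
		log.Fatal(err)
	}
}
```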
-func (devices *DeviceSet) List() []string { - devices.Lock() - defer devices.Unlock() - - ids := make([]string, len(devices.Devices)) - i := 0 - for k := range devices.Devices { - ids[i] = k - i++ - } - return ids -} - -func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { - var params string - _, sizeInSectors, _, params, err = devicemapper.GetStatus(devName) - if err != nil { - return - } - if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err == nil { - return - } - return -} - -// GetDeviceStatus provides size, mapped sectors -func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { - info, err := devices.lookupDeviceWithLock(hash) - if err != nil { - return nil, err - } - - info.lock.Lock() - defer info.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - status := &DevStatus{ - DeviceID: info.DeviceID, - Size: info.Size, - TransactionID: info.TransactionID, - } - - if err := devices.activateDeviceIfNeeded(info, false); err != nil { - return nil, fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) - } - - sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()) - - if err != nil { - return nil, err - } - - status.SizeInSectors = sizeInSectors - status.MappedSectors = mappedSectors - status.HighestMappedSector = highestMappedSector - - return status, nil -} - -func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionID, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) { - var params string - if _, totalSizeInSectors, _, params, err = devicemapper.GetStatus(devices.getPoolName()); err == nil { - _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionID, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal) - } - return -} - -// DataDevicePath returns the path to the data storage for this deviceset, -// regardless of loopback or block device -func (devices *DeviceSet) DataDevicePath() string { - return devices.dataDevice -} - -// MetadataDevicePath returns the path to the metadata storage for this deviceset, -// regardless of loopback or block device -func (devices *DeviceSet) MetadataDevicePath() string { - return devices.metadataDevice -} - -func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) { - buf := new(unix.Statfs_t) - if err := unix.Statfs(loopFile, buf); err != nil { - logrus.WithField("storage-driver", "devicemapper").Warnf("Couldn't stat loopfile filesystem %v: %v", loopFile, err) - return 0, err - } - return buf.Bfree * uint64(buf.Bsize), nil -} - -func (devices *DeviceSet) isRealFile(loopFile string) (bool, error) { - if loopFile != "" { - fi, err := os.Stat(loopFile) - if err != nil { - logrus.WithField("storage-driver", "devicemapper").Warnf("Couldn't stat loopfile %v: %v", loopFile, err) - return false, err - } - return fi.Mode().IsRegular(), nil - } - return false, nil -} - -// Status returns the current status of this deviceset -func (devices *DeviceSet) Status() *Status { - devices.Lock() - defer devices.Unlock() - - status := &Status{} - - status.PoolName = devices.getPoolName() - status.DataFile = devices.DataDevicePath() - status.DataLoopback = devices.dataLoopFile - status.MetadataFile = devices.MetadataDevicePath() - status.MetadataLoopback = devices.metadataLoopFile - status.UdevSyncSupported = devicemapper.UdevSyncSupported() - status.DeferredRemoveEnabled = devices.deferredRemove - 
status.DeferredDeleteEnabled = devices.deferredDelete
-	status.DeferredDeletedDeviceCount = devices.nrDeletedDevices
-	status.BaseDeviceSize = devices.getBaseDeviceSize()
-	status.BaseDeviceFS = devices.getBaseDeviceFS()
-
-	totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus()
-	if err == nil {
-		// Convert from blocks to bytes.
-		blockSizeInSectors := totalSizeInSectors / dataTotal
-
-		status.Data.Used = dataUsed * blockSizeInSectors * 512
-		status.Data.Total = dataTotal * blockSizeInSectors * 512
-		status.Data.Available = status.Data.Total - status.Data.Used
-
-		// Metadata blocks are always 4k.
-		status.Metadata.Used = metadataUsed * 4096
-		status.Metadata.Total = metadataTotal * 4096
-		status.Metadata.Available = status.Metadata.Total - status.Metadata.Used
-
-		status.SectorSize = blockSizeInSectors * 512
-
-		if check, _ := devices.isRealFile(devices.dataLoopFile); check {
-			actualSpace, err := devices.getUnderlyingAvailableSpace(devices.dataLoopFile)
-			if err == nil && actualSpace < status.Data.Available {
-				status.Data.Available = actualSpace
-			}
-		}
-
-		if check, _ := devices.isRealFile(devices.metadataLoopFile); check {
-			actualSpace, err := devices.getUnderlyingAvailableSpace(devices.metadataLoopFile)
-			if err == nil && actualSpace < status.Metadata.Available {
-				status.Metadata.Available = actualSpace
-			}
-		}
-
-		minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100
-		status.MinFreeSpace = minFreeData * blockSizeInSectors * 512
-	}
-
-	return status
-}
-
-// exportDeviceMetadata returns the metadata of the device with the given hash.
-func (devices *DeviceSet) exportDeviceMetadata(hash string) (*deviceMetadata, error) {
-	info, err := devices.lookupDeviceWithLock(hash)
-	if err != nil {
-		return nil, err
-	}
-
-	info.lock.Lock()
-	defer info.lock.Unlock()
-
-	metadata := &deviceMetadata{info.DeviceID, info.Size, info.Name()}
-	return metadata, nil
-}
-
-// NewDeviceSet creates the device set based on the options provided.
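
The unit conversions in Status() are easy to get wrong: data blocks are sized from the pool's total sector count, while metadata blocks are fixed at 4 KiB. Here is a tiny worked example of the same arithmetic with made-up pool numbers; the NewDeviceSet implementation follows below.

```go
// Worked example of the thin-pool sector arithmetic; all values hypothetical.
package main

import "fmt"

func main() {
	// Hypothetical values as returned by poolStatus().
	var (
		totalSizeInSectors uint64 = 209715200 // 100 GiB pool in 512-byte sectors
		dataUsed           uint64 = 102400    // data blocks in use
		dataTotal          uint64 = 819200    // data blocks total
		metadataUsed       uint64 = 1280      // 4 KiB metadata blocks in use
	)

	// One data block spans this many 512-byte sectors.
	blockSizeInSectors := totalSizeInSectors / dataTotal // 256 sectors = 128 KiB

	fmt.Println("data used (bytes):    ", dataUsed*blockSizeInSectors*512)
	fmt.Println("data total (bytes):   ", dataTotal*blockSizeInSectors*512)
	fmt.Println("metadata used (bytes):", metadataUsed*4096)
	fmt.Println("block size (bytes):   ", blockSizeInSectors*512)
}
```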
-func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps []idtools.IDMap) (*DeviceSet, error) { - devicemapper.SetDevDir("/dev") - - devices := &DeviceSet{ - root: root, - metaData: metaData{Devices: make(map[string]*devInfo)}, - dataLoopbackSize: defaultDataLoopbackSize, - metaDataLoopbackSize: defaultMetaDataLoopbackSize, - baseFsSize: defaultBaseFsSize, - overrideUdevSyncCheck: defaultUdevSyncOverride, - doBlkDiscard: true, - thinpBlockSize: defaultThinpBlockSize, - deviceIDMap: make([]byte, deviceIDMapSz), - deletionWorkerTicker: time.NewTicker(time.Second * 30), - uidMaps: uidMaps, - gidMaps: gidMaps, - minFreeSpacePercent: defaultMinFreeSpacePercent, - } - - version, err := devicemapper.GetDriverVersion() - if err != nil { - // Can't even get driver version, assume not supported - return nil, graphdriver.ErrNotSupported - } - - if err := determineDriverCapabilities(version); err != nil { - return nil, graphdriver.ErrNotSupported - } - - if driverDeferredRemovalSupport && devicemapper.LibraryDeferredRemovalSupport { - // enable deferred stuff by default - enableDeferredDeletion = true - enableDeferredRemoval = true - } - - foundBlkDiscard := false - var lvmSetupConfig directLVMConfig - for _, option := range options { - key, val, err := parsers.ParseKeyValueOpt(option) - if err != nil { - return nil, err - } - key = strings.ToLower(key) - switch key { - case "dm.basesize": - size, err := units.RAMInBytes(val) - if err != nil { - return nil, err - } - userBaseSize = true - devices.baseFsSize = uint64(size) - case "dm.loopdatasize": - size, err := units.RAMInBytes(val) - if err != nil { - return nil, err - } - devices.dataLoopbackSize = size - case "dm.loopmetadatasize": - size, err := units.RAMInBytes(val) - if err != nil { - return nil, err - } - devices.metaDataLoopbackSize = size - case "dm.fs": - if val != "ext4" && val != "xfs" { - return nil, fmt.Errorf("devmapper: Unsupported filesystem %s", val) - } - devices.filesystem = val - case "dm.mkfsarg": - devices.mkfsArgs = append(devices.mkfsArgs, val) - case "dm.mountopt": - devices.mountOptions = joinMountOptions(devices.mountOptions, val) - case "dm.metadatadev": - devices.metadataDevice = val - case "dm.datadev": - devices.dataDevice = val - case "dm.thinpooldev": - devices.thinPoolDevice = strings.TrimPrefix(val, "/dev/mapper/") - case "dm.blkdiscard": - foundBlkDiscard = true - devices.doBlkDiscard, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - case "dm.blocksize": - size, err := units.RAMInBytes(val) - if err != nil { - return nil, err - } - // convert to 512b sectors - devices.thinpBlockSize = uint32(size) >> 9 - case "dm.override_udev_sync_check": - devices.overrideUdevSyncCheck, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - - case "dm.use_deferred_removal": - enableDeferredRemoval, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - - case "dm.use_deferred_deletion": - enableDeferredDeletion, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - - case "dm.min_free_space": - if !strings.HasSuffix(val, "%") { - return nil, fmt.Errorf("devmapper: Option dm.min_free_space requires %% suffix") - } - - valstring := strings.TrimSuffix(val, "%") - minFreeSpacePercent, err := strconv.ParseUint(valstring, 10, 32) - if err != nil { - return nil, err - } - - if minFreeSpacePercent >= 100 { - return nil, fmt.Errorf("devmapper: Invalid value %v for option dm.min_free_space", val) - } - - devices.minFreeSpacePercent = 
uint32(minFreeSpacePercent) - case "dm.xfs_nospace_max_retries": - _, err := strconv.ParseUint(val, 10, 64) - if err != nil { - return nil, err - } - devices.xfsNospaceRetries = val - case "dm.directlvm_device": - lvmSetupConfig.Device = val - case "dm.directlvm_device_force": - lvmSetupConfigForce, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - case "dm.thinp_percent": - per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) - if err != nil { - return nil, errors.Wrapf(err, "could not parse `dm.thinp_percent=%s`", val) - } - if per >= 100 { - return nil, errors.New("dm.thinp_percent must be greater than 0 and less than 100") - } - lvmSetupConfig.ThinpPercent = per - case "dm.thinp_metapercent": - per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) - if err != nil { - return nil, errors.Wrapf(err, "could not parse `dm.thinp_metapercent=%s`", val) - } - if per >= 100 { - return nil, errors.New("dm.thinp_metapercent must be greater than 0 and less than 100") - } - lvmSetupConfig.ThinpMetaPercent = per - case "dm.thinp_autoextend_percent": - per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) - if err != nil { - return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_percent=%s`", val) - } - if per > 100 { - return nil, errors.New("dm.thinp_autoextend_percent must be greater than 0 and less than 100") - } - lvmSetupConfig.AutoExtendPercent = per - case "dm.thinp_autoextend_threshold": - per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) - if err != nil { - return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_threshold=%s`", val) - } - if per > 100 { - return nil, errors.New("dm.thinp_autoextend_threshold must be greater than 0 and less than 100") - } - lvmSetupConfig.AutoExtendThreshold = per - case "dm.libdm_log_level": - level, err := strconv.ParseInt(val, 10, 32) - if err != nil { - return nil, errors.Wrapf(err, "could not parse `dm.libdm_log_level=%s`", val) - } - if level < devicemapper.LogLevelFatal || level > devicemapper.LogLevelDebug { - return nil, errors.Errorf("dm.libdm_log_level must be in range [%d,%d]", devicemapper.LogLevelFatal, devicemapper.LogLevelDebug) - } - // Register a new logging callback with the specified level. 
- devicemapper.LogInit(devicemapper.DefaultLogger{ - Level: int(level), - }) - default: - return nil, fmt.Errorf("devmapper: Unknown option %s", key) - } - } - - if err := validateLVMConfig(lvmSetupConfig); err != nil { - return nil, err - } - - devices.lvmSetupConfig = lvmSetupConfig - - // By default, don't do blk discard hack on raw devices, its rarely useful and is expensive - if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") { - devices.doBlkDiscard = false - } - - if err := devices.initDevmapper(doInit); err != nil { - return nil, err - } - - return devices, nil -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_doc.go b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_doc.go deleted file mode 100644 index 98ff5cf12..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/devmapper_doc.go +++ /dev/null @@ -1,106 +0,0 @@ -package devmapper // import "github.com/docker/docker/daemon/graphdriver/devmapper" - -// Definition of struct dm_task and sub structures (from lvm2) -// -// struct dm_ioctl { -// /* -// * The version number is made up of three parts: -// * major - no backward or forward compatibility, -// * minor - only backwards compatible, -// * patch - both backwards and forwards compatible. -// * -// * All clients of the ioctl interface should fill in the -// * version number of the interface that they were -// * compiled with. -// * -// * All recognized ioctl commands (ie. those that don't -// * return -ENOTTY) fill out this field, even if the -// * command failed. -// */ -// uint32_t version[3]; /* in/out */ -// uint32_t data_size; /* total size of data passed in -// * including this struct */ - -// uint32_t data_start; /* offset to start of data -// * relative to start of this struct */ - -// uint32_t target_count; /* in/out */ -// int32_t open_count; /* out */ -// uint32_t flags; /* in/out */ - -// /* -// * event_nr holds either the event number (input and output) or the -// * udev cookie value (input only). -// * The DM_DEV_WAIT ioctl takes an event number as input. -// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls -// * use the field as a cookie to return in the DM_COOKIE -// * variable with the uevents they issue. -// * For output, the ioctls return the event number, not the cookie. 
-// */ -// uint32_t event_nr; /* in/out */ -// uint32_t padding; - -// uint64_t dev; /* in/out */ - -// char name[DM_NAME_LEN]; /* device name */ -// char uuid[DM_UUID_LEN]; /* unique identifier for -// * the block device */ -// char data[7]; /* padding or data */ -// }; - -// struct target { -// uint64_t start; -// uint64_t length; -// char *type; -// char *params; - -// struct target *next; -// }; - -// typedef enum { -// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */ -// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */ -// } dm_add_node_t; - -// struct dm_task { -// int type; -// char *dev_name; -// char *mangled_dev_name; - -// struct target *head, *tail; - -// int read_only; -// uint32_t event_nr; -// int major; -// int minor; -// int allow_default_major_fallback; -// uid_t uid; -// gid_t gid; -// mode_t mode; -// uint32_t read_ahead; -// uint32_t read_ahead_flags; -// union { -// struct dm_ioctl *v4; -// } dmi; -// char *newname; -// char *message; -// char *geometry; -// uint64_t sector; -// int no_flush; -// int no_open_count; -// int skip_lockfs; -// int query_inactive_table; -// int suppress_identical_reload; -// dm_add_node_t add_node; -// uint64_t existing_table_size; -// int cookie_set; -// int new_uuid; -// int secure_data; -// int retry_remove; -// int enable_checks; -// int expected_errno; - -// char *uuid; -// char *mangled_uuid; -// }; -// diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go deleted file mode 100644 index df883de31..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/driver.go +++ /dev/null @@ -1,258 +0,0 @@ -// +build linux - -package devmapper // import "github.com/docker/docker/daemon/graphdriver/devmapper" - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "strconv" - - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/containerfs" - "github.com/docker/docker/pkg/devicemapper" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/locker" - "github.com/docker/docker/pkg/mount" - "github.com/docker/go-units" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -func init() { - graphdriver.Register("devicemapper", Init) -} - -// Driver contains the device set mounted and the home directory -type Driver struct { - *DeviceSet - home string - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - ctr *graphdriver.RefCounter - locker *locker.Locker -} - -// Init creates a driver with the given home and the set of options. -func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - deviceSet, err := NewDeviceSet(home, true, options, uidMaps, gidMaps) - if err != nil { - return nil, err - } - - d := &Driver{ - DeviceSet: deviceSet, - home: home, - uidMaps: uidMaps, - gidMaps: gidMaps, - ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), - locker: locker.New(), - } - - return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil -} - -func (d *Driver) String() string { - return "devicemapper" -} - -// Status returns the status about the driver in a printable format. -// Information returned contains Pool Name, Data File, Metadata file, disk usage by -// the data and metadata, etc. 
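
The init()/Register pairing above is how every built-in graphdriver plugs into the daemon, and NewNaiveDiffDriver supplies the diff methods a minimal driver lacks. Below is a hedged sketch of a hypothetical "toy" driver using the same wiring; the embedded interface merely stands in for a real ProtoDriver implementation, and only Register, InitFunc, and NewNaiveDiffDriver are taken from the vendored package. The Status() implementation follows after it.

```go
// Hypothetical graphdriver registration; "toy" is not a real driver.
package toy

import (
	"github.com/docker/docker/daemon/graphdriver"
	"github.com/docker/docker/pkg/idtools"
)

func init() {
	// Makes the driver selectable as `dockerd -s toy`.
	graphdriver.Register("toy", Init)
}

// Init must match graphdriver.InitFunc.
func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
	d := &driver{home: home}
	// Wrap with NaiveDiffDriver so Diff/Changes/ApplyDiff come for free.
	return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil
}

type driver struct {
	// Embedding the interface satisfies ProtoDriver for this sketch only;
	// a real driver implements Create/Get/Put/... itself.
	graphdriver.ProtoDriver
	home string
}
```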
-func (d *Driver) Status() [][2]string { - s := d.DeviceSet.Status() - - status := [][2]string{ - {"Pool Name", s.PoolName}, - {"Pool Blocksize", units.HumanSize(float64(s.SectorSize))}, - {"Base Device Size", units.HumanSize(float64(s.BaseDeviceSize))}, - {"Backing Filesystem", s.BaseDeviceFS}, - {"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)}, - } - - if len(s.DataFile) > 0 { - status = append(status, [2]string{"Data file", s.DataFile}) - } - if len(s.MetadataFile) > 0 { - status = append(status, [2]string{"Metadata file", s.MetadataFile}) - } - if len(s.DataLoopback) > 0 { - status = append(status, [2]string{"Data loop file", s.DataLoopback}) - } - if len(s.MetadataLoopback) > 0 { - status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback}) - } - - status = append(status, [][2]string{ - {"Data Space Used", units.HumanSize(float64(s.Data.Used))}, - {"Data Space Total", units.HumanSize(float64(s.Data.Total))}, - {"Data Space Available", units.HumanSize(float64(s.Data.Available))}, - {"Metadata Space Used", units.HumanSize(float64(s.Metadata.Used))}, - {"Metadata Space Total", units.HumanSize(float64(s.Metadata.Total))}, - {"Metadata Space Available", units.HumanSize(float64(s.Metadata.Available))}, - {"Thin Pool Minimum Free Space", units.HumanSize(float64(s.MinFreeSpace))}, - {"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)}, - {"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)}, - {"Deferred Deleted Device Count", fmt.Sprintf("%v", s.DeferredDeletedDeviceCount)}, - }...) - - if vStr, err := devicemapper.GetLibraryVersion(); err == nil { - status = append(status, [2]string{"Library Version", vStr}) - } - return status -} - -// GetMetadata returns a map of information about the device. -func (d *Driver) GetMetadata(id string) (map[string]string, error) { - m, err := d.DeviceSet.exportDeviceMetadata(id) - - if err != nil { - return nil, err - } - - metadata := make(map[string]string) - metadata["DeviceId"] = strconv.Itoa(m.deviceID) - metadata["DeviceSize"] = strconv.FormatUint(m.deviceSize, 10) - metadata["DeviceName"] = m.deviceName - return metadata, nil -} - -// Cleanup unmounts a device. -func (d *Driver) Cleanup() error { - err := d.DeviceSet.Shutdown(d.home) - umountErr := mount.RecursiveUnmount(d.home) - - // in case we have two errors, prefer the one from Shutdown() - if err != nil { - return err - } - - if umountErr != nil { - return errors.Wrapf(umountErr, "error unmounting %s", d.home) - } - - return nil -} - -// CreateReadWrite creates a layer that is writable for use as a container -// file system. -func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - return d.Create(id, parent, opts) -} - -// Create adds a device with a given id and the parent. -func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { - var storageOpt map[string]string - if opts != nil { - storageOpt = opts.StorageOpt - } - return d.DeviceSet.AddDevice(id, parent, storageOpt) -} - -// Remove removes a device with a given id, unmounts the filesystem, and removes the mount point. 
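
Create above forwards opts.StorageOpt straight to AddDevice, which is where the per-container "size" option from earlier in this patch is parsed. The sketch below shows how a caller might pass it; the driver value and IDs are assumptions, not vendored code. The Remove implementation follows after it.

```go
// Passing the devicemapper "size" storage option through CreateOpts.
package main

import (
	"log"

	"github.com/docker/docker/daemon/graphdriver"
)

func createSizedLayer(driver graphdriver.Driver, id, parent string) error {
	opts := &graphdriver.CreateOpts{
		StorageOpt: map[string]string{
			// Parsed with units.RAMInBytes, so suffixes like "20GB" work;
			// the value must not be smaller than the base device size.
			"size": "20GB",
		},
	}
	return driver.CreateReadWrite(id, parent, opts)
}

func main() {
	var d graphdriver.Driver // obtained from graphdriver.New/GetDriver in real code
	if d == nil {
		log.Println("sketch only: no driver initialized")
		return
	}
	_ = createSizedLayer(d, "example-id", "example-parent")
}
```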
-func (d *Driver) Remove(id string) error { - d.locker.Lock(id) - defer d.locker.Unlock(id) - if !d.DeviceSet.HasDevice(id) { - // Consider removing a non-existing device a no-op - // This is useful to be able to progress on container removal - // if the underlying device has gone away due to earlier errors - return nil - } - - // This assumes the device has been properly Get/Put:ed and thus is unmounted - if err := d.DeviceSet.DeleteDevice(id, false); err != nil { - return fmt.Errorf("failed to remove device %s: %v", id, err) - } - - // Most probably the mount point is already removed on Put() - // (see DeviceSet.UnmountDevice()), but just in case it was not - // let's try to remove it here as well, ignoring errors as - // an older kernel can return EBUSY if e.g. the mount was leaked - // to other mount namespaces. A failure to remove the container's - // mount point is not important and should not be treated - // as a failure to remove the container. - mp := path.Join(d.home, "mnt", id) - err := unix.Rmdir(mp) - if err != nil && !os.IsNotExist(err) { - logrus.WithField("storage-driver", "devicemapper").Warnf("unable to remove mount point %q: %s", mp, err) - } - - return nil -} - -// Get mounts a device with given id into the root filesystem -func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) { - d.locker.Lock(id) - defer d.locker.Unlock(id) - mp := path.Join(d.home, "mnt", id) - rootFs := path.Join(mp, "rootfs") - if count := d.ctr.Increment(mp); count > 1 { - return containerfs.NewLocalContainerFS(rootFs), nil - } - - uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { - d.ctr.Decrement(mp) - return nil, err - } - - // Create the target directories if they don't exist - if err := idtools.MkdirAllAndChown(path.Join(d.home, "mnt"), 0755, idtools.IDPair{UID: uid, GID: gid}); err != nil { - d.ctr.Decrement(mp) - return nil, err - } - if err := idtools.MkdirAndChown(mp, 0755, idtools.IDPair{UID: uid, GID: gid}); err != nil && !os.IsExist(err) { - d.ctr.Decrement(mp) - return nil, err - } - - // Mount the device - if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil { - d.ctr.Decrement(mp) - return nil, err - } - - if err := idtools.MkdirAllAndChown(rootFs, 0755, idtools.IDPair{UID: uid, GID: gid}); err != nil { - d.ctr.Decrement(mp) - d.DeviceSet.UnmountDevice(id, mp) - return nil, err - } - - idFile := path.Join(mp, "id") - if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) { - // Create an "id" file with the container/image id in it to help reconstruct this in case - // of later problems - if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil { - d.ctr.Decrement(mp) - d.DeviceSet.UnmountDevice(id, mp) - return nil, err - } - } - - return containerfs.NewLocalContainerFS(rootFs), nil -} - -// Put unmounts a device and removes it. -func (d *Driver) Put(id string) error { - d.locker.Lock(id) - defer d.locker.Unlock(id) - mp := path.Join(d.home, "mnt", id) - if count := d.ctr.Decrement(mp); count > 0 { - return nil - } - - err := d.DeviceSet.UnmountDevice(id, mp) - if err != nil { - logrus.WithField("storage-driver", "devicemapper").Errorf("Error unmounting device %s: %v", id, err) - } - - return err -} - -// Exists checks to see if the device exists. 
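
Get and Put above combine a per-id lock with a reference count so that concurrent requests for the same layer mount once and unmount only on the last release. A reduced, runnable sketch of that pattern follows; the names are illustrative, not the vendored code. Exists follows after it.

```go
// Per-id locking plus reference counting, mirroring the Get/Put shape.
package main

import (
	"fmt"
	"sync"

	"github.com/docker/docker/pkg/locker"
)

type mounts struct {
	l      *locker.Locker
	mu     sync.Mutex
	counts map[string]int
}

func (m *mounts) get(id string) {
	m.l.Lock(id) // serialize per layer id, not globally
	defer m.l.Unlock(id)

	m.mu.Lock()
	m.counts[id]++
	first := m.counts[id] == 1
	m.mu.Unlock()

	if first {
		fmt.Println("mounting", id) // real code calls MountDevice here
	}
}

func (m *mounts) put(id string) {
	m.l.Lock(id)
	defer m.l.Unlock(id)

	m.mu.Lock()
	m.counts[id]--
	last := m.counts[id] == 0
	m.mu.Unlock()

	if last {
		fmt.Println("unmounting", id) // real code calls UnmountDevice here
	}
}

func main() {
	m := &mounts{l: locker.New(), counts: map[string]int{}}
	m.get("layer1")
	m.get("layer1") // second get just bumps the count
	m.put("layer1")
	m.put("layer1") // last put actually unmounts
}
```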
-func (d *Driver) Exists(id string) bool { - return d.DeviceSet.HasDevice(id) -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/mount.go b/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/mount.go deleted file mode 100644 index 78d05b079..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/devmapper/mount.go +++ /dev/null @@ -1,66 +0,0 @@ -// +build linux - -package devmapper // import "github.com/docker/docker/daemon/graphdriver/devmapper" - -import ( - "bytes" - "fmt" - "os" -) - -type probeData struct { - fsName string - magic string - offset uint64 -} - -// ProbeFsType returns the filesystem name for the given device id. -func ProbeFsType(device string) (string, error) { - probes := []probeData{ - {"btrfs", "_BHRfS_M", 0x10040}, - {"ext4", "\123\357", 0x438}, - {"xfs", "XFSB", 0}, - } - - maxLen := uint64(0) - for _, p := range probes { - l := p.offset + uint64(len(p.magic)) - if l > maxLen { - maxLen = l - } - } - - file, err := os.Open(device) - if err != nil { - return "", err - } - defer file.Close() - - buffer := make([]byte, maxLen) - l, err := file.Read(buffer) - if err != nil { - return "", err - } - - if uint64(l) != maxLen { - return "", fmt.Errorf("devmapper: unable to detect filesystem type of %s, short read", device) - } - - for _, p := range probes { - if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) { - return p.fsName, nil - } - } - - return "", fmt.Errorf("devmapper: Unknown filesystem type on %s", device) -} - -func joinMountOptions(a, b string) string { - if a == "" { - return b - } - if b == "" { - return a - } - return a + "," + b -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver.go deleted file mode 100644 index a9e195739..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/driver.go +++ /dev/null @@ -1,307 +0,0 @@ -package graphdriver // import "github.com/docker/docker/daemon/graphdriver" - -import ( - "fmt" - "io" - "os" - "path/filepath" - "strings" - - "github.com/sirupsen/logrus" - "github.com/vbatts/tar-split/tar/storage" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/containerfs" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/plugingetter" -) - -// FsMagic unsigned id of the filesystem in use. -type FsMagic uint32 - -const ( - // FsMagicUnsupported is a predefined constant value other than a valid filesystem id. - FsMagicUnsupported = FsMagic(0x00000000) -) - -var ( - // All registered drivers - drivers map[string]InitFunc -) - -//CreateOpts contains optional arguments for Create() and CreateReadWrite() -// methods. -type CreateOpts struct { - MountLabel string - StorageOpt map[string]string -} - -// InitFunc initializes the storage driver. -type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) - -// ProtoDriver defines the basic capabilities of a driver. -// This interface exists solely to be a minimum set of methods -// for client code which choose not to implement the entire Driver -// interface and use the NaiveDiffDriver wrapper constructor. -// -// Use of ProtoDriver directly by client code is not recommended. -type ProtoDriver interface { - // String returns a string representation of this driver. - String() string - // CreateReadWrite creates a new, empty filesystem layer that is ready - // to be used as the storage for a container. 
Additional options can - // be passed in opts. parent may be "" and opts may be nil. - CreateReadWrite(id, parent string, opts *CreateOpts) error - // Create creates a new, empty, filesystem layer with the - // specified id and parent and options passed in opts. Parent - // may be "" and opts may be nil. - Create(id, parent string, opts *CreateOpts) error - // Remove attempts to remove the filesystem layer with this id. - Remove(id string) error - // Get returns the mountpoint for the layered filesystem referred - // to by this id. You can optionally specify a mountLabel or "". - // Returns the absolute path to the mounted layered filesystem. - Get(id, mountLabel string) (fs containerfs.ContainerFS, err error) - // Put releases the system resources for the specified id, - // e.g, unmounting layered filesystem. - Put(id string) error - // Exists returns whether a filesystem layer with the specified - // ID exists on this driver. - Exists(id string) bool - // Status returns a set of key-value pairs which give low - // level diagnostic status about this driver. - Status() [][2]string - // Returns a set of key-value pairs which give low level information - // about the image/container driver is managing. - GetMetadata(id string) (map[string]string, error) - // Cleanup performs necessary tasks to release resources - // held by the driver, e.g., unmounting all layered filesystems - // known to this driver. - Cleanup() error -} - -// DiffDriver is the interface to use to implement graph diffs -type DiffDriver interface { - // Diff produces an archive of the changes between the specified - // layer and its parent layer which may be "". - Diff(id, parent string) (io.ReadCloser, error) - // Changes produces a list of changes between the specified layer - // and its parent layer. If parent is "", then all changes will be ADD changes. - Changes(id, parent string) ([]archive.Change, error) - // ApplyDiff extracts the changeset from the given diff into the - // layer with the specified id and parent, returning the size of the - // new layer in bytes. - // The archive.Reader must be an uncompressed stream. - ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) - // DiffSize calculates the changes between the specified id - // and its parent and returns the size in bytes of the changes - // relative to its base filesystem directory. - DiffSize(id, parent string) (size int64, err error) -} - -// Driver is the interface for layered/snapshot file system drivers. -type Driver interface { - ProtoDriver - DiffDriver -} - -// Capabilities defines a list of capabilities a driver may implement. -// These capabilities are not required; however, they do determine how a -// graphdriver can be used. -type Capabilities struct { - // Flags that this driver is capable of reproducing exactly equivalent - // diffs for read-only layers. If set, clients can rely on the driver - // for consistent tar streams, and avoid extra processing to account - // for potential differences (eg: the layer store's use of tar-split). - ReproducesExactDiffs bool -} - -// CapabilityDriver is the interface for layered file system drivers that -// can report on their Capabilities. -type CapabilityDriver interface { - Capabilities() Capabilities -} - -// DiffGetterDriver is the interface for layered file system drivers that -// provide a specialized function for getting file contents for tar-split. 
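
Optional interfaces like CapabilityDriver above (and DiffGetterDriver, defined below) are discovered by type assertion rather than registration. A small sketch of how a client might probe one, assuming only the vendored graphdriver package:

```go
// Probing an optional driver capability with a type assertion.
package main

import (
	"fmt"

	"github.com/docker/docker/daemon/graphdriver"
)

func reproducesExactDiffs(d graphdriver.Driver) bool {
	if cd, ok := d.(graphdriver.CapabilityDriver); ok {
		return cd.Capabilities().ReproducesExactDiffs
	}
	// Conservative default: assume tar streams may differ between calls.
	return false
}

func main() {
	var d graphdriver.Driver // sketch: nil, so the assertion falls through
	fmt.Println("exact diffs:", reproducesExactDiffs(d))
}
```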
-type DiffGetterDriver interface { - Driver - // DiffGetter returns an interface to efficiently retrieve the contents - // of files in a layer. - DiffGetter(id string) (FileGetCloser, error) -} - -// FileGetCloser extends the storage.FileGetter interface with a Close method -// for cleaning up. -type FileGetCloser interface { - storage.FileGetter - // Close cleans up any resources associated with the FileGetCloser. - Close() error -} - -// Checker makes checks on specified filesystems. -type Checker interface { - // IsMounted returns true if the provided path is mounted for the specific checker - IsMounted(path string) bool -} - -func init() { - drivers = make(map[string]InitFunc) -} - -// Register registers an InitFunc for the driver. -func Register(name string, initFunc InitFunc) error { - if _, exists := drivers[name]; exists { - return fmt.Errorf("Name already registered %s", name) - } - drivers[name] = initFunc - - return nil -} - -// GetDriver initializes and returns the registered driver -func GetDriver(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { - if initFunc, exists := drivers[name]; exists { - return initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) - } - - pluginDriver, err := lookupPlugin(name, pg, config) - if err == nil { - return pluginDriver, nil - } - logrus.WithError(err).WithField("driver", name).WithField("home-dir", config.Root).Error("Failed to GetDriver graph") - return nil, ErrNotSupported -} - -// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins -func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { - if initFunc, exists := drivers[name]; exists { - return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps) - } - logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home) - return nil, ErrNotSupported -} - -// Options is used to initialize a graphdriver -type Options struct { - Root string - DriverOptions []string - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap - ExperimentalEnabled bool -} - -// New creates the driver and initializes it at the specified root. -func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { - if name != "" { - logrus.Debugf("[graphdriver] trying provided driver: %s", name) // so the logs show specified driver - return GetDriver(name, pg, config) - } - - // Guess for prior driver - driversMap := scanPriorDrivers(config.Root) - list := strings.Split(priority, ",") - logrus.Debugf("[graphdriver] priority list: %v", list) - for _, name := range list { - if name == "vfs" { - // don't use vfs even if there is state present. - continue - } - if _, prior := driversMap[name]; prior { - // of the state found from prior drivers, check in order of our priority - // which we would prefer - driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps) - if err != nil { - // unlike below, we will return error here, because there is prior - // state, and now it is no longer supported/prereq/compatible, so - // something changed and needs attention. Otherwise the daemon's - // images would just "disappear". 
- logrus.Errorf("[graphdriver] prior storage driver %s failed: %s", name, err) - return nil, err - } - - // abort starting when there are other prior configured drivers - // to ensure the user explicitly selects the driver to load - if len(driversMap)-1 > 0 { - var driversSlice []string - for name := range driversMap { - driversSlice = append(driversSlice, name) - } - - return nil, fmt.Errorf("%s contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s )", config.Root, strings.Join(driversSlice, ", ")) - } - - logrus.Infof("[graphdriver] using prior storage driver: %s", name) - return driver, nil - } - } - - // Check for priority drivers first - for _, name := range list { - driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps) - if err != nil { - if IsDriverNotSupported(err) { - continue - } - return nil, err - } - return driver, nil - } - - // Check all registered drivers if no priority driver is found - for name, initFunc := range drivers { - driver, err := initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) - if err != nil { - if IsDriverNotSupported(err) { - continue - } - return nil, err - } - return driver, nil - } - return nil, fmt.Errorf("No supported storage backend found") -} - -// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers -func scanPriorDrivers(root string) map[string]bool { - driversMap := make(map[string]bool) - - for driver := range drivers { - p := filepath.Join(root, driver) - if _, err := os.Stat(p); err == nil && driver != "vfs" { - if !isEmptyDir(p) { - driversMap[driver] = true - } - } - } - return driversMap -} - -// IsInitialized checks if the driver's home-directory exists and is non-empty. -func IsInitialized(driverHome string) bool { - _, err := os.Stat(driverHome) - if os.IsNotExist(err) { - return false - } - if err != nil { - logrus.Warnf("graphdriver.IsInitialized: stat failed: %v", err) - } - return !isEmptyDir(driverHome) -} - -// isEmptyDir checks if a directory is empty. It is used to check if prior -// storage-driver directories exist. 
If an error occurs, it also assumes the -// directory is not empty (which preserves the behavior _before_ this check -// was added) -func isEmptyDir(name string) bool { - f, err := os.Open(name) - if err != nil { - return false - } - defer f.Close() - - if _, err = f.Readdirnames(1); err == io.EOF { - return true - } - return false -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go deleted file mode 100644 index cd83c4e21..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go +++ /dev/null @@ -1,21 +0,0 @@ -package graphdriver // import "github.com/docker/docker/daemon/graphdriver" - -import ( - "syscall" - - "golang.org/x/sys/unix" -) - -var ( - // List of drivers that should be used in an order - priority = "zfs" -) - -// Mounted checks if the given path is mounted as the fs type -func Mounted(fsType FsMagic, mountPath string) (bool, error) { - var buf unix.Statfs_t - if err := syscall.Statfs(mountPath, &buf); err != nil { - return false, err - } - return FsMagic(buf.Type) == fsType, nil -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go deleted file mode 100644 index 61c6b24a9..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go +++ /dev/null @@ -1,124 +0,0 @@ -package graphdriver // import "github.com/docker/docker/daemon/graphdriver" - -import ( - "github.com/docker/docker/pkg/mount" - "golang.org/x/sys/unix" -) - -const ( - // FsMagicAufs filesystem id for Aufs - FsMagicAufs = FsMagic(0x61756673) - // FsMagicBtrfs filesystem id for Btrfs - FsMagicBtrfs = FsMagic(0x9123683E) - // FsMagicCramfs filesystem id for Cramfs - FsMagicCramfs = FsMagic(0x28cd3d45) - // FsMagicEcryptfs filesystem id for eCryptfs - FsMagicEcryptfs = FsMagic(0xf15f) - // FsMagicExtfs filesystem id for Extfs - FsMagicExtfs = FsMagic(0x0000EF53) - // FsMagicF2fs filesystem id for F2fs - FsMagicF2fs = FsMagic(0xF2F52010) - // FsMagicGPFS filesystem id for GPFS - FsMagicGPFS = FsMagic(0x47504653) - // FsMagicJffs2Fs filesystem if for Jffs2Fs - FsMagicJffs2Fs = FsMagic(0x000072b6) - // FsMagicJfs filesystem id for Jfs - FsMagicJfs = FsMagic(0x3153464a) - // FsMagicNfsFs filesystem id for NfsFs - FsMagicNfsFs = FsMagic(0x00006969) - // FsMagicRAMFs filesystem id for RamFs - FsMagicRAMFs = FsMagic(0x858458f6) - // FsMagicReiserFs filesystem id for ReiserFs - FsMagicReiserFs = FsMagic(0x52654973) - // FsMagicSmbFs filesystem id for SmbFs - FsMagicSmbFs = FsMagic(0x0000517B) - // FsMagicSquashFs filesystem id for SquashFs - FsMagicSquashFs = FsMagic(0x73717368) - // FsMagicTmpFs filesystem id for TmpFs - FsMagicTmpFs = FsMagic(0x01021994) - // FsMagicVxFS filesystem id for VxFs - FsMagicVxFS = FsMagic(0xa501fcf5) - // FsMagicXfs filesystem id for Xfs - FsMagicXfs = FsMagic(0x58465342) - // FsMagicZfs filesystem id for Zfs - FsMagicZfs = FsMagic(0x2fc12fc1) - // FsMagicOverlay filesystem id for overlay - FsMagicOverlay = FsMagic(0x794C7630) -) - -var ( - // List of drivers that should be used in an order - priority = "btrfs,zfs,overlay2,aufs,overlay,devicemapper,vfs" - - // FsNames maps filesystem id to name of the filesystem. 
- FsNames = map[FsMagic]string{ - FsMagicAufs: "aufs", - FsMagicBtrfs: "btrfs", - FsMagicCramfs: "cramfs", - FsMagicEcryptfs: "ecryptfs", - FsMagicExtfs: "extfs", - FsMagicF2fs: "f2fs", - FsMagicGPFS: "gpfs", - FsMagicJffs2Fs: "jffs2", - FsMagicJfs: "jfs", - FsMagicNfsFs: "nfs", - FsMagicOverlay: "overlayfs", - FsMagicRAMFs: "ramfs", - FsMagicReiserFs: "reiserfs", - FsMagicSmbFs: "smb", - FsMagicSquashFs: "squashfs", - FsMagicTmpFs: "tmpfs", - FsMagicUnsupported: "unsupported", - FsMagicVxFS: "vxfs", - FsMagicXfs: "xfs", - FsMagicZfs: "zfs", - } -) - -// GetFSMagic returns the filesystem id given the path. -func GetFSMagic(rootpath string) (FsMagic, error) { - var buf unix.Statfs_t - if err := unix.Statfs(rootpath, &buf); err != nil { - return 0, err - } - return FsMagic(buf.Type), nil -} - -// NewFsChecker returns a checker configured for the provided FsMagic -func NewFsChecker(t FsMagic) Checker { - return &fsChecker{ - t: t, - } -} - -type fsChecker struct { - t FsMagic -} - -func (c *fsChecker) IsMounted(path string) bool { - m, _ := Mounted(c.t, path) - return m -} - -// NewDefaultChecker returns a check that parses /proc/mountinfo to check -// if the specified path is mounted. -func NewDefaultChecker() Checker { - return &defaultChecker{} -} - -type defaultChecker struct { -} - -func (c *defaultChecker) IsMounted(path string) bool { - m, _ := mount.Mounted(path) - return m -} - -// Mounted checks if the given path is mounted as the fs type -func Mounted(fsType FsMagic, mountPath string) (bool, error) { - var buf unix.Statfs_t - if err := unix.Statfs(mountPath, &buf); err != nil { - return false, err - } - return FsMagic(buf.Type) == fsType, nil -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go deleted file mode 100644 index 1f2e8f071..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !linux,!windows,!freebsd - -package graphdriver // import "github.com/docker/docker/daemon/graphdriver" - -var ( - // List of drivers that should be used in an order - priority = "unsupported" -) - -// GetFSMagic returns the filesystem id given the path. -func GetFSMagic(rootpath string) (FsMagic, error) { - return FsMagicUnsupported, nil -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go deleted file mode 100644 index 856b575e7..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go +++ /dev/null @@ -1,12 +0,0 @@ -package graphdriver // import "github.com/docker/docker/daemon/graphdriver" - -var ( - // List of drivers that should be used in order - priority = "windowsfilter" -) - -// GetFSMagic returns the filesystem id given the path. -func GetFSMagic(rootpath string) (FsMagic, error) { - // Note it is OK to return FsMagicUnsupported on Windows. - return FsMagicUnsupported, nil -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/errors.go b/vendor/github.com/docker/docker/daemon/graphdriver/errors.go deleted file mode 100644 index 96d354455..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/errors.go +++ /dev/null @@ -1,36 +0,0 @@ -package graphdriver // import "github.com/docker/docker/daemon/graphdriver" - -const ( - // ErrNotSupported returned when driver is not supported. 
- ErrNotSupported NotSupportedError = "driver not supported" - // ErrPrerequisites returned when driver does not meet prerequisites. - ErrPrerequisites NotSupportedError = "prerequisites for driver not satisfied (wrong filesystem?)" - // ErrIncompatibleFS returned when file system is not supported. - ErrIncompatibleFS NotSupportedError = "backing file system is unsupported for this graph driver" -) - -// ErrUnSupported signals that the graph-driver is not supported on the current configuration -type ErrUnSupported interface { - NotSupported() -} - -// NotSupportedError signals that the graph-driver is not supported on the current configuration -type NotSupportedError string - -func (e NotSupportedError) Error() string { - return string(e) -} - -// NotSupported signals that a graph-driver is not supported. -func (e NotSupportedError) NotSupported() {} - -// IsDriverNotSupported returns true if the error initializing -// the graph driver is a non-supported error. -func IsDriverNotSupported(err error) bool { - switch err.(type) { - case ErrUnSupported: - return true - default: - return false - } -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go b/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go deleted file mode 100644 index e1f368508..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go +++ /dev/null @@ -1,175 +0,0 @@ -package graphdriver // import "github.com/docker/docker/daemon/graphdriver" - -import ( - "io" - "time" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/ioutils" - "github.com/sirupsen/logrus" -) - -var ( - // ApplyUncompressedLayer defines the unpack method used by the graph - // driver. - ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer -) - -// NaiveDiffDriver takes a ProtoDriver and adds the -// capability of the Diffing methods on the local file system, -// which it may or may not support on its own. See the comment -// on the exported NewNaiveDiffDriver function below. -// Notably, the AUFS driver doesn't need to be wrapped like this. -type NaiveDiffDriver struct { - ProtoDriver - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap -} - -// NewNaiveDiffDriver returns a fully functional driver that wraps the -// given ProtoDriver and adds the capability of the following methods which -// it may or may not support on its own: -// Diff(id, parent string) (archive.Archive, error) -// Changes(id, parent string) ([]archive.Change, error) -// ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) -// DiffSize(id, parent string) (size int64, err error) -func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver { - return &NaiveDiffDriver{ProtoDriver: driver, - uidMaps: uidMaps, - gidMaps: gidMaps} -} - -// Diff produces an archive of the changes between the specified -// layer and its parent layer which may be "". 
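
The error types earlier in this hunk drive driver probing: anything satisfying ErrUnSupported is skipped, anything else aborts startup. The self-contained demonstration below mirrors those types to keep it runnable outside the package; the Diff implementation follows after it.

```go
// Demonstration of the not-supported error classification.
package main

import "fmt"

// Mirrors of the vendored types, redeclared for a standalone example.
type NotSupportedError string

func (e NotSupportedError) Error() string { return string(e) }
func (e NotSupportedError) NotSupported() {}

type ErrUnSupported interface{ NotSupported() }

func IsDriverNotSupported(err error) bool {
	switch err.(type) {
	case ErrUnSupported:
		return true
	default:
		return false
	}
}

func main() {
	err := NotSupportedError("driver not supported")
	fmt.Println(IsDriverNotSupported(err))                // true: probe the next driver
	fmt.Println(IsDriverNotSupported(fmt.Errorf("boom"))) // false: fail hard
}
```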
-func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err error) { - startTime := time.Now() - driver := gdw.ProtoDriver - - layerRootFs, err := driver.Get(id, "") - if err != nil { - return nil, err - } - layerFs := layerRootFs.Path() - - defer func() { - if err != nil { - driver.Put(id) - } - }() - - if parent == "" { - archive, err := archive.Tar(layerFs, archive.Uncompressed) - if err != nil { - return nil, err - } - return ioutils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - driver.Put(id) - return err - }), nil - } - - parentRootFs, err := driver.Get(parent, "") - if err != nil { - return nil, err - } - defer driver.Put(parent) - - parentFs := parentRootFs.Path() - - changes, err := archive.ChangesDirs(layerFs, parentFs) - if err != nil { - return nil, err - } - - archive, err := archive.ExportChanges(layerFs, changes, gdw.uidMaps, gdw.gidMaps) - if err != nil { - return nil, err - } - - return ioutils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - driver.Put(id) - - // NaiveDiffDriver compares file metadata with parent layers. Parent layers - // are extracted from tar's with full second precision on modified time. - // We need this hack here to make sure calls within same second receive - // correct result. - time.Sleep(time.Until(startTime.Truncate(time.Second).Add(time.Second))) - return err - }), nil -} - -// Changes produces a list of changes between the specified layer -// and its parent layer. If parent is "", then all changes will be ADD changes. -func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) { - driver := gdw.ProtoDriver - - layerRootFs, err := driver.Get(id, "") - if err != nil { - return nil, err - } - defer driver.Put(id) - - layerFs := layerRootFs.Path() - parentFs := "" - - if parent != "" { - parentRootFs, err := driver.Get(parent, "") - if err != nil { - return nil, err - } - defer driver.Put(parent) - parentFs = parentRootFs.Path() - } - - return archive.ChangesDirs(layerFs, parentFs) -} - -// ApplyDiff extracts the changeset from the given diff into the -// layer with the specified id and parent, returning the size of the -// new layer in bytes. -func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) { - driver := gdw.ProtoDriver - - // Mount the root filesystem so we can apply the diff/layer. - layerRootFs, err := driver.Get(id, "") - if err != nil { - return - } - defer driver.Put(id) - - layerFs := layerRootFs.Path() - options := &archive.TarOptions{UIDMaps: gdw.uidMaps, - GIDMaps: gdw.gidMaps} - start := time.Now().UTC() - logrus.Debug("Start untar layer") - if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil { - return - } - logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) - - return -} - -// DiffSize calculates the changes between the specified layer -// and its parent and returns the size in bytes of the changes -// relative to its base filesystem directory. 
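
Taken together, Diff and ApplyDiff form a round trip: the tar stream Diff produces is exactly what ApplyDiff consumes. A minimal sketch of moving one layer's changes onto another (illustrative only; the driver value and layer IDs are assumed to exist):

    package example

    import "github.com/docker/docker/daemon/graphdriver"

    // copyLayerDiff streams the changes of src relative to parent into dst
    // and returns the number of bytes applied.
    func copyLayerDiff(d graphdriver.Driver, src, parent, dst string) (int64, error) {
        arch, err := d.Diff(src, parent)
        if err != nil {
            return 0, err
        }
        defer arch.Close()
        return d.ApplyDiff(dst, parent, arch)
    }
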
-func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) { - driver := gdw.ProtoDriver - - changes, err := gdw.Changes(id, parent) - if err != nil { - return - } - - layerFs, err := driver.Get(id, "") - if err != nil { - return - } - defer driver.Put(id) - - return archive.ChangesSize(layerFs.Path(), changes), nil -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphbench_unix.go b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphbench_unix.go deleted file mode 100644 index 1b221dabe..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphbench_unix.go +++ /dev/null @@ -1,257 +0,0 @@ -// +build linux freebsd - -package graphtest // import "github.com/docker/docker/daemon/graphdriver/graphtest" - -import ( - "io" - "io/ioutil" - "testing" - - contdriver "github.com/containerd/continuity/driver" - "github.com/docker/docker/pkg/stringid" - "github.com/gotestyourself/gotestyourself/assert" -) - -// DriverBenchExists benchmarks calls to exist -func DriverBenchExists(b *testing.B, drivername string, driveroptions ...string) { - driver := GetDriver(b, drivername, driveroptions...) - defer PutDriver(b) - - base := stringid.GenerateRandomID() - - if err := driver.Create(base, "", nil); err != nil { - b.Fatal(err) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - if !driver.Exists(base) { - b.Fatal("Newly created image doesn't exist") - } - } -} - -// DriverBenchGetEmpty benchmarks calls to get on an empty layer -func DriverBenchGetEmpty(b *testing.B, drivername string, driveroptions ...string) { - driver := GetDriver(b, drivername, driveroptions...) - defer PutDriver(b) - - base := stringid.GenerateRandomID() - - if err := driver.Create(base, "", nil); err != nil { - b.Fatal(err) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := driver.Get(base, "") - b.StopTimer() - if err != nil { - b.Fatalf("Error getting mount: %s", err) - } - if err := driver.Put(base); err != nil { - b.Fatalf("Error putting mount: %s", err) - } - b.StartTimer() - } -} - -// DriverBenchDiffBase benchmarks calls to diff on a root layer -func DriverBenchDiffBase(b *testing.B, drivername string, driveroptions ...string) { - driver := GetDriver(b, drivername, driveroptions...) - defer PutDriver(b) - - base := stringid.GenerateRandomID() - if err := driver.Create(base, "", nil); err != nil { - b.Fatal(err) - } - - if err := addFiles(driver, base, 3); err != nil { - b.Fatal(err) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - arch, err := driver.Diff(base, "") - if err != nil { - b.Fatal(err) - } - _, err = io.Copy(ioutil.Discard, arch) - if err != nil { - b.Fatalf("Error copying archive: %s", err) - } - arch.Close() - } -} - -// DriverBenchDiffN benchmarks calls to diff on two layers with -// a provided number of files on the lower and upper layers. -func DriverBenchDiffN(b *testing.B, bottom, top int, drivername string, driveroptions ...string) { - driver := GetDriver(b, drivername, driveroptions...) 
- defer PutDriver(b) - base := stringid.GenerateRandomID() - upper := stringid.GenerateRandomID() - if err := driver.Create(base, "", nil); err != nil { - b.Fatal(err) - } - - if err := addManyFiles(driver, base, bottom, 3); err != nil { - b.Fatal(err) - } - - if err := driver.Create(upper, base, nil); err != nil { - b.Fatal(err) - } - - if err := addManyFiles(driver, upper, top, 6); err != nil { - b.Fatal(err) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - arch, err := driver.Diff(upper, "") - if err != nil { - b.Fatal(err) - } - _, err = io.Copy(ioutil.Discard, arch) - if err != nil { - b.Fatalf("Error copying archive: %s", err) - } - arch.Close() - } -} - -// DriverBenchDiffApplyN benchmarks calls to diff and apply together -func DriverBenchDiffApplyN(b *testing.B, fileCount int, drivername string, driveroptions ...string) { - driver := GetDriver(b, drivername, driveroptions...) - defer PutDriver(b) - base := stringid.GenerateRandomID() - upper := stringid.GenerateRandomID() - if err := driver.Create(base, "", nil); err != nil { - b.Fatal(err) - } - - if err := addManyFiles(driver, base, fileCount, 3); err != nil { - b.Fatal(err) - } - - if err := driver.Create(upper, base, nil); err != nil { - b.Fatal(err) - } - - if err := addManyFiles(driver, upper, fileCount, 6); err != nil { - b.Fatal(err) - } - diffSize, err := driver.DiffSize(upper, "") - if err != nil { - b.Fatal(err) - } - b.ResetTimer() - b.StopTimer() - for i := 0; i < b.N; i++ { - diff := stringid.GenerateRandomID() - if err := driver.Create(diff, base, nil); err != nil { - b.Fatal(err) - } - - if err := checkManyFiles(driver, diff, fileCount, 3); err != nil { - b.Fatal(err) - } - - b.StartTimer() - - arch, err := driver.Diff(upper, "") - if err != nil { - b.Fatal(err) - } - - applyDiffSize, err := driver.ApplyDiff(diff, "", arch) - if err != nil { - b.Fatal(err) - } - - b.StopTimer() - arch.Close() - - if applyDiffSize != diffSize { - // TODO: enforce this - //b.Fatalf("Apply diff size different, got %d, expected %s", applyDiffSize, diffSize) - } - if err := checkManyFiles(driver, diff, fileCount, 6); err != nil { - b.Fatal(err) - } - } -} - -// DriverBenchDeepLayerDiff benchmarks calls to diff on top of a given number of layers. -func DriverBenchDeepLayerDiff(b *testing.B, layerCount int, drivername string, driveroptions ...string) { - driver := GetDriver(b, drivername, driveroptions...) - defer PutDriver(b) - - base := stringid.GenerateRandomID() - if err := driver.Create(base, "", nil); err != nil { - b.Fatal(err) - } - - if err := addFiles(driver, base, 50); err != nil { - b.Fatal(err) - } - - topLayer, err := addManyLayers(driver, base, layerCount) - if err != nil { - b.Fatal(err) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - arch, err := driver.Diff(topLayer, "") - if err != nil { - b.Fatal(err) - } - _, err = io.Copy(ioutil.Discard, arch) - if err != nil { - b.Fatalf("Error copying archive: %s", err) - } - arch.Close() - } -} - -// DriverBenchDeepLayerRead benchmarks calls to read a file under a given number of layers. -func DriverBenchDeepLayerRead(b *testing.B, layerCount int, drivername string, driveroptions ...string) { - driver := GetDriver(b, drivername, driveroptions...) 
- defer PutDriver(b) - - base := stringid.GenerateRandomID() - if err := driver.Create(base, "", nil); err != nil { - b.Fatal(err) - } - - content := []byte("test content") - if err := addFile(driver, base, "testfile.txt", content); err != nil { - b.Fatal(err) - } - - topLayer, err := addManyLayers(driver, base, layerCount) - if err != nil { - b.Fatal(err) - } - - root, err := driver.Get(topLayer, "") - if err != nil { - b.Fatal(err) - } - defer driver.Put(topLayer) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - - // Read content - c, err := contdriver.ReadFile(root, root.Join(root.Path(), "testfile.txt")) - if err != nil { - b.Fatal(err) - } - - b.StopTimer() - assert.DeepEqual(b, content, c) - b.StartTimer() - } -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_unix.go b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_unix.go deleted file mode 100644 index 5ac397975..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_unix.go +++ /dev/null @@ -1,352 +0,0 @@ -// +build linux freebsd - -package graphtest // import "github.com/docker/docker/daemon/graphdriver/graphtest" - -import ( - "bytes" - "io/ioutil" - "math/rand" - "os" - "path" - "reflect" - "testing" - "unsafe" - - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/daemon/graphdriver/quota" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/go-units" - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" - "golang.org/x/sys/unix" -) - -var ( - drv *Driver -) - -// Driver conforms to graphdriver.Driver interface and -// contains information such as root and reference count of the number of clients using it. -// This helps in testing drivers added into the framework. -type Driver struct { - graphdriver.Driver - root string - refCount int -} - -func newDriver(t testing.TB, name string, options []string) *Driver { - root, err := ioutil.TempDir("", "docker-graphtest-") - assert.NilError(t, err) - - assert.NilError(t, os.MkdirAll(root, 0755)) - d, err := graphdriver.GetDriver(name, nil, graphdriver.Options{DriverOptions: options, Root: root}) - if err != nil { - t.Logf("graphdriver: %v\n", err) - if graphdriver.IsDriverNotSupported(err) { - t.Skipf("Driver %s not supported", name) - } - t.Fatal(err) - } - return &Driver{d, root, 1} -} - -func cleanup(t testing.TB, d *Driver) { - if err := drv.Cleanup(); err != nil { - t.Fatal(err) - } - os.RemoveAll(d.root) -} - -// GetDriver create a new driver with given name or return an existing driver with the name updating the reference count. -func GetDriver(t testing.TB, name string, options ...string) graphdriver.Driver { - if drv == nil { - drv = newDriver(t, name, options) - } else { - drv.refCount++ - } - return drv -} - -// PutDriver removes the driver if it is no longer used and updates the reference count. -func PutDriver(t testing.TB) { - if drv == nil { - t.Skip("No driver to put!") - } - drv.refCount-- - if drv.refCount == 0 { - cleanup(t, drv) - drv = nil - } -} - -// DriverTestCreateEmpty creates a new image and verifies it is empty and the right metadata -func DriverTestCreateEmpty(t testing.TB, drivername string, driverOptions ...string) { - driver := GetDriver(t, drivername, driverOptions...) 
- defer PutDriver(t) - - err := driver.Create("empty", "", nil) - assert.NilError(t, err) - - defer func() { - assert.NilError(t, driver.Remove("empty")) - }() - - if !driver.Exists("empty") { - t.Fatal("Newly created image doesn't exist") - } - - dir, err := driver.Get("empty", "") - assert.NilError(t, err) - - verifyFile(t, dir.Path(), 0755|os.ModeDir, 0, 0) - - // Verify that the directory is empty - fis, err := readDir(dir, dir.Path()) - assert.NilError(t, err) - assert.Check(t, is.Len(fis, 0)) - - driver.Put("empty") -} - -// DriverTestCreateBase create a base driver and verify. -func DriverTestCreateBase(t testing.TB, drivername string, driverOptions ...string) { - driver := GetDriver(t, drivername, driverOptions...) - defer PutDriver(t) - - createBase(t, driver, "Base") - defer func() { - assert.NilError(t, driver.Remove("Base")) - }() - verifyBase(t, driver, "Base") -} - -// DriverTestCreateSnap Create a driver and snap and verify. -func DriverTestCreateSnap(t testing.TB, drivername string, driverOptions ...string) { - driver := GetDriver(t, drivername, driverOptions...) - defer PutDriver(t) - - createBase(t, driver, "Base") - defer func() { - assert.NilError(t, driver.Remove("Base")) - }() - - err := driver.Create("Snap", "Base", nil) - assert.NilError(t, err) - defer func() { - assert.NilError(t, driver.Remove("Snap")) - }() - - verifyBase(t, driver, "Snap") -} - -// DriverTestDeepLayerRead reads a file from a lower layer under a given number of layers -func DriverTestDeepLayerRead(t testing.TB, layerCount int, drivername string, driverOptions ...string) { - driver := GetDriver(t, drivername, driverOptions...) - defer PutDriver(t) - - base := stringid.GenerateRandomID() - if err := driver.Create(base, "", nil); err != nil { - t.Fatal(err) - } - - content := []byte("test content") - if err := addFile(driver, base, "testfile.txt", content); err != nil { - t.Fatal(err) - } - - topLayer, err := addManyLayers(driver, base, layerCount) - if err != nil { - t.Fatal(err) - } - - err = checkManyLayers(driver, topLayer, layerCount) - if err != nil { - t.Fatal(err) - } - - if err := checkFile(driver, topLayer, "testfile.txt", content); err != nil { - t.Fatal(err) - } -} - -// DriverTestDiffApply tests diffing and applying produces the same layer -func DriverTestDiffApply(t testing.TB, fileCount int, drivername string, driverOptions ...string) { - driver := GetDriver(t, drivername, driverOptions...) 
- defer PutDriver(t) - base := stringid.GenerateRandomID() - upper := stringid.GenerateRandomID() - deleteFile := "file-remove.txt" - deleteFileContent := []byte("This file should get removed in upper!") - deleteDir := "var/lib" - - if err := driver.Create(base, "", nil); err != nil { - t.Fatal(err) - } - - if err := addManyFiles(driver, base, fileCount, 3); err != nil { - t.Fatal(err) - } - - if err := addFile(driver, base, deleteFile, deleteFileContent); err != nil { - t.Fatal(err) - } - - if err := addDirectory(driver, base, deleteDir); err != nil { - t.Fatal(err) - } - - if err := driver.Create(upper, base, nil); err != nil { - t.Fatal(err) - } - - if err := addManyFiles(driver, upper, fileCount, 6); err != nil { - t.Fatal(err) - } - - if err := removeAll(driver, upper, deleteFile, deleteDir); err != nil { - t.Fatal(err) - } - - diffSize, err := driver.DiffSize(upper, "") - if err != nil { - t.Fatal(err) - } - - diff := stringid.GenerateRandomID() - if err := driver.Create(diff, base, nil); err != nil { - t.Fatal(err) - } - - if err := checkManyFiles(driver, diff, fileCount, 3); err != nil { - t.Fatal(err) - } - - if err := checkFile(driver, diff, deleteFile, deleteFileContent); err != nil { - t.Fatal(err) - } - - arch, err := driver.Diff(upper, base) - if err != nil { - t.Fatal(err) - } - - buf := bytes.NewBuffer(nil) - if _, err := buf.ReadFrom(arch); err != nil { - t.Fatal(err) - } - if err := arch.Close(); err != nil { - t.Fatal(err) - } - - applyDiffSize, err := driver.ApplyDiff(diff, base, bytes.NewReader(buf.Bytes())) - if err != nil { - t.Fatal(err) - } - - if applyDiffSize != diffSize { - t.Fatalf("Apply diff size different, got %d, expected %d", applyDiffSize, diffSize) - } - - if err := checkManyFiles(driver, diff, fileCount, 6); err != nil { - t.Fatal(err) - } - - if err := checkFileRemoved(driver, diff, deleteFile); err != nil { - t.Fatal(err) - } - - if err := checkFileRemoved(driver, diff, deleteDir); err != nil { - t.Fatal(err) - } -} - -// DriverTestChanges tests computed changes on a layer matches changes made -func DriverTestChanges(t testing.TB, drivername string, driverOptions ...string) { - driver := GetDriver(t, drivername, driverOptions...) - defer PutDriver(t) - base := stringid.GenerateRandomID() - upper := stringid.GenerateRandomID() - if err := driver.Create(base, "", nil); err != nil { - t.Fatal(err) - } - - if err := addManyFiles(driver, base, 20, 3); err != nil { - t.Fatal(err) - } - - if err := driver.Create(upper, base, nil); err != nil { - t.Fatal(err) - } - - expectedChanges, err := changeManyFiles(driver, upper, 20, 6) - if err != nil { - t.Fatal(err) - } - - changes, err := driver.Changes(upper, base) - if err != nil { - t.Fatal(err) - } - - if err = checkChanges(expectedChanges, changes); err != nil { - t.Fatal(err) - } -} - -func writeRandomFile(path string, size uint64) error { - buf := make([]int64, size/8) - - r := rand.NewSource(0) - for i := range buf { - buf[i] = r.Int63() - } - - // Cast to []byte - header := *(*reflect.SliceHeader)(unsafe.Pointer(&buf)) - header.Len *= 8 - header.Cap *= 8 - data := *(*[]byte)(unsafe.Pointer(&header)) - - return ioutil.WriteFile(path, data, 0700) -} - -// DriverTestSetQuota Create a driver and test setting quota. 
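
writeRandomFile above reinterprets an []int64 as []byte through reflect.SliceHeader and unsafe, which is fragile under vetting and future runtimes. A sketch of an equivalent using only the standard library (assuming, as the original effectively does, that size is a multiple of 8):

    package example

    import (
        "encoding/binary"
        "io/ioutil"
        "math/rand"
    )

    func writeRandomFileSafe(path string, size uint64) error {
        data := make([]byte, size)
        r := rand.NewSource(0)
        // Fill eight bytes at a time from the deterministic source; any
        // trailing bytes (size not a multiple of 8) are left zeroed.
        for i := 0; i+8 <= len(data); i += 8 {
            binary.LittleEndian.PutUint64(data[i:], uint64(r.Int63()))
        }
        return ioutil.WriteFile(path, data, 0700)
    }
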
-func DriverTestSetQuota(t *testing.T, drivername string, required bool) { - driver := GetDriver(t, drivername) - defer PutDriver(t) - - createBase(t, driver, "Base") - createOpts := &graphdriver.CreateOpts{} - createOpts.StorageOpt = make(map[string]string, 1) - createOpts.StorageOpt["size"] = "50M" - layerName := drivername + "Test" - if err := driver.CreateReadWrite(layerName, "Base", createOpts); err == quota.ErrQuotaNotSupported && !required { - t.Skipf("Quota not supported on underlying filesystem: %v", err) - } else if err != nil { - t.Fatal(err) - } - - mountPath, err := driver.Get(layerName, "") - if err != nil { - t.Fatal(err) - } - - quota := uint64(50 * units.MiB) - - // Try to write a file smaller than quota, and ensure it works - err = writeRandomFile(path.Join(mountPath.Path(), "smallfile"), quota/2) - if err != nil { - t.Fatal(err) - } - defer os.Remove(path.Join(mountPath.Path(), "smallfile")) - - // Try to write a file bigger than quota. We've already filled up half the quota, so hitting the limit should be easy - err = writeRandomFile(path.Join(mountPath.Path(), "bigfile"), quota) - if err == nil { - t.Fatalf("expected write to fail(), instead had success") - } - if pathError, ok := err.(*os.PathError); ok && pathError.Err != unix.EDQUOT && pathError.Err != unix.ENOSPC { - os.Remove(path.Join(mountPath.Path(), "bigfile")) - t.Fatalf("expect write() to fail with %v or %v, got %v", unix.EDQUOT, unix.ENOSPC, pathError.Err) - } -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_windows.go b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_windows.go deleted file mode 100644 index c6a03f341..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/graphtest_windows.go +++ /dev/null @@ -1 +0,0 @@ -package graphtest // import "github.com/docker/docker/daemon/graphdriver/graphtest" diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil.go b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil.go deleted file mode 100644 index 258aba700..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil.go +++ /dev/null @@ -1,337 +0,0 @@ -package graphtest // import "github.com/docker/docker/daemon/graphdriver/graphtest" - -import ( - "bytes" - "fmt" - "math/rand" - "os" - "sort" - - "github.com/containerd/continuity/driver" - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/stringid" -) - -func randomContent(size int, seed int64) []byte { - s := rand.NewSource(seed) - content := make([]byte, size) - - for i := 0; i < len(content); i += 7 { - val := s.Int63() - for j := 0; i+j < len(content) && j < 7; j++ { - content[i+j] = byte(val) - val >>= 8 - } - } - - return content -} - -func addFiles(drv graphdriver.Driver, layer string, seed int64) error { - root, err := drv.Get(layer, "") - if err != nil { - return err - } - defer drv.Put(layer) - - if err := driver.WriteFile(root, root.Join(root.Path(), "file-a"), randomContent(64, seed), 0755); err != nil { - return err - } - if err := root.MkdirAll(root.Join(root.Path(), "dir-b"), 0755); err != nil { - return err - } - if err := driver.WriteFile(root, root.Join(root.Path(), "dir-b", "file-b"), randomContent(128, seed+1), 0755); err != nil { - return err - } - - return driver.WriteFile(root, root.Join(root.Path(), "file-c"), randomContent(128*128, seed+2), 0755) -} - -func checkFile(drv graphdriver.Driver, layer, filename 
string, content []byte) error { - root, err := drv.Get(layer, "") - if err != nil { - return err - } - defer drv.Put(layer) - - fileContent, err := driver.ReadFile(root, root.Join(root.Path(), filename)) - if err != nil { - return err - } - - if !bytes.Equal(fileContent, content) { - return fmt.Errorf("mismatched file content %v, expecting %v", fileContent, content) - } - - return nil -} - -func addFile(drv graphdriver.Driver, layer, filename string, content []byte) error { - root, err := drv.Get(layer, "") - if err != nil { - return err - } - defer drv.Put(layer) - - return driver.WriteFile(root, root.Join(root.Path(), filename), content, 0755) -} - -func addDirectory(drv graphdriver.Driver, layer, dir string) error { - root, err := drv.Get(layer, "") - if err != nil { - return err - } - defer drv.Put(layer) - - return root.MkdirAll(root.Join(root.Path(), dir), 0755) -} - -func removeAll(drv graphdriver.Driver, layer string, names ...string) error { - root, err := drv.Get(layer, "") - if err != nil { - return err - } - defer drv.Put(layer) - - for _, filename := range names { - if err := root.RemoveAll(root.Join(root.Path(), filename)); err != nil { - return err - } - } - return nil -} - -func checkFileRemoved(drv graphdriver.Driver, layer, filename string) error { - root, err := drv.Get(layer, "") - if err != nil { - return err - } - defer drv.Put(layer) - - if _, err := root.Stat(root.Join(root.Path(), filename)); err == nil { - return fmt.Errorf("file still exists: %s", root.Join(root.Path(), filename)) - } else if !os.IsNotExist(err) { - return err - } - - return nil -} - -func addManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) error { - root, err := drv.Get(layer, "") - if err != nil { - return err - } - defer drv.Put(layer) - - for i := 0; i < count; i += 100 { - dir := root.Join(root.Path(), fmt.Sprintf("directory-%d", i)) - if err := root.MkdirAll(dir, 0755); err != nil { - return err - } - for j := 0; i+j < count && j < 100; j++ { - file := root.Join(dir, fmt.Sprintf("file-%d", i+j)) - if err := driver.WriteFile(root, file, randomContent(64, seed+int64(i+j)), 0755); err != nil { - return err - } - } - } - - return nil -} - -func changeManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) ([]archive.Change, error) { - root, err := drv.Get(layer, "") - if err != nil { - return nil, err - } - defer drv.Put(layer) - - var changes []archive.Change - for i := 0; i < count; i += 100 { - archiveRoot := fmt.Sprintf("/directory-%d", i) - if err := root.MkdirAll(root.Join(root.Path(), archiveRoot), 0755); err != nil { - return nil, err - } - for j := 0; i+j < count && j < 100; j++ { - if j == 0 { - changes = append(changes, archive.Change{ - Path: archiveRoot, - Kind: archive.ChangeModify, - }) - } - var change archive.Change - switch j % 3 { - // Update file - case 0: - change.Path = root.Join(archiveRoot, fmt.Sprintf("file-%d", i+j)) - change.Kind = archive.ChangeModify - if err := driver.WriteFile(root, root.Join(root.Path(), change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil { - return nil, err - } - // Add file - case 1: - change.Path = root.Join(archiveRoot, fmt.Sprintf("file-%d-%d", seed, i+j)) - change.Kind = archive.ChangeAdd - if err := driver.WriteFile(root, root.Join(root.Path(), change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil { - return nil, err - } - // Remove file - case 2: - change.Path = root.Join(archiveRoot, fmt.Sprintf("file-%d", i+j)) - change.Kind = archive.ChangeDelete - if err := 
root.Remove(root.Join(root.Path(), change.Path)); err != nil { - return nil, err - } - } - changes = append(changes, change) - } - } - - return changes, nil -} - -func checkManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) error { - root, err := drv.Get(layer, "") - if err != nil { - return err - } - defer drv.Put(layer) - - for i := 0; i < count; i += 100 { - dir := root.Join(root.Path(), fmt.Sprintf("directory-%d", i)) - for j := 0; i+j < count && j < 100; j++ { - file := root.Join(dir, fmt.Sprintf("file-%d", i+j)) - fileContent, err := driver.ReadFile(root, file) - if err != nil { - return err - } - - content := randomContent(64, seed+int64(i+j)) - - if !bytes.Equal(fileContent, content) { - return fmt.Errorf("mismatched file content %v, expecting %v", fileContent, content) - } - } - } - - return nil -} - -type changeList []archive.Change - -func (c changeList) Less(i, j int) bool { - if c[i].Path == c[j].Path { - return c[i].Kind < c[j].Kind - } - return c[i].Path < c[j].Path -} -func (c changeList) Len() int { return len(c) } -func (c changeList) Swap(i, j int) { c[j], c[i] = c[i], c[j] } - -func checkChanges(expected, actual []archive.Change) error { - if len(expected) != len(actual) { - return fmt.Errorf("unexpected number of changes, expected %d, got %d", len(expected), len(actual)) - } - sort.Sort(changeList(expected)) - sort.Sort(changeList(actual)) - - for i := range expected { - if expected[i] != actual[i] { - return fmt.Errorf("unexpected change, expecting %v, got %v", expected[i], actual[i]) - } - } - - return nil -} - -func addLayerFiles(drv graphdriver.Driver, layer, parent string, i int) error { - root, err := drv.Get(layer, "") - if err != nil { - return err - } - defer drv.Put(layer) - - if err := driver.WriteFile(root, root.Join(root.Path(), "top-id"), []byte(layer), 0755); err != nil { - return err - } - layerDir := root.Join(root.Path(), fmt.Sprintf("layer-%d", i)) - if err := root.MkdirAll(layerDir, 0755); err != nil { - return err - } - if err := driver.WriteFile(root, root.Join(layerDir, "layer-id"), []byte(layer), 0755); err != nil { - return err - } - return driver.WriteFile(root, root.Join(layerDir, "parent-id"), []byte(parent), 0755) -} - -func addManyLayers(drv graphdriver.Driver, baseLayer string, count int) (string, error) { - lastLayer := baseLayer - for i := 1; i <= count; i++ { - nextLayer := stringid.GenerateRandomID() - if err := drv.Create(nextLayer, lastLayer, nil); err != nil { - return "", err - } - if err := addLayerFiles(drv, nextLayer, lastLayer, i); err != nil { - return "", err - } - - lastLayer = nextLayer - - } - return lastLayer, nil -} - -func checkManyLayers(drv graphdriver.Driver, layer string, count int) error { - root, err := drv.Get(layer, "") - if err != nil { - return err - } - defer drv.Put(layer) - - layerIDBytes, err := driver.ReadFile(root, root.Join(root.Path(), "top-id")) - if err != nil { - return err - } - - if !bytes.Equal(layerIDBytes, []byte(layer)) { - return fmt.Errorf("mismatched file content %v, expecting %v", layerIDBytes, []byte(layer)) - } - - for i := count; i > 0; i-- { - layerDir := root.Join(root.Path(), fmt.Sprintf("layer-%d", i)) - - thisLayerIDBytes, err := driver.ReadFile(root, root.Join(layerDir, "layer-id")) - if err != nil { - return err - } - if !bytes.Equal(thisLayerIDBytes, layerIDBytes) { - return fmt.Errorf("mismatched file content %v, expecting %v", thisLayerIDBytes, layerIDBytes) - } - layerIDBytes, err = driver.ReadFile(root, root.Join(layerDir, "parent-id")) - if err != nil { 
- return err - } - } - return nil -} - -// readDir reads a directory just like driver.ReadDir() -// then hides specific files (currently "lost+found") -// so the tests don't "see" it -func readDir(r driver.Driver, dir string) ([]os.FileInfo, error) { - a, err := driver.ReadDir(r, dir) - if err != nil { - return nil, err - } - - b := a[:0] - for _, x := range a { - if x.Name() != "lost+found" { // ext4 always have this dir - b = append(b, x) - } - } - - return b, nil -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil_unix.go b/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil_unix.go deleted file mode 100644 index 3103df150..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/graphtest/testutil_unix.go +++ /dev/null @@ -1,69 +0,0 @@ -// +build linux freebsd - -package graphtest // import "github.com/docker/docker/daemon/graphdriver/graphtest" - -import ( - "os" - "syscall" - "testing" - - contdriver "github.com/containerd/continuity/driver" - "github.com/docker/docker/daemon/graphdriver" - "github.com/gotestyourself/gotestyourself/assert" - is "github.com/gotestyourself/gotestyourself/assert/cmp" - "golang.org/x/sys/unix" -) - -func verifyFile(t testing.TB, path string, mode os.FileMode, uid, gid uint32) { - fi, err := os.Stat(path) - assert.NilError(t, err) - - actual := fi.Mode() - assert.Check(t, is.Equal(mode&os.ModeType, actual&os.ModeType), path) - assert.Check(t, is.Equal(mode&os.ModePerm, actual&os.ModePerm), path) - assert.Check(t, is.Equal(mode&os.ModeSticky, actual&os.ModeSticky), path) - assert.Check(t, is.Equal(mode&os.ModeSetuid, actual&os.ModeSetuid), path) - assert.Check(t, is.Equal(mode&os.ModeSetgid, actual&os.ModeSetgid), path) - - if stat, ok := fi.Sys().(*syscall.Stat_t); ok { - assert.Check(t, is.Equal(uid, stat.Uid), path) - assert.Check(t, is.Equal(gid, stat.Gid), path) - } -} - -func createBase(t testing.TB, driver graphdriver.Driver, name string) { - // We need to be able to set any perms - oldmask := unix.Umask(0) - defer unix.Umask(oldmask) - - err := driver.CreateReadWrite(name, "", nil) - assert.NilError(t, err) - - dirFS, err := driver.Get(name, "") - assert.NilError(t, err) - defer driver.Put(name) - - subdir := dirFS.Join(dirFS.Path(), "a subdir") - assert.NilError(t, dirFS.Mkdir(subdir, 0705|os.ModeSticky)) - assert.NilError(t, dirFS.Lchown(subdir, 1, 2)) - - file := dirFS.Join(dirFS.Path(), "a file") - err = contdriver.WriteFile(dirFS, file, []byte("Some data"), 0222|os.ModeSetuid) - assert.NilError(t, err) -} - -func verifyBase(t testing.TB, driver graphdriver.Driver, name string) { - dirFS, err := driver.Get(name, "") - assert.NilError(t, err) - defer driver.Put(name) - - subdir := dirFS.Join(dirFS.Path(), "a subdir") - verifyFile(t, subdir, 0705|os.ModeDir|os.ModeSticky, 1, 2) - - file := dirFS.Join(dirFS.Path(), "a file") - verifyFile(t, file, 0222|os.ModeSetuid, 0, 0) - - files, err := readDir(dirFS, dirFS.Path()) - assert.NilError(t, err) - assert.Check(t, is.Len(files, 2)) -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/lcow/lcow.go b/vendor/github.com/docker/docker/daemon/graphdriver/lcow/lcow.go deleted file mode 100644 index 649beccdc..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/lcow/lcow.go +++ /dev/null @@ -1,1052 +0,0 @@ -// +build windows - -// Maintainer: jhowardmsft -// Locale: en-gb -// About: Graph-driver for Linux Containers On Windows (LCOW) -// -// This graphdriver runs in two modes. 
Yet to be determined which one will -// be the shipping mode. The global mode is where a single utility VM -// is used for all service VM tool operations. This isn't safe security-wise -// as it's attaching a sandbox of multiple containers to it, containing -// untrusted data. This may be fine for client devops scenarios. In -// safe mode, a unique utility VM is instantiated for all service VM tool -// operations. The downside of safe-mode is that operations are slower as -// a new service utility VM has to be started and torn-down when needed. -// -// Options: -// -// The following options are read by the graphdriver itself: -// -// * lcow.globalmode - Enables global service VM Mode -// -- Possible values: true/false -// -- Default if omitted: false -// -// * lcow.sandboxsize - Specifies a custom sandbox size in GB for starting a container -// -- Possible values: >= default sandbox size (opengcs defined, currently 20) -// -- Default if omitted: 20 -// -// The following options are read by opengcs: -// -// * lcow.kirdpath - Specifies a custom path to a kernel/initrd pair -// -- Possible values: Any local path that is not a mapped drive -// -- Default if omitted: %ProgramFiles%\Linux Containers -// -// * lcow.kernel - Specifies a custom kernel file located in the `lcow.kirdpath` path -// -- Possible values: Any valid filename -// -- Default if omitted: bootx64.efi -// -// * lcow.initrd - Specifies a custom initrd file located in the `lcow.kirdpath` path -// -- Possible values: Any valid filename -// -- Default if omitted: initrd.img -// -// * lcow.bootparameters - Specifies additional boot parameters for booting in kernel+initrd mode -// -- Possible values: Any valid linux kernel boot options -// -- Default if omitted: -// -// * lcow.vhdx - Specifies a custom vhdx file to boot (instead of a kernel+initrd) -// -- Possible values: Any valid filename -// -- Default if omitted: uvm.vhdx under `lcow.kirdpath` -// -// * lcow.timeout - Specifies a timeout for utility VM operations in seconds -// -- Possible values: >=0 -// -- Default if omitted: 300 - -// TODO: Grab logs from SVM at terminate or errors - -package lcow // import "github.com/docker/docker/daemon/graphdriver/lcow" - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path" - "path/filepath" - "strconv" - "strings" - "sync" - "syscall" - "time" - - "github.com/Microsoft/hcsshim" - "github.com/Microsoft/opengcs/client" - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/containerfs" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" -) - -// init registers this driver to the register. It gets initialised by the -// function passed in the second parameter, implemented in this file. -func init() { - graphdriver.Register("lcow", InitDriver) -} - -const ( - // sandboxFilename is the name of the file containing a layer's sandbox (read-write layer). - sandboxFilename = "sandbox.vhdx" - - // scratchFilename is the name of the scratch-space used by an SVM to avoid running out of memory. - scratchFilename = "scratch.vhdx" - - // layerFilename is the name of the file containing a layer's read-only contents. - // Note this really is VHD format, not VHDX. - layerFilename = "layer.vhd" - - // toolsScratchPath is a location in a service utility VM that the tools can use as a - // scratch space to avoid running out of memory. 
- toolsScratchPath = "/tmp/scratch" - - // svmGlobalID is the ID used in the serviceVMs map for the global service VM when running in "global" mode. - svmGlobalID = "_lcow_global_svm_" - - // cacheDirectory is the sub-folder under the driver's data-root used to cache blank sandbox and scratch VHDs. - cacheDirectory = "cache" - - // scratchDirectory is the sub-folder under the driver's data-root used for scratch VHDs in service VMs - scratchDirectory = "scratch" - - // errOperationPending is the HRESULT returned by the HCS when the VM termination operation is still pending. - errOperationPending syscall.Errno = 0xc0370103 -) - -// Driver represents an LCOW graph driver. -type Driver struct { - dataRoot string // Root path on the host where we are storing everything. - cachedSandboxFile string // Location of the local default-sized cached sandbox. - cachedSandboxMutex sync.Mutex // Protects race conditions from multiple threads creating the cached sandbox. - cachedScratchFile string // Location of the local cached empty scratch space. - cachedScratchMutex sync.Mutex // Protects race conditions from multiple threads creating the cached scratch. - options []string // Graphdriver options we are initialised with. - globalMode bool // Indicates if running in an unsafe/global service VM mode. - - // NOTE: It is OK to use a cache here because Windows does not support - // restoring containers when the daemon dies. - serviceVms *serviceVMMap // Map of the configs representing the service VM(s) we are running. -} - -// layerDetails is the structure returned by a helper function `getLayerDetails` -// for getting information about a layer folder -type layerDetails struct { - filename string // \path\to\sandbox.vhdx or \path\to\layer.vhd - size int64 // size of the above file - isSandbox bool // true if sandbox.vhdx -} - -// deletefiles is a helper function for initialisation where we delete any -// left-over scratch files in case we were previously forcibly terminated. -func deletefiles(path string, f os.FileInfo, err error) error { - if strings.HasSuffix(f.Name(), ".vhdx") { - logrus.Warnf("lcowdriver: init: deleting stale scratch file %s", path) - return os.Remove(path) - } - return nil -} - -// InitDriver returns a new LCOW storage driver. 
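
Given the options documented in the file header, a caller would configure the driver roughly as below. This is an illustrative sketch only: the data-root path is hypothetical, and on a real system this only compiles on Windows because of the package's build tag.

    package example

    import (
        "github.com/docker/docker/daemon/graphdriver"
        "github.com/docker/docker/daemon/graphdriver/lcow"
    )

    func newLCOWDriver() (graphdriver.Driver, error) {
        opts := []string{
            "lcow.globalmode=false", // a unique service VM per operation (safe mode)
            "lcow.timeout=300",      // utility VM operation timeout, in seconds
        }
        // InitDriver ignores the UID/GID maps, hence the nils.
        return lcow.InitDriver(`C:\ProgramData\docker\lcow`, opts, nil, nil)
    }
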
-func InitDriver(dataRoot string, options []string, _, _ []idtools.IDMap) (graphdriver.Driver, error) {
-    title := "lcowdriver: init:"
-
-    cd := filepath.Join(dataRoot, cacheDirectory)
-    sd := filepath.Join(dataRoot, scratchDirectory)
-
-    d := &Driver{
-        dataRoot:          dataRoot,
-        options:           options,
-        cachedSandboxFile: filepath.Join(cd, sandboxFilename),
-        cachedScratchFile: filepath.Join(cd, scratchFilename),
-        serviceVms: &serviceVMMap{
-            svms: make(map[string]*serviceVMMapItem),
-        },
-        globalMode: false,
-    }
-
-    // Look for relevant options
-    for _, v := range options {
-        opt := strings.SplitN(v, "=", 2)
-        if len(opt) == 2 {
-            switch strings.ToLower(opt[0]) {
-            case "lcow.globalmode":
-                var err error
-                d.globalMode, err = strconv.ParseBool(opt[1])
-                if err != nil {
-                    return nil, fmt.Errorf("%s failed to parse value for 'lcow.globalmode' - must be 'true' or 'false'", title)
-                }
-                break
-            }
-        }
-    }
-
-    // Make sure the dataRoot directory is created
-    if err := idtools.MkdirAllAndChown(dataRoot, 0700, idtools.IDPair{UID: 0, GID: 0}); err != nil {
-        return nil, fmt.Errorf("%s failed to create '%s': %v", title, dataRoot, err)
-    }
-
-    // Make sure the cache directory is created under dataRoot
-    if err := idtools.MkdirAllAndChown(cd, 0700, idtools.IDPair{UID: 0, GID: 0}); err != nil {
-        return nil, fmt.Errorf("%s failed to create '%s': %v", title, cd, err)
-    }
-
-    // Make sure the scratch directory is created under dataRoot
-    if err := idtools.MkdirAllAndChown(sd, 0700, idtools.IDPair{UID: 0, GID: 0}); err != nil {
-        return nil, fmt.Errorf("%s failed to create '%s': %v", title, sd, err)
-    }
-
-    // Delete any items in the scratch directory
-    filepath.Walk(sd, deletefiles)
-
-    logrus.Infof("%s dataRoot: %s globalMode: %t", title, dataRoot, d.globalMode)
-
-    return d, nil
-}
-
-func (d *Driver) getVMID(id string) string {
-    if d.globalMode {
-        return svmGlobalID
-    }
-    return id
-}
-
-// startServiceVMIfNotRunning starts a service utility VM if it is not currently running.
-// It can optionally be started with a mapped virtual disk. Returns an opengcs config structure
-// representing the VM.
-func (d *Driver) startServiceVMIfNotRunning(id string, mvdToAdd []hcsshim.MappedVirtualDisk, context string) (_ *serviceVM, err error) {
-    // Use the global ID if in global mode
-    id = d.getVMID(id)
-
-    title := fmt.Sprintf("lcowdriver: startservicevmifnotrunning %s:", id)
-
-    // Attempt to add ID to the service vm map
-    logrus.Debugf("%s: Adding entry to service vm map", title)
-    svm, exists, err := d.serviceVms.add(id)
-    if err != nil && err == errVMisTerminating {
-        // VM is in the process of terminating. Wait until it's done and then try again
-        logrus.Debugf("%s: VM with current ID still in the process of terminating: %s", title, id)
-        if err := svm.getStopError(); err != nil {
-            logrus.Debugf("%s: VM %s did not stop successfully: %s", title, id, err)
-            return nil, err
-        }
-        return d.startServiceVMIfNotRunning(id, mvdToAdd, context)
-    } else if err != nil {
-        logrus.Debugf("%s: failed to add service vm to map: %s", title, err)
-        return nil, fmt.Errorf("%s: failed to add to service vm map: %s", title, err)
-    }
-
-    if exists {
-        // Service VM is already up and running. In this case, just hot add the vhds.
-        logrus.Debugf("%s: service vm already exists.
Just hot adding: %+v", title, mvdToAdd) - if err := svm.hotAddVHDs(mvdToAdd...); err != nil { - logrus.Debugf("%s: failed to hot add vhds on service vm creation: %s", title, err) - return nil, fmt.Errorf("%s: failed to hot add vhds on service vm: %s", title, err) - } - return svm, nil - } - - // We are the first service for this id, so we need to start it - logrus.Debugf("%s: service vm doesn't exist. Now starting it up: %s", title, id) - - defer func() { - // Signal that start has finished, passing in the error if any. - svm.signalStartFinished(err) - if err != nil { - // We added a ref to the VM, since we failed, we should delete the ref. - d.terminateServiceVM(id, "error path on startServiceVMIfNotRunning", false) - } - }() - - // Generate a default configuration - if err := svm.config.GenerateDefault(d.options); err != nil { - return nil, fmt.Errorf("%s failed to generate default gogcs configuration for global svm (%s): %s", title, context, err) - } - - // For the name, we deliberately suffix if safe-mode to ensure that it doesn't - // clash with another utility VM which may be running for the container itself. - // This also makes it easier to correlate through Get-ComputeProcess. - if id == svmGlobalID { - svm.config.Name = svmGlobalID - } else { - svm.config.Name = fmt.Sprintf("%s_svm", id) - } - - // Ensure we take the cached scratch mutex around the check to ensure the file is complete - // and not in the process of being created by another thread. - scratchTargetFile := filepath.Join(d.dataRoot, scratchDirectory, fmt.Sprintf("%s.vhdx", id)) - - logrus.Debugf("%s locking cachedScratchMutex", title) - d.cachedScratchMutex.Lock() - if _, err := os.Stat(d.cachedScratchFile); err == nil { - // Make a copy of cached scratch to the scratch directory - logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) cloning cached scratch for mvd", context) - if err := client.CopyFile(d.cachedScratchFile, scratchTargetFile, true); err != nil { - logrus.Debugf("%s releasing cachedScratchMutex on err: %s", title, err) - d.cachedScratchMutex.Unlock() - return nil, err - } - - // Add the cached clone as a mapped virtual disk - logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) adding cloned scratch as mvd", context) - mvd := hcsshim.MappedVirtualDisk{ - HostPath: scratchTargetFile, - ContainerPath: toolsScratchPath, - CreateInUtilityVM: true, - } - svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, mvd) - svm.scratchAttached = true - } - - logrus.Debugf("%s releasing cachedScratchMutex", title) - d.cachedScratchMutex.Unlock() - - // If requested to start it with a mapped virtual disk, add it now. - svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, mvdToAdd...) - for _, mvd := range svm.config.MappedVirtualDisks { - svm.attachedVHDs[mvd.HostPath] = 1 - } - - // Start it. - logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) starting %s", context, svm.config.Name) - if err := svm.config.StartUtilityVM(); err != nil { - return nil, fmt.Errorf("failed to start service utility VM (%s): %s", context, err) - } - - // defer function to terminate the VM if the next steps fail - defer func() { - if err != nil { - waitTerminate(svm, fmt.Sprintf("startServiceVmIfNotRunning: %s (%s)", id, context)) - } - }() - - // Now we have a running service VM, we can create the cached scratch file if it doesn't exist. 
- logrus.Debugf("%s locking cachedScratchMutex", title) - d.cachedScratchMutex.Lock() - if _, err := os.Stat(d.cachedScratchFile); err != nil { - logrus.Debugf("%s (%s): creating an SVM scratch", title, context) - - // Don't use svm.CreateExt4Vhdx since that only works when the service vm is setup, - // but we're still in that process right now. - if err := svm.config.CreateExt4Vhdx(scratchTargetFile, client.DefaultVhdxSizeGB, d.cachedScratchFile); err != nil { - logrus.Debugf("%s (%s): releasing cachedScratchMutex on error path", title, context) - d.cachedScratchMutex.Unlock() - logrus.Debugf("%s: failed to create vm scratch %s: %s", title, scratchTargetFile, err) - return nil, fmt.Errorf("failed to create SVM scratch VHDX (%s): %s", context, err) - } - } - logrus.Debugf("%s (%s): releasing cachedScratchMutex", title, context) - d.cachedScratchMutex.Unlock() - - // Hot-add the scratch-space if not already attached - if !svm.scratchAttached { - logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) hot-adding scratch %s", context, scratchTargetFile) - if err := svm.hotAddVHDsAtStart(hcsshim.MappedVirtualDisk{ - HostPath: scratchTargetFile, - ContainerPath: toolsScratchPath, - CreateInUtilityVM: true, - }); err != nil { - logrus.Debugf("%s: failed to hot-add scratch %s: %s", title, scratchTargetFile, err) - return nil, fmt.Errorf("failed to hot-add %s failed: %s", scratchTargetFile, err) - } - svm.scratchAttached = true - } - - logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) success", context) - return svm, nil -} - -// terminateServiceVM terminates a service utility VM if its running if it's, -// not being used by any goroutine, but does nothing when in global mode as it's -// lifetime is limited to that of the daemon. If the force flag is set, then -// the VM will be killed regardless of the ref count or if it's global. -func (d *Driver) terminateServiceVM(id, context string, force bool) (err error) { - // We don't do anything in safe mode unless the force flag has been passed, which - // is only the case for cleanup at driver termination. - if d.globalMode && !force { - logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - doing nothing as in global mode", id, context) - return nil - } - - id = d.getVMID(id) - - var svm *serviceVM - var lastRef bool - if !force { - // In the not force case, we ref count - svm, lastRef, err = d.serviceVms.decrementRefCount(id) - } else { - // In the force case, we ignore the ref count and just set it to 0 - svm, err = d.serviceVms.setRefCountZero(id) - lastRef = true - } - - if err == errVMUnknown { - return nil - } else if err == errVMisTerminating { - return svm.getStopError() - } else if !lastRef { - return nil - } - - // We run the deletion of the scratch as a deferred function to at least attempt - // clean-up in case of errors. 
-    defer func() {
-        if svm.scratchAttached {
-            scratchTargetFile := filepath.Join(d.dataRoot, scratchDirectory, fmt.Sprintf("%s.vhdx", id))
-            logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - deleting scratch %s", id, context, scratchTargetFile)
-            if errRemove := os.Remove(scratchTargetFile); errRemove != nil {
-                logrus.Warnf("failed to remove scratch file %s (%s): %s", scratchTargetFile, context, errRemove)
-                err = errRemove
-            }
-        }
-
-        // This function shouldn't actually return error unless there is a bug
-        if errDelete := d.serviceVms.deleteID(id); errDelete != nil {
-            logrus.Warnf("failed to remove service vm from svm map %s (%s): %s", id, context, errDelete)
-        }
-
-        // Signal that this VM has stopped
-        svm.signalStopFinished(err)
-    }()
-
-    // Now it's possible that the service VM failed to start and now we are trying to terminate it.
-    // In this case, we will relay the error to the goroutines waiting for this vm to stop.
-    if err := svm.getStartError(); err != nil {
-        logrus.Debugf("lcowdriver: terminateservicevm: %s had failed to start up: %s", id, err)
-        return err
-    }
-
-    if err := waitTerminate(svm, fmt.Sprintf("terminateservicevm: %s (%s)", id, context)); err != nil {
-        return err
-    }
-
-    logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - success", id, context)
-    return nil
-}
-
-func waitTerminate(svm *serviceVM, context string) error {
-    if svm.config == nil {
-        return fmt.Errorf("lcowdriver: waitTerminate: Nil utility VM. %s", context)
-    }
-
-    logrus.Debugf("lcowdriver: waitTerminate: Calling terminate: %s", context)
-    if err := svm.config.Uvm.Terminate(); err != nil {
-        // We might get operation still pending from the HCS. In that case, we shouldn't return
-        // an error since we call wait right after.
-        underlyingError := err
-        if conterr, ok := err.(*hcsshim.ContainerError); ok {
-            underlyingError = conterr.Err
-        }
-
-        if syscallErr, ok := underlyingError.(syscall.Errno); ok {
-            underlyingError = syscallErr
-        }
-
-        if underlyingError != errOperationPending {
-            return fmt.Errorf("failed to terminate utility VM (%s): %s", context, err)
-        }
-        logrus.Debugf("lcowdriver: waitTerminate: uvm.Terminate() returned operation pending (%s)", context)
-    }
-
-    logrus.Debugf("lcowdriver: waitTerminate: (%s) - waiting for utility VM to terminate", context)
-    if err := svm.config.Uvm.WaitTimeout(time.Duration(svm.config.UvmTimeoutSeconds) * time.Second); err != nil {
-        return fmt.Errorf("failed waiting for utility VM to terminate (%s): %s", context, err)
-    }
-    return nil
-}
-
-// String returns the string representation of a driver. This should match
-// the name the graph driver has been registered with.
-func (d *Driver) String() string {
-    return "lcow"
-}
-
-// Status returns the status of the driver.
-func (d *Driver) Status() [][2]string {
-    return [][2]string{
-        {"LCOW", ""},
-        // TODO: Add some more info here - mode, home, ....
-    }
-}
-
-// Exists returns true if the given id is registered with this driver.
-func (d *Driver) Exists(id string) bool {
-    _, err := os.Lstat(d.dir(id))
-    logrus.Debugf("lcowdriver: exists: id %s %t", id, err == nil)
-    return err == nil
-}
-
-// CreateReadWrite creates a layer that is writable for use as a container
-// file system. That equates to creating a sandbox.
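
Since the CreateReadWrite implementation that follows honours a per-layer sandbox size through StorageOpt, a caller requesting a non-default sandbox would look roughly like this (illustrative only; the layer IDs are hypothetical):

    package example

    import "github.com/docker/docker/daemon/graphdriver"

    func createLargeSandbox(d graphdriver.Driver) error {
        opts := &graphdriver.CreateOpts{
            StorageOpt: map[string]string{
                // Gigabytes; values below the opengcs default (20) are rejected.
                "lcow.sandboxsize": "40",
            },
        }
        return d.CreateReadWrite("layer-id", "parent-id", opts)
    }
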
-func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - title := fmt.Sprintf("lcowdriver: createreadwrite: id %s", id) - logrus.Debugf(title) - - // First we need to create the folder - if err := d.Create(id, parent, opts); err != nil { - return err - } - - // Look for an explicit sandbox size option. - sandboxSize := uint64(client.DefaultVhdxSizeGB) - for k, v := range opts.StorageOpt { - switch strings.ToLower(k) { - case "lcow.sandboxsize": - var err error - sandboxSize, err = strconv.ParseUint(v, 10, 32) - if err != nil { - return fmt.Errorf("%s failed to parse value '%s' for 'lcow.sandboxsize'", title, v) - } - if sandboxSize < client.DefaultVhdxSizeGB { - return fmt.Errorf("%s 'lcow.sandboxsize' option cannot be less than %d", title, client.DefaultVhdxSizeGB) - } - break - } - } - - // Massive perf optimisation here. If we know that the RW layer is the default size, - // and that the cached sandbox already exists, and we are running in safe mode, we - // can just do a simple copy into the layers sandbox file without needing to start a - // unique service VM. For a global service VM, it doesn't really matter. Of course, - // this is only the case where the sandbox is the default size. - // - // Make sure we have the sandbox mutex taken while we are examining it. - if sandboxSize == client.DefaultVhdxSizeGB { - logrus.Debugf("%s: locking cachedSandboxMutex", title) - d.cachedSandboxMutex.Lock() - _, err := os.Stat(d.cachedSandboxFile) - logrus.Debugf("%s: releasing cachedSandboxMutex", title) - d.cachedSandboxMutex.Unlock() - if err == nil { - logrus.Debugf("%s: using cached sandbox to populate", title) - if err := client.CopyFile(d.cachedSandboxFile, filepath.Join(d.dir(id), sandboxFilename), true); err != nil { - return err - } - return nil - } - } - - logrus.Debugf("%s: creating SVM to create sandbox", title) - svm, err := d.startServiceVMIfNotRunning(id, nil, "createreadwrite") - if err != nil { - return err - } - defer d.terminateServiceVM(id, "createreadwrite", false) - - // So the sandbox needs creating. If default size ensure we are the only thread populating the cache. - // Non-default size we don't store, just create them one-off so no need to lock the cachedSandboxMutex. - if sandboxSize == client.DefaultVhdxSizeGB { - logrus.Debugf("%s: locking cachedSandboxMutex for creation", title) - d.cachedSandboxMutex.Lock() - defer func() { - logrus.Debugf("%s: releasing cachedSandboxMutex for creation", title) - d.cachedSandboxMutex.Unlock() - }() - } - - // Make sure we don't write to our local cached copy if this is for a non-default size request. - targetCacheFile := d.cachedSandboxFile - if sandboxSize != client.DefaultVhdxSizeGB { - targetCacheFile = "" - } - - // Create the ext4 vhdx - logrus.Debugf("%s: creating sandbox ext4 vhdx", title) - if err := svm.createExt4VHDX(filepath.Join(d.dir(id), sandboxFilename), uint32(sandboxSize), targetCacheFile); err != nil { - logrus.Debugf("%s: failed to create sandbox vhdx for %s: %s", title, id, err) - return err - } - return nil -} - -// Create creates the folder for the layer with the given id, and -// adds it to the layer chain. 
-func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
-    logrus.Debugf("lcowdriver: create: id %s parent: %s", id, parent)
-
-    parentChain, err := d.getLayerChain(parent)
-    if err != nil {
-        return err
-    }
-
-    var layerChain []string
-    if parent != "" {
-        if !d.Exists(parent) {
-            return fmt.Errorf("lcowdriver: cannot create layer folder with missing parent %s", parent)
-        }
-        layerChain = []string{d.dir(parent)}
-    }
-    layerChain = append(layerChain, parentChain...)
-
-    // Make sure layers are created with the correct ACL so that VMs can access them.
-    layerPath := d.dir(id)
-    logrus.Debugf("lcowdriver: create: id %s: creating %s", id, layerPath)
-    if err := system.MkdirAllWithACL(layerPath, 0755, system.SddlNtvmAdministratorsLocalSystem); err != nil {
-        return err
-    }
-
-    if err := d.setLayerChain(id, layerChain); err != nil {
-        if err2 := os.RemoveAll(layerPath); err2 != nil {
-            logrus.Warnf("failed to remove layer %s: %s", layerPath, err2)
-        }
-        return err
-    }
-    logrus.Debugf("lcowdriver: create: id %s: success", id)
-
-    return nil
-}
-
-// Remove unmounts and removes the dir information.
-func (d *Driver) Remove(id string) error {
-    logrus.Debugf("lcowdriver: remove: id %s", id)
-    tmpID := fmt.Sprintf("%s-removing", id)
-    tmpLayerPath := d.dir(tmpID)
-    layerPath := d.dir(id)
-
-    logrus.Debugf("lcowdriver: remove: id %s: layerPath %s", id, layerPath)
-
-    // Unmount all the layers
-    err := d.Put(id)
-    if err != nil {
-        logrus.Debugf("lcowdriver: remove id %s: failed to unmount: %s", id, err)
-        return err
-    }
-
-    // for non-global case just kill the vm
-    if !d.globalMode {
-        if err := d.terminateServiceVM(id, fmt.Sprintf("Remove %s", id), true); err != nil {
-            return err
-        }
-    }
-
-    if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) {
-        return err
-    }
-
-    if err := os.RemoveAll(tmpLayerPath); err != nil {
-        return err
-    }
-
-    logrus.Debugf("lcowdriver: remove: id %s: layerPath %s succeeded", id, layerPath)
-    return nil
-}
-
-// Get returns the rootfs path for the id. It is reference counted and
-// effectively can be thought of as a "mount the layer into the utility
-// vm if it isn't already". The contract from the caller of this is that
-// all Gets and Puts are matched. It -should- be the case that on cleanup,
-// nothing is mounted.
-//
-// For optimisation, we don't actually mount the filesystem (which in our
-// case means [hot-]adding it to a service VM). But we track that and defer
-// the actual adding to the point we need to access it.
-func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
-    title := fmt.Sprintf("lcowdriver: get: %s", id)
-    logrus.Debugf(title)
-
-    // Generate the mounts needed for the deferred operation.
-    disks, err := d.getAllMounts(id)
-    if err != nil {
-        logrus.Debugf("%s failed to get all layer details for %s: %s", title, d.dir(id), err)
-        return nil, fmt.Errorf("%s failed to get layer details for %s: %s", title, d.dir(id), err)
-    }
-
-    logrus.Debugf("%s: got layer mounts: %+v", title, disks)
-    return &lcowfs{
-        root:        unionMountName(disks),
-        d:           d,
-        mappedDisks: disks,
-        vmID:        d.getVMID(id),
-    }, nil
-}
-
-// Put does the reverse of get. If there are no more references to
-// the layer, it unmounts it from the utility VM.
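
The Get/Put contract described above (every Get matched by exactly one Put) is easiest to honour with a small helper; a sketch, assuming a hypothetical callback-based wrapper:

    package example

    import "github.com/docker/docker/daemon/graphdriver"

    // withLayer mounts a layer, runs fn against its root path, and always
    // releases the reference, keeping Gets and Puts balanced.
    func withLayer(d graphdriver.Driver, id string, fn func(root string) error) error {
        fs, err := d.Get(id, "")
        if err != nil {
            return err
        }
        defer d.Put(id)
        return fn(fs.Path())
    }
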
-
-// Put does the reverse of get. If there are no more references to
-// the layer, it unmounts it from the utility VM.
-func (d *Driver) Put(id string) error {
-    title := fmt.Sprintf("lcowdriver: put: %s", id)
-
-    // Get the service VM that we need to remove from
-    svm, err := d.serviceVms.get(d.getVMID(id))
-    if err == errVMUnknown {
-        return nil
-    } else if err == errVMisTerminating {
-        return svm.getStopError()
-    }
-
-    // Generate the mounts that Get() might have mounted
-    disks, err := d.getAllMounts(id)
-    if err != nil {
-        logrus.Debugf("%s failed to get all layer details for %s: %s", title, d.dir(id), err)
-        return fmt.Errorf("%s failed to get layer details for %s: %s", title, d.dir(id), err)
-    }
-
-    // Now, we want to perform the unmounts, hot-remove and stop the service vm.
-    // We want to go through all the steps even if we have an error, to clean up properly.
-    err = svm.deleteUnionMount(unionMountName(disks), disks...)
-    if err != nil {
-        logrus.Debugf("%s failed to delete union mount %s: %s", title, id, err)
-    }
-
-    err1 := svm.hotRemoveVHDs(disks...)
-    if err1 != nil {
-        logrus.Debugf("%s failed to hot remove vhds %s: %s", title, id, err1)
-        if err == nil {
-            err = err1
-        }
-    }
-
-    err1 = d.terminateServiceVM(id, fmt.Sprintf("Put %s", id), false)
-    if err1 != nil {
-        logrus.Debugf("%s failed to terminate service vm %s: %s", title, id, err1)
-        if err == nil {
-            err = err1
-        }
-    }
-    logrus.Debugf("Put succeeded on id %s", id)
-    return err
-}
-
-// Cleanup ensures the information the driver stores is properly removed.
-// We use this opportunity to clean up any -removing folders which may
-// still be left if the daemon was killed while it was removing a layer.
-func (d *Driver) Cleanup() error {
-    title := "lcowdriver: cleanup"
-
-    items, err := ioutil.ReadDir(d.dataRoot)
-    if err != nil {
-        if os.IsNotExist(err) {
-            return nil
-        }
-        return err
-    }
-
-    // Note we don't return an error below - it's possible the files
-    // are locked. However, next time around after the daemon exits,
-    // we likely will be able to clean up successfully. Instead we log
-    // warnings if there are errors.
-    for _, item := range items {
-        if item.IsDir() && strings.HasSuffix(item.Name(), "-removing") {
-            if err := os.RemoveAll(filepath.Join(d.dataRoot, item.Name())); err != nil {
-                logrus.Warnf("%s failed to cleanup %s: %s", title, item.Name(), err)
-            } else {
-                logrus.Infof("%s cleaned up %s", title, item.Name())
-            }
-        }
-    }
-
-    // Cleanup any service VMs we have running, along with their scratch spaces.
-    // We don't take the lock for this as it's taken in terminateServiceVM.
-    for k, v := range d.serviceVms.svms {
-        logrus.Debugf("%s svm entry: %s: %+v", title, k, v)
-        d.terminateServiceVM(k, "cleanup", true)
-    }
-
-    return nil
-}
-
-// Diff takes a layer (and its parent layer which may be null, but
-// is ignored by this implementation below) and returns a reader for
-// a tarstream representing the layer's contents. The id could be
-// a read-only "layer.vhd" or a read-write "sandbox.vhdx". The semantics
-// of this function dictate that the layer is already mounted.
-// However, as we do lazy mounting as a performance optimisation,
-// this will likely not be the case.
-func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
-    title := fmt.Sprintf("lcowdriver: diff: %s", id)
-
-    // Get VHDX info
-    ld, err := getLayerDetails(d.dir(id))
-    if err != nil {
-        logrus.Debugf("%s: failed to get vhdx information of %s: %s", title, d.dir(id), err)
-        return nil, err
-    }
-
-    // Start the SVM with a mapped virtual disk. Note that if the SVM is
-    // already running and we are in global mode, this will be
-    // hot-added.
-    mvd := hcsshim.MappedVirtualDisk{
-        HostPath:          ld.filename,
-        ContainerPath:     hostToGuest(ld.filename),
-        CreateInUtilityVM: true,
-        ReadOnly:          true,
-    }
-
-    logrus.Debugf("%s: starting service VM", title)
-    svm, err := d.startServiceVMIfNotRunning(id, []hcsshim.MappedVirtualDisk{mvd}, fmt.Sprintf("diff %s", id))
-    if err != nil {
-        return nil, err
-    }
-
-    logrus.Debugf("lcowdriver: diff: waiting for svm to finish booting")
-    err = svm.getStartError()
-    if err != nil {
-        d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
-        return nil, fmt.Errorf("lcowdriver: diff: svm failed to boot: %s", err)
-    }
-
-    // Obtain the tar stream for it
-    logrus.Debugf("%s: %s %s, size %d, ReadOnly %t", title, ld.filename, mvd.ContainerPath, ld.size, ld.isSandbox)
-    tarReadCloser, err := svm.config.VhdToTar(mvd.HostPath, mvd.ContainerPath, ld.isSandbox, ld.size)
-    if err != nil {
-        svm.hotRemoveVHDs(mvd)
-        d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
-        return nil, fmt.Errorf("%s failed to export layer to tar stream for id: %s, parent: %s : %s", title, id, parent, err)
-    }
-
-    logrus.Debugf("%s id %s parent %s completed successfully", title, id, parent)
-
-    // In safe/non-global mode, we can't tear down the service VM until things have been read.
-    return ioutils.NewReadCloserWrapper(tarReadCloser, func() error {
-        tarReadCloser.Close()
-        svm.hotRemoveVHDs(mvd)
-        d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
-        return nil
-    }), nil
-}
-
-// ApplyDiff extracts the changeset from the given diff into the
-// layer with the specified id and parent, returning the size of the
-// new layer in bytes. The layer should not be mounted when calling
-// this function. Another way of describing this is that ApplyDiff writes
-// to a new layer (a VHD in LCOW) the contents of a tarstream it's given.
-func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
-    logrus.Debugf("lcowdriver: applydiff: id %s", id)
-
-    svm, err := d.startServiceVMIfNotRunning(id, nil, fmt.Sprintf("applydiff %s", id))
-    if err != nil {
-        return 0, err
-    }
-    defer d.terminateServiceVM(id, fmt.Sprintf("applydiff %s", id), false)
-
-    logrus.Debugf("lcowdriver: applydiff: waiting for svm to finish booting")
-    err = svm.getStartError()
-    if err != nil {
-        return 0, fmt.Errorf("lcowdriver: applydiff: svm failed to boot: %s", err)
-    }
-
-    // TODO @jhowardmsft - the retries are temporary to overcome platform reliability issues.
-    // Obviously this will be removed as platform bugs are fixed.
-    retries := 0
-    for {
-        retries++
-        size, err := svm.config.TarToVhd(filepath.Join(d.dataRoot, id, layerFilename), diff)
-        if err != nil {
-            if retries <= 10 {
-                continue
-            }
-            return 0, err
-        }
-        return size, nil
-    }
-}
-
-// Changes produces a list of changes between the specified layer
-// and its parent layer. If parent is "", then all changes will be ADD changes.
-// The layer should not be mounted when calling this function.
-func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
-    logrus.Debugf("lcowdriver: changes: id %s parent %s", id, parent)
-    // TODO @gupta-ak. Needs implementation with assistance from service VM
-    return nil, nil
-}
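As a rough illustration (not part of the patch), Diff and ApplyDiff compose into a layer copy under the assumption that both ids already have layer folders; ids and the empty parent are illustrative:

// Hypothetical sketch: export one layer's contents as a tarstream and
// apply it to another layer.
func copyLayer(d *Driver, srcID, dstID string) (int64, error) {
    tar, err := d.Diff(srcID, "")
    if err != nil {
        return 0, err
    }
    defer tar.Close() // tears down the per-layer service VM in safe mode
    return d.ApplyDiff(dstID, "", tar)
}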
-
-// DiffSize calculates the changes between the specified layer
-// and its parent and returns the size in bytes of the changes
-// relative to its base filesystem directory.
-func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
-    logrus.Debugf("lcowdriver: diffsize: id %s", id)
-    // TODO @gupta-ak. Needs implementation with assistance from service VM
-    return 0, nil
-}
-
-// GetMetadata returns custom driver information.
-func (d *Driver) GetMetadata(id string) (map[string]string, error) {
-    logrus.Debugf("lcowdriver: getmetadata: id %s", id)
-    m := make(map[string]string)
-    m["dir"] = d.dir(id)
-    return m, nil
-}
-
-// GetLayerPath gets the layer path on host (path to VHD/VHDX)
-func (d *Driver) GetLayerPath(id string) (string, error) {
-    return d.dir(id), nil
-}
-
-// dir returns the absolute path to the layer.
-func (d *Driver) dir(id string) string {
-    return filepath.Join(d.dataRoot, filepath.Base(id))
-}
-
-// getLayerChain returns the layer chain information.
-func (d *Driver) getLayerChain(id string) ([]string, error) {
-    jPath := filepath.Join(d.dir(id), "layerchain.json")
-    logrus.Debugf("lcowdriver: getlayerchain: id %s json %s", id, jPath)
-    content, err := ioutil.ReadFile(jPath)
-    if os.IsNotExist(err) {
-        return nil, nil
-    } else if err != nil {
-        return nil, fmt.Errorf("lcowdriver: getlayerchain: %s unable to read layerchain file %s: %s", id, jPath, err)
-    }
-
-    var layerChain []string
-    err = json.Unmarshal(content, &layerChain)
-    if err != nil {
-        return nil, fmt.Errorf("lcowdriver: getlayerchain: %s failed to unmarshal layerchain file %s: %s", id, jPath, err)
-    }
-    return layerChain, nil
-}
-
-// setLayerChain stores the layer chain information on disk.
-func (d *Driver) setLayerChain(id string, chain []string) error {
-    content, err := json.Marshal(&chain)
-    if err != nil {
-        return fmt.Errorf("lcowdriver: setlayerchain: %s failed to marshal layerchain json: %s", id, err)
-    }
-
-    jPath := filepath.Join(d.dir(id), "layerchain.json")
-    logrus.Debugf("lcowdriver: setlayerchain: id %s json %s", id, jPath)
-    err = ioutil.WriteFile(jPath, content, 0600)
-    if err != nil {
-        return fmt.Errorf("lcowdriver: setlayerchain: %s failed to write layerchain file: %s", id, err)
-    }
-    return nil
-}
-
-// getLayerDetails is a utility for getting a file name, size and indication of
-// sandbox for a VHD(x) in a folder. A read-only layer will be layer.vhd. A
-// read-write layer will be sandbox.vhdx.
-func getLayerDetails(folder string) (*layerDetails, error) {
-    var fileInfo os.FileInfo
-    ld := &layerDetails{
-        isSandbox: false,
-        filename:  filepath.Join(folder, layerFilename),
-    }
-
-    fileInfo, err := os.Stat(ld.filename)
-    if err != nil {
-        ld.filename = filepath.Join(folder, sandboxFilename)
-        if fileInfo, err = os.Stat(ld.filename); err != nil {
-            return nil, fmt.Errorf("failed to locate layer or sandbox in %s", folder)
-        }
-        ld.isSandbox = true
-    }
-    ld.size = fileInfo.Size()
-
-    return ld, nil
-}
-
-func (d *Driver) getAllMounts(id string) ([]hcsshim.MappedVirtualDisk, error) {
-    layerChain, err := d.getLayerChain(id)
-    if err != nil {
-        return nil, err
-    }
-    layerChain = append([]string{d.dir(id)}, layerChain...)
-
-    logrus.Debugf("getting all layers: %v", layerChain)
-    disks := make([]hcsshim.MappedVirtualDisk, len(layerChain))
-    for i := range layerChain {
-        ld, err := getLayerDetails(layerChain[i])
-        if err != nil {
-            logrus.Debugf("Failed to get LayerVhdDetails from %s: %s", layerChain[i], err)
-            return nil, err
-        }
-        disks[i].HostPath = ld.filename
-        disks[i].ContainerPath = hostToGuest(ld.filename)
-        disks[i].CreateInUtilityVM = true
-        disks[i].ReadOnly = !ld.isSandbox
-    }
-    return disks, nil
-}
-
-func hostToGuest(hostpath string) string {
-    return fmt.Sprintf("/tmp/%s", filepath.Base(filepath.Dir(hostpath)))
-}
-
-func unionMountName(disks []hcsshim.MappedVirtualDisk) string {
-    return fmt.Sprintf("%s-mount", disks[0].ContainerPath)
-}
-
-type nopCloser struct {
-    io.Reader
-}
-
-func (nopCloser) Close() error {
-    return nil
-}
-
-type fileGetCloserFromSVM struct {
-    id  string
-    svm *serviceVM
-    mvd *hcsshim.MappedVirtualDisk
-    d   *Driver
-}
-
-func (fgc *fileGetCloserFromSVM) Close() error {
-    if fgc.svm != nil {
-        if fgc.mvd != nil {
-            if err := fgc.svm.hotRemoveVHDs(*fgc.mvd); err != nil {
-                // We just log this as we're going to tear down the SVM imminently unless in global mode
-                logrus.Errorf("failed to remove mvd %s: %s", fgc.mvd.ContainerPath, err)
-            }
-        }
-    }
-    if fgc.d != nil && fgc.svm != nil && fgc.id != "" {
-        if err := fgc.d.terminateServiceVM(fgc.id, fmt.Sprintf("diffgetter %s", fgc.id), false); err != nil {
-            return err
-        }
-    }
-    return nil
-}
-
-func (fgc *fileGetCloserFromSVM) Get(filename string) (io.ReadCloser, error) {
-    errOut := &bytes.Buffer{}
-    outOut := &bytes.Buffer{}
-    file := path.Join(fgc.mvd.ContainerPath, filename)
-    if err := fgc.svm.runProcess(fmt.Sprintf("cat %s", file), nil, outOut, errOut); err != nil {
-        logrus.Debugf("cat %s failed: %s", file, errOut.String())
-        return nil, err
-    }
-    return nopCloser{bytes.NewReader(outOut.Bytes())}, nil
-}
-
-// DiffGetter returns a FileGetCloser that can read files from the directory that
-// contains files for the layer differences. Used for direct access for tar-split.
-func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
-    title := fmt.Sprintf("lcowdriver: diffgetter: %s", id)
-    logrus.Debugf(title)
-
-    ld, err := getLayerDetails(d.dir(id))
-    if err != nil {
-        logrus.Debugf("%s: failed to get vhdx information of %s: %s", title, d.dir(id), err)
-        return nil, err
-    }
-
-    // Start the SVM with a mapped virtual disk. Note that if the SVM is
-    // already running and we are in global mode, this will be hot-added.
-    mvd := hcsshim.MappedVirtualDisk{
-        HostPath:          ld.filename,
-        ContainerPath:     hostToGuest(ld.filename),
-        CreateInUtilityVM: true,
-        ReadOnly:          true,
-    }
-
-    logrus.Debugf("%s: starting service VM", title)
-    svm, err := d.startServiceVMIfNotRunning(id, []hcsshim.MappedVirtualDisk{mvd}, fmt.Sprintf("diffgetter %s", id))
-    if err != nil {
-        return nil, err
-    }
-
-    logrus.Debugf("%s: waiting for svm to finish booting", title)
-    err = svm.getStartError()
-    if err != nil {
-        d.terminateServiceVM(id, fmt.Sprintf("diffgetter %s", id), false)
-        return nil, fmt.Errorf("%s: svm failed to boot: %s", title, err)
-    }
-
-    return &fileGetCloserFromSVM{
-        id:  id,
-        svm: svm,
-        mvd: &mvd,
-        d:   d}, nil
-}
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/lcow/lcow_svm.go b/vendor/github.com/docker/docker/daemon/graphdriver/lcow/lcow_svm.go
deleted file mode 100644
index 9a27ac949..000000000
--- a/vendor/github.com/docker/docker/daemon/graphdriver/lcow/lcow_svm.go
+++ /dev/null
@@ -1,378 +0,0 @@
-// +build windows
-
-package lcow // import "github.com/docker/docker/daemon/graphdriver/lcow"
-
-import (
-    "errors"
-    "fmt"
-    "io"
-    "strings"
-    "sync"
-    "time"
-
-    "github.com/Microsoft/hcsshim"
-    "github.com/Microsoft/opengcs/client"
-    "github.com/sirupsen/logrus"
-)
-
-// Code for all the service VM management for the LCOW graphdriver
-
-var errVMisTerminating = errors.New("service VM is shutting down")
-var errVMUnknown = errors.New("service vm id is unknown")
-var errVMStillHasReference = errors.New("Attempting to delete a VM that is still being used")
-
-// serviceVMMap is the struct representing the id -> service VM mapping.
-type serviceVMMap struct {
-    sync.Mutex
-    svms map[string]*serviceVMMapItem
-}
-
-// serviceVMMapItem is our internal structure representing an item in our
-// map of service VMs we are maintaining.
-type serviceVMMapItem struct {
-    svm      *serviceVM // actual service vm object
-    refCount int        // refcount for VM
-}
-
-type serviceVM struct {
-    sync.Mutex                     // Serialises operations being performed in this service VM.
-    scratchAttached bool           // Has a scratch been attached?
-    config          *client.Config // Represents the service VM item.
-
-    // Indicates that the vm is started
-    startStatus chan interface{}
-    startError  error
-
-    // Indicates that the vm is stopped
-    stopStatus chan interface{}
-    stopError  error
-
-    attachedVHDs map[string]int // Map ref counting all the VHDS we've hot-added/hot-removed.
-    unionMounts  map[string]int // Map ref counting all the union filesystems we mounted.
-}
-
-// add will add an id to the service vm map. There are three cases:
-//  - entry doesn't exist:
-//      - add id to map and return a new vm that the caller can manually configure+start
-//  - entry does exist:
-//      - return vm in map and increment ref count
-//  - entry does exist but the ref count is 0:
-//      - return the svm and errVMisTerminating. Caller can call svm.getStopError() to wait for stop
-func (svmMap *serviceVMMap) add(id string) (svm *serviceVM, alreadyExists bool, err error) {
-    svmMap.Lock()
-    defer svmMap.Unlock()
-    if svm, ok := svmMap.svms[id]; ok {
-        if svm.refCount == 0 {
-            return svm.svm, true, errVMisTerminating
-        }
-        svm.refCount++
-        return svm.svm, true, nil
-    }
-
-    // Doesn't exist, so create an empty svm to put into map and return
-    newSVM := &serviceVM{
-        startStatus:  make(chan interface{}),
-        stopStatus:   make(chan interface{}),
-        attachedVHDs: make(map[string]int),
-        unionMounts:  make(map[string]int),
-        config:       &client.Config{},
-    }
-    svmMap.svms[id] = &serviceVMMapItem{
-        svm:      newSVM,
-        refCount: 1,
-    }
-    return newSVM, false, nil
-}
-
-// get will get the service vm from the map. There are three cases:
-//  - entry doesn't exist:
-//      - return errVMUnknown
-//  - entry does exist:
-//      - return vm with no error
-//  - entry does exist but the ref count is 0:
-//      - return the svm and errVMisTerminating. Caller can call svm.getStopError() to wait for stop
-func (svmMap *serviceVMMap) get(id string) (*serviceVM, error) {
-    svmMap.Lock()
-    defer svmMap.Unlock()
-    svm, ok := svmMap.svms[id]
-    if !ok {
-        return nil, errVMUnknown
-    }
-    if svm.refCount == 0 {
-        return svm.svm, errVMisTerminating
-    }
-    return svm.svm, nil
-}
-
-// decrementRefCount decrements the ref count of the given ID from the map. There are four cases:
-//  - entry doesn't exist:
-//      - return errVMUnknown
-//  - entry does exist but the ref count is 0:
-//      - return the svm and errVMisTerminating. Caller can call svm.getStopError() to wait for stop
-//  - entry does exist but ref count is 1:
-//      - return vm and set lastRef to true. The caller can then stop the vm, delete the id from this map
-//        and execute svm.signalStopFinished to signal the threads that the svm has been terminated.
-//  - entry does exist and ref count > 1:
-//      - just reduce ref count and return svm
-func (svmMap *serviceVMMap) decrementRefCount(id string) (_ *serviceVM, lastRef bool, _ error) {
-    svmMap.Lock()
-    defer svmMap.Unlock()
-
-    svm, ok := svmMap.svms[id]
-    if !ok {
-        return nil, false, errVMUnknown
-    }
-    if svm.refCount == 0 {
-        return svm.svm, false, errVMisTerminating
-    }
-    svm.refCount--
-    return svm.svm, svm.refCount == 0, nil
-}
-
-// setRefCountZero works the same way as decrementRefCount, but sets ref count to 0 instead of decrementing it.
-func (svmMap *serviceVMMap) setRefCountZero(id string) (*serviceVM, error) {
-    svmMap.Lock()
-    defer svmMap.Unlock()
-
-    svm, ok := svmMap.svms[id]
-    if !ok {
-        return nil, errVMUnknown
-    }
-    if svm.refCount == 0 {
-        return svm.svm, errVMisTerminating
-    }
-    svm.refCount = 0
-    return svm.svm, nil
-}
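To make the refcounting semantics above concrete, here is a minimal sketch (not part of the patch) of the acquire/release lifecycle the comments describe; the actual VM boot and shutdown are elided:

// Hypothetical sketch: acquire a service VM via add, and on release let
// the last reference tear it down and publish the stop result.
func acquireSVM(m *serviceVMMap, id string) (*serviceVM, error) {
    svm, existed, err := m.add(id)
    if err == errVMisTerminating {
        // A previous user is still shutting the VM down; wait, then retry.
        if err := svm.getStopError(); err != nil {
            return nil, err
        }
        return acquireSVM(m, id)
    }
    if !existed {
        // First reference: the caller would configure and boot the VM here,
        // then call svm.signalStartFinished(startErr).
    }
    return svm, svm.getStartError()
}

func releaseSVM(m *serviceVMMap, id string) error {
    svm, lastRef, err := m.decrementRefCount(id)
    if err != nil || !lastRef {
        return err
    }
    // Last reference: stop the VM (elided), then remove the entry and
    // wake anyone blocked in getStopError.
    if err := m.deleteID(id); err != nil {
        return err
    }
    svm.signalStopFinished(nil)
    return nil
}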
-
-// deleteID deletes the given ID from the map. If the refcount is not 0 or the
-// VM does not exist, then this function returns an error.
-func (svmMap *serviceVMMap) deleteID(id string) error {
-    svmMap.Lock()
-    defer svmMap.Unlock()
-    svm, ok := svmMap.svms[id]
-    if !ok {
-        return errVMUnknown
-    }
-    if svm.refCount != 0 {
-        return errVMStillHasReference
-    }
-    delete(svmMap.svms, id)
-    return nil
-}
-
-func (svm *serviceVM) signalStartFinished(err error) {
-    svm.Lock()
-    svm.startError = err
-    svm.Unlock()
-    close(svm.startStatus)
-}
-
-func (svm *serviceVM) getStartError() error {
-    <-svm.startStatus
-    svm.Lock()
-    defer svm.Unlock()
-    return svm.startError
-}
-
-func (svm *serviceVM) signalStopFinished(err error) {
-    svm.Lock()
-    svm.stopError = err
-    svm.Unlock()
-    close(svm.stopStatus)
-}
-
-func (svm *serviceVM) getStopError() error {
-    <-svm.stopStatus
-    svm.Lock()
-    defer svm.Unlock()
-    return svm.stopError
-}
-
-// hotAddVHDs waits for the service vm to start and then attaches the vhds.
-func (svm *serviceVM) hotAddVHDs(mvds ...hcsshim.MappedVirtualDisk) error {
-    if err := svm.getStartError(); err != nil {
-        return err
-    }
-    return svm.hotAddVHDsAtStart(mvds...)
-}
-
-// hotAddVHDsAtStart works the same way as hotAddVHDs but does not wait for the VM to start.
-func (svm *serviceVM) hotAddVHDsAtStart(mvds ...hcsshim.MappedVirtualDisk) error {
-    svm.Lock()
-    defer svm.Unlock()
-    for i, mvd := range mvds {
-        if _, ok := svm.attachedVHDs[mvd.HostPath]; ok {
-            svm.attachedVHDs[mvd.HostPath]++
-            continue
-        }
-
-        if err := svm.config.HotAddVhd(mvd.HostPath, mvd.ContainerPath, mvd.ReadOnly, !mvd.AttachOnly); err != nil {
-            svm.hotRemoveVHDsNoLock(mvds[:i]...)
-            return err
-        }
-        svm.attachedVHDs[mvd.HostPath] = 1
-    }
-    return nil
-}
-
-// hotRemoveVHDs waits for the service vm to start and then removes the vhds.
-// The service VM must not be locked when calling this function.
-func (svm *serviceVM) hotRemoveVHDs(mvds ...hcsshim.MappedVirtualDisk) error {
-    if err := svm.getStartError(); err != nil {
-        return err
-    }
-    svm.Lock()
-    defer svm.Unlock()
-    return svm.hotRemoveVHDsNoLock(mvds...)
-}
-
-// hotRemoveVHDsNoLock removes VHDs from a service VM. When calling this function,
-// the contract is that the service VM lock must be held.
-func (svm *serviceVM) hotRemoveVHDsNoLock(mvds ...hcsshim.MappedVirtualDisk) error {
-    var retErr error
-    for _, mvd := range mvds {
-        if _, ok := svm.attachedVHDs[mvd.HostPath]; !ok {
-            // We continue instead of returning an error if we try to hot remove a non-existent VHD.
-            // This is because one of the callers of the function is graphdriver.Put(). Since graphdriver.Get()
-            // defers the VM start to the first operation, it's possible that nothing has been hot-added
-            // when Put() is called. To avoid Put returning an error in that case, we simply continue if we
-            // don't find the vhd attached.
-            continue
-        }
-
-        if svm.attachedVHDs[mvd.HostPath] > 1 {
-            svm.attachedVHDs[mvd.HostPath]--
-            continue
-        }
-
-        // last VHD, so remove from VM and map
-        if err := svm.config.HotRemoveVhd(mvd.HostPath); err == nil {
-            delete(svm.attachedVHDs, mvd.HostPath)
-        } else {
-            // Take note of the error, but still continue to remove the other VHDs
-            logrus.Warnf("Failed to hot remove %s: %s", mvd.HostPath, err)
-            if retErr == nil {
-                retErr = err
-            }
-        }
-    }
-    return retErr
-}
-
-func (svm *serviceVM) createExt4VHDX(destFile string, sizeGB uint32, cacheFile string) error {
-    if err := svm.getStartError(); err != nil {
-        return err
-    }
-
-    svm.Lock()
-    defer svm.Unlock()
-    return svm.config.CreateExt4Vhdx(destFile, sizeGB, cacheFile)
-}
-
-func (svm *serviceVM) createUnionMount(mountName string, mvds ...hcsshim.MappedVirtualDisk) (err error) {
-    if len(mvds) == 0 {
-        return fmt.Errorf("createUnionMount: must have at least 1 layer")
-    }
-
-    if err = svm.getStartError(); err != nil {
-        return err
-    }
-
-    svm.Lock()
-    defer svm.Unlock()
-    if _, ok := svm.unionMounts[mountName]; ok {
-        svm.unionMounts[mountName]++
-        return nil
-    }
-
-    var lowerLayers []string
-    if mvds[0].ReadOnly {
-        lowerLayers = append(lowerLayers, mvds[0].ContainerPath)
-    }
-
-    for i := 1; i < len(mvds); i++ {
-        lowerLayers = append(lowerLayers, mvds[i].ContainerPath)
-    }
-
-    logrus.Debugf("Doing the overlay mount with union directory=%s", mountName)
-    if err = svm.runProcess(fmt.Sprintf("mkdir -p %s", mountName), nil, nil, nil); err != nil {
-        return err
-    }
-
-    var cmd string
-    if len(mvds) == 1 {
-        // `FROM SCRATCH` case and the only layer. No overlay required.
-        cmd = fmt.Sprintf("mount %s %s", mvds[0].ContainerPath, mountName)
-    } else if mvds[0].ReadOnly {
-        // Readonly overlay. Note that overlay expects ':' as the lowerdir separator.
-        cmd = fmt.Sprintf("mount -t overlay overlay -olowerdir=%s %s",
-            strings.Join(lowerLayers, ":"),
-            mountName)
-    } else {
-        upper := fmt.Sprintf("%s/upper", mvds[0].ContainerPath)
-        work := fmt.Sprintf("%s/work", mvds[0].ContainerPath)
-
-        if err = svm.runProcess(fmt.Sprintf("mkdir -p %s %s", upper, work), nil, nil, nil); err != nil {
-            return err
-        }
-
-        cmd = fmt.Sprintf("mount -t overlay overlay -olowerdir=%s,upperdir=%s,workdir=%s %s",
-            strings.Join(lowerLayers, ":"),
-            upper,
-            work,
-            mountName)
-    }
-
-    logrus.Debugf("createUnionMount: Executing mount=%s", cmd)
-    if err = svm.runProcess(cmd, nil, nil, nil); err != nil {
-        return err
-    }
-
-    svm.unionMounts[mountName] = 1
-    return nil
-}
-
-func (svm *serviceVM) deleteUnionMount(mountName string, disks ...hcsshim.MappedVirtualDisk) error {
-    if err := svm.getStartError(); err != nil {
-        return err
-    }
-
-    svm.Lock()
-    defer svm.Unlock()
-    if _, ok := svm.unionMounts[mountName]; !ok {
-        return nil
-    }
-
-    if svm.unionMounts[mountName] > 1 {
-        svm.unionMounts[mountName]--
-        return nil
-    }
-
-    logrus.Debugf("Removing union mount %s", mountName)
-    if err := svm.runProcess(fmt.Sprintf("umount %s", mountName), nil, nil, nil); err != nil {
-        return err
-    }
-
-    delete(svm.unionMounts, mountName)
-    return nil
-}
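Purely for illustration (not part of the patch), these are the shapes of the mount commands createUnionMount builds for the three cases above; the guest paths are made up, and the first disk is the topmost layer:

// Illustrative only: a writable top layer supplies upperdir/workdir,
// the remaining layers become colon-separated lowerdirs.
func exampleMountCommands() []string {
    return []string{
        // Single read-only layer ("FROM scratch"): plain mount.
        "mount /tmp/layer1 /tmp/layer1-mount",
        // All layers read-only: lowerdir-only overlay.
        "mount -t overlay overlay -olowerdir=/tmp/layer2:/tmp/layer1 /tmp/layer2-mount",
        // Writable sandbox on top: upper/work live on the sandbox disk.
        "mount -t overlay overlay -olowerdir=/tmp/layer2:/tmp/layer1,upperdir=/tmp/sandbox/upper,workdir=/tmp/sandbox/work /tmp/sandbox-mount",
    }
}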
code %d", command, exitCode) - } - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs.go b/vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs.go deleted file mode 100644 index 29f15fd24..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs.go +++ /dev/null @@ -1,139 +0,0 @@ -// +build windows - -package lcow // import "github.com/docker/docker/daemon/graphdriver/lcow" - -import ( - "bytes" - "fmt" - "io" - "runtime" - "strings" - "sync" - - "github.com/Microsoft/hcsshim" - "github.com/Microsoft/opengcs/service/gcsutils/remotefs" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/containerfs" - "github.com/sirupsen/logrus" -) - -type lcowfs struct { - root string - d *Driver - mappedDisks []hcsshim.MappedVirtualDisk - vmID string - currentSVM *serviceVM - sync.Mutex -} - -var _ containerfs.ContainerFS = &lcowfs{} - -// ErrNotSupported is an error for unsupported operations in the remotefs -var ErrNotSupported = fmt.Errorf("not supported") - -// Functions to implement the ContainerFS interface -func (l *lcowfs) Path() string { - return l.root -} - -func (l *lcowfs) ResolveScopedPath(path string, rawPath bool) (string, error) { - logrus.Debugf("remotefs.resolvescopedpath inputs: %s %s ", path, l.root) - - arg1 := l.Join(l.root, path) - if !rawPath { - // The l.Join("/", path) will make path an absolute path and then clean it - // so if path = ../../X, it will become /X. - arg1 = l.Join(l.root, l.Join("/", path)) - } - arg2 := l.root - - output := &bytes.Buffer{} - if err := l.runRemoteFSProcess(nil, output, remotefs.ResolvePathCmd, arg1, arg2); err != nil { - return "", err - } - - logrus.Debugf("remotefs.resolvescopedpath success. Output: %s\n", output.String()) - return output.String(), nil -} - -func (l *lcowfs) OS() string { - return "linux" -} - -func (l *lcowfs) Architecture() string { - return runtime.GOARCH -} - -// Other functions that are used by docker like the daemon Archiver/Extractor -func (l *lcowfs) ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error { - logrus.Debugf("remotefs.ExtractArchve inputs: %s %+v", dst, opts) - - tarBuf := &bytes.Buffer{} - if err := remotefs.WriteTarOptions(tarBuf, opts); err != nil { - return fmt.Errorf("failed to marshall tar opts: %s", err) - } - - input := io.MultiReader(tarBuf, src) - if err := l.runRemoteFSProcess(input, nil, remotefs.ExtractArchiveCmd, dst); err != nil { - return fmt.Errorf("failed to extract archive to %s: %s", dst, err) - } - return nil -} - -func (l *lcowfs) ArchivePath(src string, opts *archive.TarOptions) (io.ReadCloser, error) { - logrus.Debugf("remotefs.ArchivePath: %s %+v", src, opts) - - tarBuf := &bytes.Buffer{} - if err := remotefs.WriteTarOptions(tarBuf, opts); err != nil { - return nil, fmt.Errorf("failed to marshall tar opts: %s", err) - } - - r, w := io.Pipe() - go func() { - defer w.Close() - if err := l.runRemoteFSProcess(tarBuf, w, remotefs.ArchivePathCmd, src); err != nil { - logrus.Debugf("REMOTEFS: Failed to extract archive: %s %+v %s", src, opts, err) - } - }() - return r, nil -} - -// Helper functions -func (l *lcowfs) startVM() error { - l.Lock() - defer l.Unlock() - if l.currentSVM != nil { - return nil - } - - svm, err := l.d.startServiceVMIfNotRunning(l.vmID, l.mappedDisks, fmt.Sprintf("lcowfs.startVM")) - if err != nil { - return err - } - - if err = svm.createUnionMount(l.root, l.mappedDisks...); err != nil { - return err - } - l.currentSVM = svm - return nil -} - -func (l 
*lcowfs) runRemoteFSProcess(stdin io.Reader, stdout io.Writer, args ...string) error { - if err := l.startVM(); err != nil { - return err - } - - // Append remotefs prefix and setup as a command line string - cmd := fmt.Sprintf("%s %s", remotefs.RemotefsCmd, strings.Join(args, " ")) - stderr := &bytes.Buffer{} - if err := l.currentSVM.runProcess(cmd, stdin, stdout, stderr); err != nil { - return err - } - - eerr, err := remotefs.ReadError(stderr) - if eerr != nil { - // Process returned an error so return that. - return remotefs.ExportedToError(eerr) - } - return err -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs_file.go b/vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs_file.go deleted file mode 100644 index 1f00bfff4..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs_file.go +++ /dev/null @@ -1,211 +0,0 @@ -// +build windows - -package lcow // import "github.com/docker/docker/daemon/graphdriver/lcow" - -import ( - "bytes" - "encoding/binary" - "encoding/json" - "fmt" - "io" - "os" - "strconv" - - "github.com/Microsoft/hcsshim" - "github.com/Microsoft/opengcs/service/gcsutils/remotefs" - "github.com/containerd/continuity/driver" -) - -type lcowfile struct { - process hcsshim.Process - stdin io.WriteCloser - stdout io.ReadCloser - stderr io.ReadCloser - fs *lcowfs - guestPath string -} - -func (l *lcowfs) Open(path string) (driver.File, error) { - return l.OpenFile(path, os.O_RDONLY, 0) -} - -func (l *lcowfs) OpenFile(path string, flag int, perm os.FileMode) (_ driver.File, err error) { - flagStr := strconv.FormatInt(int64(flag), 10) - permStr := strconv.FormatUint(uint64(perm), 8) - - commandLine := fmt.Sprintf("%s %s %s %s %s", remotefs.RemotefsCmd, remotefs.OpenFileCmd, path, flagStr, permStr) - env := make(map[string]string) - env["PATH"] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:" - processConfig := &hcsshim.ProcessConfig{ - EmulateConsole: false, - CreateStdInPipe: true, - CreateStdOutPipe: true, - CreateStdErrPipe: true, - CreateInUtilityVm: true, - WorkingDirectory: "/bin", - Environment: env, - CommandLine: commandLine, - } - - process, err := l.currentSVM.config.Uvm.CreateProcess(processConfig) - if err != nil { - return nil, fmt.Errorf("failed to open file %s: %s", path, err) - } - - stdin, stdout, stderr, err := process.Stdio() - if err != nil { - process.Kill() - process.Close() - return nil, fmt.Errorf("failed to open file pipes %s: %s", path, err) - } - - lf := &lcowfile{ - process: process, - stdin: stdin, - stdout: stdout, - stderr: stderr, - fs: l, - guestPath: path, - } - - if _, err := lf.getResponse(); err != nil { - return nil, fmt.Errorf("failed to open file %s: %s", path, err) - } - return lf, nil -} - -func (l *lcowfile) Read(b []byte) (int, error) { - hdr := &remotefs.FileHeader{ - Cmd: remotefs.Read, - Size: uint64(len(b)), - } - - if err := remotefs.WriteFileHeader(l.stdin, hdr, nil); err != nil { - return 0, err - } - - buf, err := l.getResponse() - if err != nil { - return 0, err - } - - n := copy(b, buf) - return n, nil -} - -func (l *lcowfile) Write(b []byte) (int, error) { - hdr := &remotefs.FileHeader{ - Cmd: remotefs.Write, - Size: uint64(len(b)), - } - - if err := remotefs.WriteFileHeader(l.stdin, hdr, b); err != nil { - return 0, err - } - - _, err := l.getResponse() - if err != nil { - return 0, err - } - - return len(b), nil -} - -func (l *lcowfile) Seek(offset int64, whence int) (int64, error) { - seekHdr := &remotefs.SeekHeader{ - Offset: offset, - 
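As a small sketch (not part of the patch) of the exec-based protocol above: each call runs "remotefs <cmd> <args...>" inside the utility VM, and a failure is serialised on stderr and surfaced via remotefs.ReadError. The guest path here is illustrative; ReadlinkCmd is one of the command constants used elsewhere in this package:

// Hypothetical usage of runRemoteFSProcess for a single command.
func exampleReadlink(l *lcowfs) (string, error) {
    out := &bytes.Buffer{}
    if err := l.runRemoteFSProcess(nil, out, remotefs.ReadlinkCmd, "/tmp/some-link"); err != nil {
        return "", err
    }
    return out.String(), nil
}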
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs_file.go b/vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs_file.go
deleted file mode 100644
index 1f00bfff4..000000000
--- a/vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs_file.go
+++ /dev/null
@@ -1,211 +0,0 @@
-// +build windows
-
-package lcow // import "github.com/docker/docker/daemon/graphdriver/lcow"
-
-import (
-    "bytes"
-    "encoding/binary"
-    "encoding/json"
-    "fmt"
-    "io"
-    "os"
-    "strconv"
-
-    "github.com/Microsoft/hcsshim"
-    "github.com/Microsoft/opengcs/service/gcsutils/remotefs"
-    "github.com/containerd/continuity/driver"
-)
-
-type lcowfile struct {
-    process   hcsshim.Process
-    stdin     io.WriteCloser
-    stdout    io.ReadCloser
-    stderr    io.ReadCloser
-    fs        *lcowfs
-    guestPath string
-}
-
-func (l *lcowfs) Open(path string) (driver.File, error) {
-    return l.OpenFile(path, os.O_RDONLY, 0)
-}
-
-func (l *lcowfs) OpenFile(path string, flag int, perm os.FileMode) (_ driver.File, err error) {
-    flagStr := strconv.FormatInt(int64(flag), 10)
-    permStr := strconv.FormatUint(uint64(perm), 8)
-
-    commandLine := fmt.Sprintf("%s %s %s %s %s", remotefs.RemotefsCmd, remotefs.OpenFileCmd, path, flagStr, permStr)
-    env := make(map[string]string)
-    env["PATH"] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:"
-    processConfig := &hcsshim.ProcessConfig{
-        EmulateConsole:    false,
-        CreateStdInPipe:   true,
-        CreateStdOutPipe:  true,
-        CreateStdErrPipe:  true,
-        CreateInUtilityVm: true,
-        WorkingDirectory:  "/bin",
-        Environment:       env,
-        CommandLine:       commandLine,
-    }
-
-    process, err := l.currentSVM.config.Uvm.CreateProcess(processConfig)
-    if err != nil {
-        return nil, fmt.Errorf("failed to open file %s: %s", path, err)
-    }
-
-    stdin, stdout, stderr, err := process.Stdio()
-    if err != nil {
-        process.Kill()
-        process.Close()
-        return nil, fmt.Errorf("failed to open file pipes %s: %s", path, err)
-    }
-
-    lf := &lcowfile{
-        process:   process,
-        stdin:     stdin,
-        stdout:    stdout,
-        stderr:    stderr,
-        fs:        l,
-        guestPath: path,
-    }
-
-    if _, err := lf.getResponse(); err != nil {
-        return nil, fmt.Errorf("failed to open file %s: %s", path, err)
-    }
-    return lf, nil
-}
-
-func (l *lcowfile) Read(b []byte) (int, error) {
-    hdr := &remotefs.FileHeader{
-        Cmd:  remotefs.Read,
-        Size: uint64(len(b)),
-    }
-
-    if err := remotefs.WriteFileHeader(l.stdin, hdr, nil); err != nil {
-        return 0, err
-    }
-
-    buf, err := l.getResponse()
-    if err != nil {
-        return 0, err
-    }
-
-    n := copy(b, buf)
-    return n, nil
-}
-
-func (l *lcowfile) Write(b []byte) (int, error) {
-    hdr := &remotefs.FileHeader{
-        Cmd:  remotefs.Write,
-        Size: uint64(len(b)),
-    }
-
-    if err := remotefs.WriteFileHeader(l.stdin, hdr, b); err != nil {
-        return 0, err
-    }
-
-    _, err := l.getResponse()
-    if err != nil {
-        return 0, err
-    }
-
-    return len(b), nil
-}
-
-func (l *lcowfile) Seek(offset int64, whence int) (int64, error) {
-    seekHdr := &remotefs.SeekHeader{
-        Offset: offset,
-        Whence: int32(whence),
-    }
-
-    buf := &bytes.Buffer{}
-    if err := binary.Write(buf, binary.BigEndian, seekHdr); err != nil {
-        return 0, err
-    }
-
-    hdr := &remotefs.FileHeader{
-        Cmd:  remotefs.Write,
-        Size: uint64(buf.Len()),
-    }
-    if err := remotefs.WriteFileHeader(l.stdin, hdr, buf.Bytes()); err != nil {
-        return 0, err
-    }
-
-    resBuf, err := l.getResponse()
-    if err != nil {
-        return 0, err
-    }
-
-    var res int64
-    if err := binary.Read(bytes.NewBuffer(resBuf), binary.BigEndian, &res); err != nil {
-        return 0, err
-    }
-    return res, nil
-}
-
-func (l *lcowfile) Close() error {
-    hdr := &remotefs.FileHeader{
-        Cmd:  remotefs.Close,
-        Size: 0,
-    }
-
-    if err := remotefs.WriteFileHeader(l.stdin, hdr, nil); err != nil {
-        return err
-    }
-
-    _, err := l.getResponse()
-    return err
-}
-
-func (l *lcowfile) Readdir(n int) ([]os.FileInfo, error) {
-    nStr := strconv.FormatInt(int64(n), 10)
-
-    // Unlike the other File functions, this one can just be run without maintaining state,
-    // so just do it the normal runRemoteFSProcess way.
-    buf := &bytes.Buffer{}
-    if err := l.fs.runRemoteFSProcess(nil, buf, remotefs.ReadDirCmd, l.guestPath, nStr); err != nil {
-        return nil, err
-    }
-
-    var info []remotefs.FileInfo
-    if err := json.Unmarshal(buf.Bytes(), &info); err != nil {
-        return nil, err
-    }
-
-    osInfo := make([]os.FileInfo, len(info))
-    for i := range info {
-        osInfo[i] = &info[i]
-    }
-    return osInfo, nil
-}
-
-func (l *lcowfile) getResponse() ([]byte, error) {
-    hdr, err := remotefs.ReadFileHeader(l.stdout)
-    if err != nil {
-        return nil, err
-    }
-
-    if hdr.Cmd != remotefs.CmdOK {
-        // Something went wrong during the openfile in the server.
-        // Parse stderr and return that as an error
-        eerr, err := remotefs.ReadError(l.stderr)
-        if eerr != nil {
-            return nil, remotefs.ExportedToError(eerr)
-        }
-
-        // Maybe the parsing went wrong?
-        if err != nil {
-            return nil, err
-        }
-
-        // At this point, we know something went wrong in the remotefs program, but
-        // we don't know why.
-        return nil, fmt.Errorf("unknown error")
-    }
-
-    // Successful command, we might have some data to read (for Read + Seek)
-    buf := make([]byte, hdr.Size)
-    if _, err := io.ReadFull(l.stdout, buf); err != nil {
-        return nil, err
-    }
-    return buf, nil
-}
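A minimal sketch (not part of the patch) of one framed exchange on an open lcowfile, using the same header helpers as above: send a read request, then let getResponse validate the status header and return the payload. The 4KB size is illustrative:

// Hypothetical usage of the FileHeader request/response framing.
func exampleReadChunk(f *lcowfile) ([]byte, error) {
    req := &remotefs.FileHeader{
        Cmd:  remotefs.Read,
        Size: 4096, // maximum number of bytes the server should return
    }
    if err := remotefs.WriteFileHeader(f.stdin, req, nil); err != nil {
        return nil, err
    }
    // The reply is a CmdOK header whose Size gives the payload length.
    return f.getResponse()
}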
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs_filedriver.go b/vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs_filedriver.go
deleted file mode 100644
index f335868af..000000000
--- a/vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs_filedriver.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// +build windows
-
-package lcow // import "github.com/docker/docker/daemon/graphdriver/lcow"
-
-import (
-    "bytes"
-    "encoding/json"
-    "os"
-    "strconv"
-
-    "github.com/Microsoft/opengcs/service/gcsutils/remotefs"
-
-    "github.com/containerd/continuity/driver"
-    "github.com/sirupsen/logrus"
-)
-
-var _ driver.Driver = &lcowfs{}
-
-func (l *lcowfs) Readlink(p string) (string, error) {
-    logrus.Debugf("remotefs.readlink args: %s", p)
-
-    result := &bytes.Buffer{}
-    if err := l.runRemoteFSProcess(nil, result, remotefs.ReadlinkCmd, p); err != nil {
-        return "", err
-    }
-    return result.String(), nil
-}
-
-func (l *lcowfs) Mkdir(path string, mode os.FileMode) error {
-    return l.mkdir(path, mode, remotefs.MkdirCmd)
-}
-
-func (l *lcowfs) MkdirAll(path string, mode os.FileMode) error {
-    return l.mkdir(path, mode, remotefs.MkdirAllCmd)
-}
-
-func (l *lcowfs) mkdir(path string, mode os.FileMode, cmd string) error {
-    modeStr := strconv.FormatUint(uint64(mode), 8)
-    logrus.Debugf("remotefs.%s args: %s %s", cmd, path, modeStr)
-    return l.runRemoteFSProcess(nil, nil, cmd, path, modeStr)
-}
-
-func (l *lcowfs) Remove(path string) error {
-    return l.remove(path, remotefs.RemoveCmd)
-}
-
-func (l *lcowfs) RemoveAll(path string) error {
-    return l.remove(path, remotefs.RemoveAllCmd)
-}
-
-func (l *lcowfs) remove(path string, cmd string) error {
-    logrus.Debugf("remotefs.%s args: %s", cmd, path)
-    return l.runRemoteFSProcess(nil, nil, cmd, path)
-}
-
-func (l *lcowfs) Link(oldname, newname string) error {
-    return l.link(oldname, newname, remotefs.LinkCmd)
-}
-
-func (l *lcowfs) Symlink(oldname, newname string) error {
-    return l.link(oldname, newname, remotefs.SymlinkCmd)
-}
-
-func (l *lcowfs) link(oldname, newname, cmd string) error {
-    logrus.Debugf("remotefs.%s args: %s %s", cmd, oldname, newname)
-    return l.runRemoteFSProcess(nil, nil, cmd, oldname, newname)
-}
-
-func (l *lcowfs) Lchown(name string, uid, gid int64) error {
-    uidStr := strconv.FormatInt(uid, 10)
-    gidStr := strconv.FormatInt(gid, 10)
-
-    logrus.Debugf("remotefs.lchown args: %s %s %s", name, uidStr, gidStr)
-    return l.runRemoteFSProcess(nil, nil, remotefs.LchownCmd, name, uidStr, gidStr)
-}
-
-// Lchmod changes the mode of a file without following symlinks.
-func (l *lcowfs) Lchmod(path string, mode os.FileMode) error {
-    modeStr := strconv.FormatUint(uint64(mode), 8)
-    logrus.Debugf("remotefs.lchmod args: %s %s", path, modeStr)
-    return l.runRemoteFSProcess(nil, nil, remotefs.LchmodCmd, path, modeStr)
-}
-
-func (l *lcowfs) Mknod(path string, mode os.FileMode, major, minor int) error {
-    modeStr := strconv.FormatUint(uint64(mode), 8)
-    majorStr := strconv.FormatUint(uint64(major), 10)
-    minorStr := strconv.FormatUint(uint64(minor), 10)
-
-    logrus.Debugf("remotefs.mknod args: %s %s %s %s", path, modeStr, majorStr, minorStr)
-    return l.runRemoteFSProcess(nil, nil, remotefs.MknodCmd, path, modeStr, majorStr, minorStr)
-}
-
-func (l *lcowfs) Mkfifo(path string, mode os.FileMode) error {
-    modeStr := strconv.FormatUint(uint64(mode), 8)
-    logrus.Debugf("remotefs.mkfifo args: %s %s", path, modeStr)
-    return l.runRemoteFSProcess(nil, nil, remotefs.MkfifoCmd, path, modeStr)
-}
-
-func (l *lcowfs) Stat(p string) (os.FileInfo, error) {
-    return l.stat(p, remotefs.StatCmd)
-}
-
-func (l *lcowfs) Lstat(p string) (os.FileInfo, error) {
-    return l.stat(p, remotefs.LstatCmd)
-}
-
-func (l *lcowfs) stat(path string, cmd string) (os.FileInfo, error) {
-    logrus.Debugf("remotefs.stat inputs: %s %s", cmd, path)
-
-    output := &bytes.Buffer{}
-    err := l.runRemoteFSProcess(nil, output, cmd, path)
-    if err != nil {
-        return nil, err
-    }
-
-    var fi remotefs.FileInfo
-    if err := json.Unmarshal(output.Bytes(), &fi); err != nil {
-        return nil, err
-    }
-
-    logrus.Debugf("remotefs.stat success. got: %v\n", fi)
-    return &fi, nil
-}
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs_pathdriver.go b/vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs_pathdriver.go
deleted file mode 100644
index 74895b046..000000000
--- a/vendor/github.com/docker/docker/daemon/graphdriver/lcow/remotefs_pathdriver.go
+++ /dev/null
@@ -1,212 +0,0 @@
-// +build windows
-
-package lcow // import "github.com/docker/docker/daemon/graphdriver/lcow"
-
-import (
-    "errors"
-    "os"
-    pathpkg "path"
-    "path/filepath"
-    "sort"
-    "strings"
-
-    "github.com/containerd/continuity/pathdriver"
-)
-
-var _ pathdriver.PathDriver = &lcowfs{}
-
-// Continuity Path functions can be done locally
-func (l *lcowfs) Join(path ...string) string {
-    return pathpkg.Join(path...)
-}
-
-func (l *lcowfs) IsAbs(path string) bool {
-    return pathpkg.IsAbs(path)
-}
-
-func sameWord(a, b string) bool {
-    return a == b
-}
-
-// Implementation taken from the Go standard library
-func (l *lcowfs) Rel(basepath, targpath string) (string, error) {
-    baseVol := ""
-    targVol := ""
-    base := l.Clean(basepath)
-    targ := l.Clean(targpath)
-    if sameWord(targ, base) {
-        return ".", nil
-    }
-    base = base[len(baseVol):]
-    targ = targ[len(targVol):]
-    if base == "." {
-        base = ""
-    }
-    // Can't use IsAbs - `\a` and `a` are both relative in Windows.
-    baseSlashed := len(base) > 0 && base[0] == l.Separator()
-    targSlashed := len(targ) > 0 && targ[0] == l.Separator()
-    if baseSlashed != targSlashed || !sameWord(baseVol, targVol) {
-        return "", errors.New("Rel: can't make " + targpath + " relative to " + basepath)
-    }
-    // Position base[b0:bi] and targ[t0:ti] at the first differing elements.
-    bl := len(base)
-    tl := len(targ)
-    var b0, bi, t0, ti int
-    for {
-        for bi < bl && base[bi] != l.Separator() {
-            bi++
-        }
-        for ti < tl && targ[ti] != l.Separator() {
-            ti++
-        }
-        if !sameWord(targ[t0:ti], base[b0:bi]) {
-            break
-        }
-        if bi < bl {
-            bi++
-        }
-        if ti < tl {
-            ti++
-        }
-        b0 = bi
-        t0 = ti
-    }
-    if base[b0:bi] == ".." {
-        return "", errors.New("Rel: can't make " + targpath + " relative to " + basepath)
-    }
-    if b0 != bl {
-        // Base elements left. Must go up before going down.
-        seps := strings.Count(base[b0:bl], string(l.Separator()))
-        size := 2 + seps*3
-        if tl != t0 {
-            size += 1 + tl - t0
-        }
-        buf := make([]byte, size)
-        n := copy(buf, "..")
-        for i := 0; i < seps; i++ {
-            buf[n] = l.Separator()
-            copy(buf[n+1:], "..")
-            n += 3
-        }
-        if t0 != tl {
-            buf[n] = l.Separator()
-            copy(buf[n+1:], targ[t0:])
-        }
-        return string(buf), nil
-    }
-    return targ[t0:], nil
-}
-
-func (l *lcowfs) Base(path string) string {
-    return pathpkg.Base(path)
-}
-
-func (l *lcowfs) Dir(path string) string {
-    return pathpkg.Dir(path)
-}
-
-func (l *lcowfs) Clean(path string) string {
-    return pathpkg.Clean(path)
-}
-
-func (l *lcowfs) Split(path string) (dir, file string) {
-    return pathpkg.Split(path)
-}
-
-func (l *lcowfs) Separator() byte {
-    return '/'
-}
-
-func (l *lcowfs) Abs(path string) (string, error) {
-    // Abs is supposed to add the current working directory, which is meaningless in lcow.
-    // So, return an error.
-    return "", ErrNotSupported
-}
-
-// Implementation taken from the Go standard library
-func (l *lcowfs) Walk(root string, walkFn filepath.WalkFunc) error {
-    info, err := l.Lstat(root)
-    if err != nil {
-        err = walkFn(root, nil, err)
-    } else {
-        err = l.walk(root, info, walkFn)
-    }
-    if err == filepath.SkipDir {
-        return nil
-    }
-    return err
-}
-
-// walk recursively descends path, calling walkFn.
-func (l *lcowfs) walk(path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
-    err := walkFn(path, info, nil)
-    if err != nil {
-        if info.IsDir() && err == filepath.SkipDir {
-            return nil
-        }
-        return err
-    }
-
-    if !info.IsDir() {
-        return nil
-    }
-
-    names, err := l.readDirNames(path)
-    if err != nil {
-        return walkFn(path, info, err)
-    }
-
-    for _, name := range names {
-        filename := l.Join(path, name)
-        fileInfo, err := l.Lstat(filename)
-        if err != nil {
-            if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
-                return err
-            }
-        } else {
-            err = l.walk(filename, fileInfo, walkFn)
-            if err != nil {
-                if !fileInfo.IsDir() || err != filepath.SkipDir {
-                    return err
-                }
-            }
-        }
-    }
-    return nil
-}
-
-// readDirNames reads the directory named by dirname and returns
-// a sorted list of directory entries.
-func (l *lcowfs) readDirNames(dirname string) ([]string, error) {
-    f, err := l.Open(dirname)
-    if err != nil {
-        return nil, err
-    }
-    files, err := f.Readdir(-1)
-    f.Close()
-    if err != nil {
-        return nil, err
-    }
-
-    names := make([]string, len(files))
-    for i := range files {
-        names[i] = files[i].Name()
-    }
-
-    sort.Strings(names)
-    return names, nil
-}
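A small usage sketch (not part of the patch): walking the remote filesystem with the Walk implementation above and collecting regular-file paths. The root argument is illustrative:

// Hypothetical helper built on lcowfs.Walk.
func listFiles(l *lcowfs, root string) ([]string, error) {
    var files []string
    err := l.Walk(root, func(p string, info os.FileInfo, err error) error {
        if err != nil {
            return err
        }
        if !info.IsDir() {
            files = append(files, p)
        }
        return nil
    })
    return files, err
}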
-
-// Note that Go's filepath.FromSlash/ToSlash convert between OS paths and '/'. Since the path separator
-// for LCOW (and Unix) is '/', they are no-ops.
-func (l *lcowfs) FromSlash(path string) string {
-    return path
-}
-
-func (l *lcowfs) ToSlash(path string) string {
-    return path
-}
-
-func (l *lcowfs) Match(pattern, name string) (matched bool, err error) {
-    return pathpkg.Match(pattern, name)
-}
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go
deleted file mode 100644
index 0c2167f08..000000000
--- a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go
+++ /dev/null
@@ -1,524 +0,0 @@
-// +build linux
-
-package overlay // import "github.com/docker/docker/daemon/graphdriver/overlay"
-
-import (
-    "bufio"
-    "fmt"
-    "io"
-    "io/ioutil"
-    "os"
-    "os/exec"
-    "path"
-    "path/filepath"
-    "strconv"
-    "strings"
-
-    "github.com/docker/docker/daemon/graphdriver"
-    "github.com/docker/docker/daemon/graphdriver/copy"
-    "github.com/docker/docker/daemon/graphdriver/overlayutils"
-    "github.com/docker/docker/pkg/archive"
-    "github.com/docker/docker/pkg/containerfs"
-    "github.com/docker/docker/pkg/fsutils"
-    "github.com/docker/docker/pkg/idtools"
-    "github.com/docker/docker/pkg/locker"
-    "github.com/docker/docker/pkg/mount"
-    "github.com/docker/docker/pkg/parsers"
-    "github.com/docker/docker/pkg/system"
-    "github.com/opencontainers/selinux/go-selinux/label"
-    "github.com/sirupsen/logrus"
-    "golang.org/x/sys/unix"
-)
-
-// This is a small wrapper over the NaiveDiffWriter that lets us have a custom
-// implementation of ApplyDiff()
-
-var (
-    // ErrApplyDiffFallback is returned to indicate that a normal ApplyDiff is applied as a fallback from Naive diff writer.
-    ErrApplyDiffFallback = fmt.Errorf("Fall back to normal ApplyDiff")
-    backingFs            = ""
-)
-
-// ApplyDiffProtoDriver wraps the ProtoDriver by extending the interface with ApplyDiff method.
-type ApplyDiffProtoDriver interface {
-    graphdriver.ProtoDriver
-    // ApplyDiff writes the diff to the archive for the given id and parent id.
-    // It returns the size in bytes written if successful; otherwise the error ErrApplyDiffFallback is returned.
-    ApplyDiff(id, parent string, diff io.Reader) (size int64, err error)
-}
-
-type naiveDiffDriverWithApply struct {
-    graphdriver.Driver
-    applyDiff ApplyDiffProtoDriver
-}
-
-// NaiveDiffDriverWithApply returns a NaiveDiff driver with custom ApplyDiff.
-func NaiveDiffDriverWithApply(driver ApplyDiffProtoDriver, uidMaps, gidMaps []idtools.IDMap) graphdriver.Driver {
-    return &naiveDiffDriverWithApply{
-        Driver:    graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps),
-        applyDiff: driver,
-    }
-}
-
-// ApplyDiff creates a diff layer with either the NaiveDiffDriver or with a fallback.
-func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
-    b, err := d.applyDiff.ApplyDiff(id, parent, diff)
-    if err == ErrApplyDiffFallback {
-        return d.Driver.ApplyDiff(id, parent, diff)
-    }
-    return b, err
-}
-
-// This backend uses the overlay union filesystem for containers
-// plus hard link file sharing for images.
-
-// Each container/image can have a "root" subdirectory which is a plain
-// filesystem hierarchy, or they can use overlay.
-
-// If they use overlay there is an "upper" directory and a "lower-id"
-// file, as well as "merged" and "work" directories. The "upper"
-// directory has the upper layer of the overlay, and "lower-id" contains
-// the id of the parent whose "root" directory shall be used as the lower
-// layer in the overlay. The overlay itself is mounted in the "merged"
-// directory, and the "work" dir is needed for overlay to work.
-
-// When an overlay layer is created there are two cases: either the
-// parent has a "root" dir, in which case we start out with an empty "upper"
-// directory overlaid on the parent's root. This is typically the
-// case with the init layer of a container which is based on an image.
-// If there is no "root" in the parent, we inherit the lower-id from
-// the parent and start by making a copy of the parent's "upper" dir.
-// This is typically the case for a container layer which copies
-// its parent -init upper layer.
-
-// Additionally we also have a custom implementation of ApplyLayer
-// which makes a recursive copy of the parent "root" layer using
-// hardlinks to share file data, and then applies the layer on top
-// of that. This means all child images share file (but not directory)
-// data with the parent.
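A minimal sketch (not part of the patch), assuming the on-disk layout described above, of how the overlay mount options are assembled for a layer that has no "root" directory. It mirrors the logic in Get further down; homeDir and id are illustrative:

// Hypothetical helper: compose lowerdir/upperdir/workdir from the layout.
func overlayMountOpts(homeDir, id string) (string, error) {
    dir := path.Join(homeDir, id)
    lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id"))
    if err != nil {
        return "", err
    }
    // The parent's plain "root" hierarchy is the single lower layer;
    // this layer's "upper" and "work" complete the overlay.
    return fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s",
        path.Join(homeDir, string(lowerID), "root"),
        path.Join(dir, "upper"),
        path.Join(dir, "work")), nil
}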
-
-type overlayOptions struct{}
-
-// Driver contains information about the home directory and the list of active mounts that are created using this driver.
-type Driver struct {
-    home          string
-    uidMaps       []idtools.IDMap
-    gidMaps       []idtools.IDMap
-    ctr           *graphdriver.RefCounter
-    supportsDType bool
-    locker        *locker.Locker
-}
-
-func init() {
-    graphdriver.Register("overlay", Init)
-}
-
-// Init returns the NaiveDiffDriver, a native diff driver for the overlay filesystem.
-// If the overlay filesystem is not supported on the host, the error
-// graphdriver.ErrNotSupported is returned.
-// If an overlay filesystem is not supported over an existing filesystem then
-// the error graphdriver.ErrIncompatibleFS is returned.
-func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
-    _, err := parseOptions(options)
-    if err != nil {
-        return nil, err
-    }
-
-    if err := supportsOverlay(); err != nil {
-        return nil, graphdriver.ErrNotSupported
-    }
-
-    // Perform feature detection on /var/lib/docker/overlay if it's an existing directory.
-    // This covers situations where /var/lib/docker/overlay is a mount, and on a different
-    // filesystem than /var/lib/docker.
-    // If the path does not exist, fall back to using /var/lib/docker for feature detection.
-    testdir := home
-    if _, err := os.Stat(testdir); os.IsNotExist(err) {
-        testdir = filepath.Dir(testdir)
-    }
-
-    fsMagic, err := graphdriver.GetFSMagic(testdir)
-    if err != nil {
-        return nil, err
-    }
-    if fsName, ok := graphdriver.FsNames[fsMagic]; ok {
-        backingFs = fsName
-    }
-
-    switch fsMagic {
-    case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs, graphdriver.FsMagicNfsFs, graphdriver.FsMagicOverlay, graphdriver.FsMagicZfs:
-        logrus.WithField("storage-driver", "overlay").Errorf("'overlay' is not supported over %s", backingFs)
-        return nil, graphdriver.ErrIncompatibleFS
-    }
-
-    supportsDType, err := fsutils.SupportsDType(testdir)
-    if err != nil {
-        return nil, err
-    }
-    if !supportsDType {
-        if !graphdriver.IsInitialized(home) {
-            return nil, overlayutils.ErrDTypeNotSupported("overlay", backingFs)
-        }
-        // allow running without d_type only for existing setups (#27443)
-        logrus.WithField("storage-driver", "overlay").Warn(overlayutils.ErrDTypeNotSupported("overlay", backingFs))
-    }
-
-    rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
-    if err != nil {
-        return nil, err
-    }
-    // Create the driver home dir
-    if err := idtools.MkdirAllAndChown(home, 0700, idtools.IDPair{UID: rootUID, GID: rootGID}); err != nil {
-        return nil, err
-    }
-
-    d := &Driver{
-        home:          home,
-        uidMaps:       uidMaps,
-        gidMaps:       gidMaps,
-        ctr:           graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)),
-        supportsDType: supportsDType,
-        locker:        locker.New(),
-    }
-
-    return NaiveDiffDriverWithApply(d, uidMaps, gidMaps), nil
-}
-
-func parseOptions(options []string) (*overlayOptions, error) {
-    o := &overlayOptions{}
-    for _, option := range options {
-        key, _, err := parsers.ParseKeyValueOpt(option)
-        if err != nil {
-            return nil, err
-        }
-        key = strings.ToLower(key)
-        switch key {
-        default:
-            return nil, fmt.Errorf("overlay: unknown option %s", key)
-        }
-    }
-    return o, nil
-}
-
-func supportsOverlay() error {
-    // We can try to modprobe overlay first before looking at
-    // /proc/filesystems for when overlay is supported
-    exec.Command("modprobe", "overlay").Run()
-
-    f, err := os.Open("/proc/filesystems")
-    if err != nil {
-        return err
-    }
-    defer f.Close()
-
-    s := bufio.NewScanner(f)
-    for s.Scan() {
-        if s.Text() == "nodev\toverlay" {
-            return nil
-        }
-    }
-    logrus.WithField("storage-driver", "overlay").Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.")
-    return graphdriver.ErrNotSupported
-}
-
-func (d *Driver) String() string {
-    return "overlay"
-}
-
-// Status returns current driver information in a two dimensional string array.
-// Output contains "Backing Filesystem" used in this implementation.
-func (d *Driver) Status() [][2]string {
-    return [][2]string{
-        {"Backing Filesystem", backingFs},
-        {"Supports d_type", strconv.FormatBool(d.supportsDType)},
-    }
-}
-
-// GetMetadata returns metadata about the overlay driver such as the RootDir,
-// LowerDir, UpperDir, WorkDir and MergedDir used to store data.
-func (d *Driver) GetMetadata(id string) (map[string]string, error) {
-    dir := d.dir(id)
-    if _, err := os.Stat(dir); err != nil {
-        return nil, err
-    }
-
-    metadata := make(map[string]string)
-
-    // If id has a root, it is an image
-    rootDir := path.Join(dir, "root")
-    if _, err := os.Stat(rootDir); err == nil {
-        metadata["RootDir"] = rootDir
-        return metadata, nil
-    }
-
-    lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id"))
-    if err != nil {
-        return nil, err
-    }
-
-    metadata["LowerDir"] = path.Join(d.dir(string(lowerID)), "root")
-    metadata["UpperDir"] = path.Join(dir, "upper")
-    metadata["WorkDir"] = path.Join(dir, "work")
-    metadata["MergedDir"] = path.Join(dir, "merged")
-
-    return metadata, nil
-}
-
-// Cleanup any state created by overlay which should be cleaned when the daemon
-// is being shut down. For now, we just have to unmount the bind mounts
-// we have created.
-func (d *Driver) Cleanup() error {
-    return mount.RecursiveUnmount(d.home)
-}
-
-// CreateReadWrite creates a layer that is writable for use as a container
-// file system.
-func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
-    return d.Create(id, parent, opts)
-}
-
-// Create is used to create the upper, lower, and merged directories required for overlay fs for a given id.
-// The parent filesystem is used to configure these directories for the overlay.
-func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) {
-    if opts != nil && len(opts.StorageOpt) != 0 {
-        return fmt.Errorf("--storage-opt is not supported for overlay")
-    }
-
-    dir := d.dir(id)
-
-    rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
-    if err != nil {
-        return err
-    }
-    root := idtools.IDPair{UID: rootUID, GID: rootGID}
-
-    if err := idtools.MkdirAllAndChown(path.Dir(dir), 0700, root); err != nil {
-        return err
-    }
-    if err := idtools.MkdirAndChown(dir, 0700, root); err != nil {
-        return err
-    }
-
-    defer func() {
-        // Clean up on failure
-        if retErr != nil {
-            os.RemoveAll(dir)
-        }
-    }()
-
-    // Toplevel images are just a "root" dir
-    if parent == "" {
-        return idtools.MkdirAndChown(path.Join(dir, "root"), 0755, root)
-    }
-
-    parentDir := d.dir(parent)
-
-    // Ensure parent exists
-    if _, err := os.Lstat(parentDir); err != nil {
-        return err
-    }
-
-    // If parent has a root, just do an overlay to it
-    parentRoot := path.Join(parentDir, "root")
-
-    if s, err := os.Lstat(parentRoot); err == nil {
-        if err := idtools.MkdirAndChown(path.Join(dir, "upper"), s.Mode(), root); err != nil {
-            return err
-        }
-        if err := idtools.MkdirAndChown(path.Join(dir, "work"), 0700, root); err != nil {
-            return err
-        }
-        return ioutil.WriteFile(path.Join(dir, "lower-id"), []byte(parent), 0666)
-    }
-
-    // Otherwise, copy the upper and the lower-id from the parent
-    lowerID, err := ioutil.ReadFile(path.Join(parentDir, "lower-id"))
-    if err != nil {
-        return err
-    }
-
-    if err := ioutil.WriteFile(path.Join(dir, "lower-id"), lowerID, 0666); err != nil {
-        return err
-    }
-
-    parentUpperDir := path.Join(parentDir, "upper")
-    s, err := os.Lstat(parentUpperDir)
-    if err != nil {
-        return err
-    }
-
-    upperDir := path.Join(dir, "upper")
-    if err := idtools.MkdirAndChown(upperDir, s.Mode(), root); err != nil {
-        return err
-    }
-    if err := idtools.MkdirAndChown(path.Join(dir, "work"), 0700, root); err != nil {
-        return err
-    }
-
-    return copy.DirCopy(parentUpperDir, upperDir, copy.Content, true)
-}
-
-func (d *Driver) dir(id string) string {
-    return path.Join(d.home, id)
-}
cleans the directories that are created for this id. -func (d *Driver) Remove(id string) error { - if id == "" { - return fmt.Errorf("refusing to remove the directories: id is empty") - } - d.locker.Lock(id) - defer d.locker.Unlock(id) - return system.EnsureRemoveAll(d.dir(id)) -} - -// Get creates and mounts the required file system for the given id and returns the mount path. -func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, err error) { - d.locker.Lock(id) - defer d.locker.Unlock(id) - dir := d.dir(id) - if _, err := os.Stat(dir); err != nil { - return nil, err - } - // If id has a root, just return it - rootDir := path.Join(dir, "root") - if _, err := os.Stat(rootDir); err == nil { - return containerfs.NewLocalContainerFS(rootDir), nil - } - - mergedDir := path.Join(dir, "merged") - if count := d.ctr.Increment(mergedDir); count > 1 { - return containerfs.NewLocalContainerFS(mergedDir), nil - } - defer func() { - if err != nil { - if c := d.ctr.Decrement(mergedDir); c <= 0 { - if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil { - logrus.WithField("storage-driver", "overlay").Debugf("Failed to unmount %s: %v: %v", id, mntErr, err) - } - // Cleanup the created merged directory; see the comment in Put's rmdir - if rmErr := unix.Rmdir(mergedDir); rmErr != nil && !os.IsNotExist(rmErr) { - logrus.WithField("storage-driver", "overlay").Warnf("Failed to remove %s: %v: %v", id, rmErr, err) - } - } - } - }() - lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id")) - if err != nil { - return nil, err - } - rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { - return nil, err - } - if err := idtools.MkdirAndChown(mergedDir, 0700, idtools.IDPair{UID: rootUID, GID: rootGID}); err != nil { - return nil, err - } - var ( - lowerDir = path.Join(d.dir(string(lowerID)), "root") - upperDir = path.Join(dir, "upper") - workDir = path.Join(dir, "work") - opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir) - ) - if err := unix.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil { - return nil, fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) - } - // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a - // user namespace requires this to move a directory from lower to upper. - if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil { - return nil, err - } - return containerfs.NewLocalContainerFS(mergedDir), nil -} - -// Put unmounts the mount path created for the give id. -// It also removes the 'merged' directory to force the kernel to unmount the -// overlay mount in other namespaces. -func (d *Driver) Put(id string) error { - d.locker.Lock(id) - defer d.locker.Unlock(id) - // If id has a root, just return - if _, err := os.Stat(path.Join(d.dir(id), "root")); err == nil { - return nil - } - mountpoint := path.Join(d.dir(id), "merged") - logger := logrus.WithField("storage-driver", "overlay") - if count := d.ctr.Decrement(mountpoint); count > 0 { - return nil - } - if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { - logger.Debugf("Failed to unmount %s overlay: %v", id, err) - } - - // Remove the mountpoint here. Removing the mountpoint (in newer kernels) - // will cause all other instances of this mount in other mount namespaces - // to be unmounted. 
This is necessary to avoid cases where an overlay mount - // that is present in another namespace will cause subsequent mount - // operations to fail with ebusy. We ignore any errors here because this may - // fail on older kernels which don't have - // torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied. - if err := unix.Rmdir(mountpoint); err != nil { - logger.Debugf("Failed to remove %s overlay: %v", id, err) - } - return nil -} - -// ApplyDiff applies the new layer on top of the root; if the parent does not exist, it returns an ErrApplyDiffFallback error. -func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) { - dir := d.dir(id) - - if parent == "" { - return 0, ErrApplyDiffFallback - } - - parentRootDir := path.Join(d.dir(parent), "root") - if _, err := os.Stat(parentRootDir); err != nil { - return 0, ErrApplyDiffFallback - } - - // We now know there is a parent, and it has a "root" directory containing - // the full root filesystem. We can just hardlink it and apply the - // layer. This relies on two things: - // 1) ApplyDiff is only run once on a clean (no writes to upper layer) container - // 2) ApplyDiff doesn't do any in-place writes to files (would break hardlinks) - // Both are currently true and are not expected to break - - tmpRootDir, err := ioutil.TempDir(dir, "tmproot") - if err != nil { - return 0, err - } - defer func() { - if err != nil { - os.RemoveAll(tmpRootDir) - } else { - os.RemoveAll(path.Join(dir, "upper")) - os.RemoveAll(path.Join(dir, "work")) - os.RemoveAll(path.Join(dir, "merged")) - os.RemoveAll(path.Join(dir, "lower-id")) - } - }() - - if err = copy.DirCopy(parentRootDir, tmpRootDir, copy.Hardlink, true); err != nil { - return 0, err - } - - options := &archive.TarOptions{UIDMaps: d.uidMaps, GIDMaps: d.gidMaps} - if size, err = graphdriver.ApplyUncompressedLayer(tmpRootDir, diff, options); err != nil { - return 0, err - } - - rootDir := path.Join(dir, "root") - if err := os.Rename(tmpRootDir, rootDir); err != nil { - return 0, err - } - - return -} - -// Exists checks whether a layer directory exists for the given id. -func (d *Driver) Exists(id string) bool { - _, err := os.Stat(d.dir(id)) - return err == nil -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go deleted file mode 100644 index 8fc06ffec..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay_unsupported.go +++ /dev/null @@ -1,3 +0,0 @@ -// +build !linux - -package overlay // import "github.com/docker/docker/daemon/graphdriver/overlay" diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/check.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/check.go deleted file mode 100644 index d6ee42f47..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/check.go +++ /dev/null @@ -1,134 +0,0 @@ -// +build linux - -package overlay2 // import "github.com/docker/docker/daemon/graphdriver/overlay2" - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "path/filepath" - "syscall" - - "github.com/docker/docker/pkg/system" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -// doesSupportNativeDiff checks whether the filesystem has a bug -// which copies up the opaque flag when copying up an opaque -// directory, or whether the kernel enables CONFIG_OVERLAY_FS_REDIRECT_DIR. -// When either is present, the naive diff driver should be used.
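-// The probe below builds a three-layer overlay (l1, l2, l3) under a -// throwaway temp directory, marks l2/d opaque, forces a copy-up of "d" into -// l3, and then verifies that neither the opaque xattr nor (after a rename) a -// redirect xattr leaks into the upper layer.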
-func doesSupportNativeDiff(d string) error { - td, err := ioutil.TempDir(d, "opaque-bug-check") - if err != nil { - return err - } - defer func() { - if err := os.RemoveAll(td); err != nil { - logrus.WithField("storage-driver", "overlay2").Warnf("Failed to remove check directory %v: %v", td, err) - } - }() - - // Make directories l1/d, l1/d1, l2/d, l3, work, merged - if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0755); err != nil { - return err - } - if err := os.MkdirAll(filepath.Join(td, "l1", "d1"), 0755); err != nil { - return err - } - if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0755); err != nil { - return err - } - if err := os.Mkdir(filepath.Join(td, "l3"), 0755); err != nil { - return err - } - if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil { - return err - } - if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil { - return err - } - - // Mark l2/d as opaque - if err := system.Lsetxattr(filepath.Join(td, "l2", "d"), "trusted.overlay.opaque", []byte("y"), 0); err != nil { - return errors.Wrap(err, "failed to set opaque flag on middle layer") - } - - opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", path.Join(td, "l2"), path.Join(td, "l1"), path.Join(td, "l3"), path.Join(td, "work")) - if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil { - return errors.Wrap(err, "failed to mount overlay") - } - defer func() { - if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil { - logrus.WithField("storage-driver", "overlay2").Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err) - } - }() - - // Touch file in d to force copy up of opaque directory "d" from "l2" to "l3" - if err := ioutil.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil { - return errors.Wrap(err, "failed to write to merged directory") - } - - // Check l3/d does not have opaque flag - xattrOpaque, err := system.Lgetxattr(filepath.Join(td, "l3", "d"), "trusted.overlay.opaque") - if err != nil { - return errors.Wrap(err, "failed to read opaque flag on upper layer") - } - if string(xattrOpaque) == "y" { - return errors.New("opaque flag erroneously copied up, consider update to kernel 4.8 or later to fix") - } - - // rename "d1" to "d2" - if err := os.Rename(filepath.Join(td, "merged", "d1"), filepath.Join(td, "merged", "d2")); err != nil { - // if rename failed with syscall.EXDEV, the kernel doesn't have CONFIG_OVERLAY_FS_REDIRECT_DIR enabled - if err.(*os.LinkError).Err == syscall.EXDEV { - return nil - } - return errors.Wrap(err, "failed to rename dir in merged directory") - } - // get the xattr of "d2" - xattrRedirect, err := system.Lgetxattr(filepath.Join(td, "l3", "d2"), "trusted.overlay.redirect") - if err != nil { - return errors.Wrap(err, "failed to read redirect flag on upper layer") - } - - if string(xattrRedirect) == "d1" { - return errors.New("kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled") - } - - return nil -} - -// supportsMultipleLowerDir checks if the system supports multiple lowerdirs, -// which is required for the overlay2 driver. On 4.x kernels, multiple lowerdirs -// are always available (so this check isn't needed), and backported to RHEL and -// CentOS 3.x kernels (3.10.0-693.el7.x86_64 and up). This function is to detect -// support on those kernels, without doing a kernel version compare. 
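-// The probe is deliberately minimal: it creates lower1, lower2, upper, work, -// and merged under a temp directory and attempts a mount with two lowerdirs; -// if the kernel accepts the mount, multiple lower layers are supported.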
-func supportsMultipleLowerDir(d string) error { - td, err := ioutil.TempDir(d, "multiple-lowerdir-check") - if err != nil { - return err - } - defer func() { - if err := os.RemoveAll(td); err != nil { - logrus.WithField("storage-driver", "overlay2").Warnf("Failed to remove check directory %v: %v", td, err) - } - }() - - for _, dir := range []string{"lower1", "lower2", "upper", "work", "merged"} { - if err := os.Mkdir(filepath.Join(td, dir), 0755); err != nil { - return err - } - } - - opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", path.Join(td, "lower2"), path.Join(td, "lower1"), path.Join(td, "upper"), path.Join(td, "work")) - if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil { - return errors.Wrap(err, "failed to mount overlay") - } - if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil { - logrus.WithField("storage-driver", "overlay2").Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err) - } - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/mount.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/mount.go deleted file mode 100644 index da409fc81..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/mount.go +++ /dev/null @@ -1,89 +0,0 @@ -// +build linux - -package overlay2 // import "github.com/docker/docker/daemon/graphdriver/overlay2" - -import ( - "bytes" - "encoding/json" - "flag" - "fmt" - "os" - "runtime" - - "github.com/docker/docker/pkg/reexec" - "golang.org/x/sys/unix" -) - -func init() { - reexec.Register("docker-mountfrom", mountFromMain) -} - -func fatal(err error) { - fmt.Fprint(os.Stderr, err) - os.Exit(1) -} - -type mountOptions struct { - Device string - Target string - Type string - Label string - Flag uint32 -} - -func mountFrom(dir, device, target, mType string, flags uintptr, label string) error { - options := &mountOptions{ - Device: device, - Target: target, - Type: mType, - Flag: uint32(flags), - Label: label, - } - - cmd := reexec.Command("docker-mountfrom", dir) - w, err := cmd.StdinPipe() - if err != nil { - return fmt.Errorf("mountfrom error on pipe creation: %v", err) - } - - output := bytes.NewBuffer(nil) - cmd.Stdout = output - cmd.Stderr = output - if err := cmd.Start(); err != nil { - w.Close() - return fmt.Errorf("mountfrom error on re-exec cmd: %v", err) - } - // write the options to the pipe for the mount re-exec to read - if err := json.NewEncoder(w).Encode(options); err != nil { - w.Close() - return fmt.Errorf("mountfrom json encode to pipe failed: %v", err) - } - w.Close() - - if err := cmd.Wait(); err != nil { - return fmt.Errorf("mountfrom re-exec error: %v: output: %v", err, output) - } - return nil -} - -// mountFromMain is the entry point for docker-mountfrom on re-exec.
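-// The parent (mountFrom above) passes the working directory as argv[1] and -// writes a single JSON-encoded mountOptions object to the child's stdin; the -// child decodes it, chdirs to argv[1] so relative mount paths resolve there, -// and issues the mount(2) call. With hypothetical values, the child might -// read: {"Device":"overlay","Target":"<id>/merged","Type":"overlay", -// "Label":"lowerdir=...,upperdir=<id>/diff,workdir=<id>/work","Flag":0}.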
-func mountFromMain() { - runtime.LockOSThread() - flag.Parse() - - var options *mountOptions - - if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil { - fatal(err) - } - - if err := os.Chdir(flag.Arg(0)); err != nil { - fatal(err) - } - - if err := unix.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil { - fatal(err) - } - - os.Exit(0) -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay.go deleted file mode 100644 index 5108a2c05..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay.go +++ /dev/null @@ -1,769 +0,0 @@ -// +build linux - -package overlay2 // import "github.com/docker/docker/daemon/graphdriver/overlay2" - -import ( - "bufio" - "context" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - "strconv" - "strings" - "sync" - - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/daemon/graphdriver/overlayutils" - "github.com/docker/docker/daemon/graphdriver/quota" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/containerfs" - "github.com/docker/docker/pkg/directory" - "github.com/docker/docker/pkg/fsutils" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/locker" - "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/pkg/parsers" - "github.com/docker/docker/pkg/parsers/kernel" - "github.com/docker/docker/pkg/system" - "github.com/docker/go-units" - rsystem "github.com/opencontainers/runc/libcontainer/system" - "github.com/opencontainers/selinux/go-selinux/label" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -var ( - // untar defines the untar method - untar = chrootarchive.UntarUncompressed -) - -// This backend uses the overlay union filesystem for containers -// with diff directories for each layer. - -// This version of the overlay driver requires at least kernel -// 4.0.0 in order to support mounting multiple diff directories. - -// Each container/image has at least a "diff" directory and "link" file. -// If there is also a "lower" file when there are diff layers -// below as well as "merged" and "work" directories. The "diff" directory -// has the upper layer of the overlay and is used to capture any -// changes to the layer. The "lower" file contains all the lower layer -// mounts separated by ":" and ordered from uppermost to lowermost -// layers. The overlay itself is mounted in the "merged" directory, -// and the "work" dir is needed for overlay to work. - -// The "link" file for each layer contains a unique string for the layer. -// Under the "l" directory at the root there will be a symbolic link -// with that unique string pointing the "diff" directory for the layer. -// The symbolic links are used to reference lower layers in the "lower" -// file and on mount. The links are used to shorten the total length -// of a layer reference without requiring changes to the layer identifier -// or root directory. Mounts are always done relative to root and -// referencing the symbolic links in order to ensure the number of -// lower directories can fit in a single page for making the mount -// syscall. A hard upper limit of 128 lower layers is enforced to ensure -// that mounts do not fail due to length. 
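-// Worked check of that limit, using the constants defined just below -// (idLength = 26, linkDir = "l") and the 4096-byte page size the driver -// assumes at mount time: (26 + len("l") + 1) * 128 = 3584 = 4096 - 512, so a -// maximal chain of 128 references of the form "l/<26 random characters>" -// saturates the mount-data budget while leaving 512 bytes of headroom for -// label metadata.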
- -const ( - driverName = "overlay2" - linkDir = "l" - lowerFile = "lower" - maxDepth = 128 - - // idLength represents the number of random characters - // which can be used to create the unique link identifier - // for every layer. If this value is too long then the - // page size limit for the mount command may be exceeded. - // The idLength should be selected such that following equation - // is true (512 is a buffer for label metadata). - // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512) - idLength = 26 -) - -type overlayOptions struct { - overrideKernelCheck bool - quota quota.Quota -} - -// Driver contains information about the home directory and the list of active -// mounts that are created using this driver. -type Driver struct { - home string - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - ctr *graphdriver.RefCounter - quotaCtl *quota.Control - options overlayOptions - naiveDiff graphdriver.DiffDriver - supportsDType bool - locker *locker.Locker -} - -var ( - backingFs = "" - projectQuotaSupported = false - - useNaiveDiffLock sync.Once - useNaiveDiffOnly bool -) - -func init() { - graphdriver.Register(driverName, Init) -} - -// Init returns the native diff driver for overlay filesystem. -// If overlay filesystem is not supported on the host, the error -// graphdriver.ErrNotSupported is returned. -// If an overlay filesystem is not supported over an existing filesystem then -// the error graphdriver.ErrIncompatibleFS is returned. -func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - opts, err := parseOptions(options) - if err != nil { - return nil, err - } - - if err := supportsOverlay(); err != nil { - return nil, graphdriver.ErrNotSupported - } - - // require kernel 4.0.0 to ensure multiple lower dirs are supported - v, err := kernel.GetKernelVersion() - if err != nil { - return nil, err - } - - // Perform feature detection on /var/lib/docker/overlay2 if it's an existing directory. - // This covers situations where /var/lib/docker/overlay2 is a mount, and on a different - // filesystem than /var/lib/docker. - // If the path does not exist, fall back to using /var/lib/docker for feature detection. 
- testdir := home - if _, err := os.Stat(testdir); os.IsNotExist(err) { - testdir = filepath.Dir(testdir) - } - - fsMagic, err := graphdriver.GetFSMagic(testdir) - if err != nil { - return nil, err - } - if fsName, ok := graphdriver.FsNames[fsMagic]; ok { - backingFs = fsName - } - - logger := logrus.WithField("storage-driver", "overlay2") - - switch fsMagic { - case graphdriver.FsMagicAufs, graphdriver.FsMagicEcryptfs, graphdriver.FsMagicNfsFs, graphdriver.FsMagicOverlay, graphdriver.FsMagicZfs: - logger.Errorf("'overlay2' is not supported over %s", backingFs) - return nil, graphdriver.ErrIncompatibleFS - case graphdriver.FsMagicBtrfs: - // Support for OverlayFS on BTRFS was added in kernel 4.7 - // See https://btrfs.wiki.kernel.org/index.php/Changelog - if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 7, Minor: 0}) < 0 { - if !opts.overrideKernelCheck { - logger.Errorf("'overlay2' requires kernel 4.7 to use on %s", backingFs) - return nil, graphdriver.ErrIncompatibleFS - } - logger.Warn("Using pre-4.7.0 kernel for overlay2 on btrfs, may require kernel update") - } - } - - if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 0, Minor: 0}) < 0 { - if opts.overrideKernelCheck { - logger.Warn("Using pre-4.0.0 kernel for overlay2, mount failures may require kernel update") - } else { - if err := supportsMultipleLowerDir(testdir); err != nil { - logger.Debugf("Multiple lower dirs not supported: %v", err) - return nil, graphdriver.ErrNotSupported - } - } - } - supportsDType, err := fsutils.SupportsDType(testdir) - if err != nil { - return nil, err - } - if !supportsDType { - if !graphdriver.IsInitialized(home) { - return nil, overlayutils.ErrDTypeNotSupported("overlay2", backingFs) - } - // allow running without d_type only for existing setups (#27443) - logger.Warn(overlayutils.ErrDTypeNotSupported("overlay2", backingFs)) - } - - rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) - if err != nil { - return nil, err - } - // Create the driver home dir - if err := idtools.MkdirAllAndChown(path.Join(home, linkDir), 0700, idtools.IDPair{UID: rootUID, GID: rootGID}); err != nil { - return nil, err - } - - d := &Driver{ - home: home, - uidMaps: uidMaps, - gidMaps: gidMaps, - ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), - supportsDType: supportsDType, - locker: locker.New(), - options: *opts, - } - - d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps) - - if backingFs == "xfs" { - // Try to enable project quota support over xfs. - if d.quotaCtl, err = quota.NewControl(home); err == nil { - projectQuotaSupported = true - } else if opts.quota.Size > 0 { - return nil, fmt.Errorf("Storage option overlay2.size not supported. Filesystem does not support Project Quota: %v", err) - } - } else if opts.quota.Size > 0 { - // if xfs is not the backing fs then error out if the storage-opt overlay2.size is used. - return nil, fmt.Errorf("Storage Option overlay2.size only supported for backingFS XFS. 
Found %v", backingFs) - } - - logger.Debugf("backingFs=%s, projectQuotaSupported=%v", backingFs, projectQuotaSupported) - - return d, nil -} - -func parseOptions(options []string) (*overlayOptions, error) { - o := &overlayOptions{} - for _, option := range options { - key, val, err := parsers.ParseKeyValueOpt(option) - if err != nil { - return nil, err - } - key = strings.ToLower(key) - switch key { - case "overlay2.override_kernel_check": - o.overrideKernelCheck, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - case "overlay2.size": - size, err := units.RAMInBytes(val) - if err != nil { - return nil, err - } - o.quota.Size = uint64(size) - default: - return nil, fmt.Errorf("overlay2: unknown option %s", key) - } - } - return o, nil -} - -func supportsOverlay() error { - // We can try to modprobe overlay first before looking at - // proc/filesystems for when overlay is supported - exec.Command("modprobe", "overlay").Run() - - f, err := os.Open("/proc/filesystems") - if err != nil { - return err - } - defer f.Close() - - s := bufio.NewScanner(f) - for s.Scan() { - if s.Text() == "nodev\toverlay" { - return nil - } - } - logrus.WithField("storage-driver", "overlay2").Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") - return graphdriver.ErrNotSupported -} - -func useNaiveDiff(home string) bool { - useNaiveDiffLock.Do(func() { - if err := doesSupportNativeDiff(home); err != nil { - logrus.WithField("storage-driver", "overlay2").Warnf("Not using native diff for overlay2, this may cause degraded performance for building images: %v", err) - useNaiveDiffOnly = true - } - }) - return useNaiveDiffOnly -} - -func (d *Driver) String() string { - return driverName -} - -// Status returns current driver information in a two dimensional string array. -// Output contains "Backing Filesystem" used in this implementation. -func (d *Driver) Status() [][2]string { - return [][2]string{ - {"Backing Filesystem", backingFs}, - {"Supports d_type", strconv.FormatBool(d.supportsDType)}, - {"Native Overlay Diff", strconv.FormatBool(!useNaiveDiff(d.home))}, - } -} - -// GetMetadata returns metadata about the overlay driver such as the LowerDir, -// UpperDir, WorkDir, and MergeDir used to store data. -func (d *Driver) GetMetadata(id string) (map[string]string, error) { - dir := d.dir(id) - if _, err := os.Stat(dir); err != nil { - return nil, err - } - - metadata := map[string]string{ - "WorkDir": path.Join(dir, "work"), - "MergedDir": path.Join(dir, "merged"), - "UpperDir": path.Join(dir, "diff"), - } - - lowerDirs, err := d.getLowerDirs(id) - if err != nil { - return nil, err - } - if len(lowerDirs) > 0 { - metadata["LowerDir"] = strings.Join(lowerDirs, ":") - } - - return metadata, nil -} - -// Cleanup any state created by overlay which should be cleaned when daemon -// is being shutdown. For now, we just have to unmount the bind mounted -// we had created. -func (d *Driver) Cleanup() error { - return mount.RecursiveUnmount(d.home) -} - -// CreateReadWrite creates a layer that is writable for use as a container -// file system. 
-func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - if opts != nil && len(opts.StorageOpt) != 0 && !projectQuotaSupported { - return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") - } - - if opts == nil { - opts = &graphdriver.CreateOpts{ - StorageOpt: map[string]string{}, - } - } - - if _, ok := opts.StorageOpt["size"]; !ok { - if opts.StorageOpt == nil { - opts.StorageOpt = map[string]string{} - } - opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10) - } - - return d.create(id, parent, opts) -} - -// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. -// The parent filesystem is used to configure these directories for the overlay. -func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { - if opts != nil && len(opts.StorageOpt) != 0 { - if _, ok := opts.StorageOpt["size"]; ok { - return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") - } - } - return d.create(id, parent, opts) -} - -func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { - dir := d.dir(id) - - rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { - return err - } - root := idtools.IDPair{UID: rootUID, GID: rootGID} - - if err := idtools.MkdirAllAndChown(path.Dir(dir), 0700, root); err != nil { - return err - } - if err := idtools.MkdirAndChown(dir, 0700, root); err != nil { - return err - } - - defer func() { - // Clean up on failure - if retErr != nil { - os.RemoveAll(dir) - } - }() - - if opts != nil && len(opts.StorageOpt) > 0 { - driver := &Driver{} - if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil { - return err - } - - if driver.options.quota.Size > 0 { - // Set container disk quota limit - if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil { - return err - } - } - } - - if err := idtools.MkdirAndChown(path.Join(dir, "diff"), 0755, root); err != nil { - return err - } - - lid := generateID(idLength) - if err := os.Symlink(path.Join("..", id, "diff"), path.Join(d.home, linkDir, lid)); err != nil { - return err - } - - // Write link id to link file - if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { - return err - } - - // if no parent directory, done - if parent == "" { - return nil - } - - if err := idtools.MkdirAndChown(path.Join(dir, "work"), 0700, root); err != nil { - return err - } - - lower, err := d.getLower(parent) - if err != nil { - return err - } - if lower != "" { - if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { - return err - } - } - - return nil -} - -// Parse overlay storage options -func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { - // Read size to set the disk project quota per container - for key, val := range storageOpt { - key := strings.ToLower(key) - switch key { - case "size": - size, err := units.RAMInBytes(val) - if err != nil { - return err - } - driver.options.quota.Size = uint64(size) - default: - return fmt.Errorf("Unknown option %s", key) - } - } - - return nil -} - -func (d *Driver) getLower(parent string) (string, error) { - parentDir := d.dir(parent) - - // Ensure parent exists - if _, err := os.Lstat(parentDir); err != nil { - return "", err - } - - // Read Parent link fileA - parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link")) - if err != nil 
{ - return "", err - } - lowers := []string{path.Join(linkDir, string(parentLink))} - - parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile)) - if err == nil { - parentLowers := strings.Split(string(parentLower), ":") - lowers = append(lowers, parentLowers...) - } - if len(lowers) > maxDepth { - return "", errors.New("max depth exceeded") - } - return strings.Join(lowers, ":"), nil -} - -func (d *Driver) dir(id string) string { - return path.Join(d.home, id) -} - -func (d *Driver) getLowerDirs(id string) ([]string, error) { - var lowersArray []string - lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)) - if err == nil { - for _, s := range strings.Split(string(lowers), ":") { - lp, err := os.Readlink(path.Join(d.home, s)) - if err != nil { - return nil, err - } - lowersArray = append(lowersArray, path.Clean(path.Join(d.home, linkDir, lp))) - } - } else if !os.IsNotExist(err) { - return nil, err - } - return lowersArray, nil -} - -// Remove cleans the directories that are created for this id. -func (d *Driver) Remove(id string) error { - if id == "" { - return fmt.Errorf("refusing to remove the directories: id is empty") - } - d.locker.Lock(id) - defer d.locker.Unlock(id) - dir := d.dir(id) - lid, err := ioutil.ReadFile(path.Join(dir, "link")) - if err == nil { - if len(lid) == 0 { - logrus.WithField("storage-driver", "overlay2").Errorf("refusing to remove empty link for layer %v", id) - } else if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { - logrus.WithField("storage-driver", "overlay2").Debugf("Failed to remove link: %v", err) - } - } - - if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) { - return err - } - return nil -} - -// Get creates and mounts the required file system for the given id and returns the mount path. 
-func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, retErr error) { - d.locker.Lock(id) - defer d.locker.Unlock(id) - dir := d.dir(id) - if _, err := os.Stat(dir); err != nil { - return nil, err - } - - diffDir := path.Join(dir, "diff") - lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile)) - if err != nil { - // If no lower, just return diff directory - if os.IsNotExist(err) { - return containerfs.NewLocalContainerFS(diffDir), nil - } - return nil, err - } - - mergedDir := path.Join(dir, "merged") - if count := d.ctr.Increment(mergedDir); count > 1 { - return containerfs.NewLocalContainerFS(mergedDir), nil - } - defer func() { - if retErr != nil { - if c := d.ctr.Decrement(mergedDir); c <= 0 { - if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil { - logrus.WithField("storage-driver", "overlay2").Errorf("error unmounting %v: %v", mergedDir, mntErr) - } - // Cleanup the created merged directory; see the comment in Put's rmdir - if rmErr := unix.Rmdir(mergedDir); rmErr != nil && !os.IsNotExist(rmErr) { - logrus.WithField("storage-driver", "overlay2").Debugf("Failed to remove %s: %v: %v", id, rmErr, err) - } - } - } - }() - - workDir := path.Join(dir, "work") - splitLowers := strings.Split(string(lowers), ":") - absLowers := make([]string, len(splitLowers)) - for i, s := range splitLowers { - absLowers[i] = path.Join(d.home, s) - } - opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(absLowers, ":"), path.Join(dir, "diff"), path.Join(dir, "work")) - mountData := label.FormatMountLabel(opts, mountLabel) - mount := unix.Mount - mountTarget := mergedDir - - rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { - return nil, err - } - if err := idtools.MkdirAndChown(mergedDir, 0700, idtools.IDPair{UID: rootUID, GID: rootGID}); err != nil { - return nil, err - } - - pageSize := unix.Getpagesize() - - // Go can return a larger page size than supported by the system - // as of go 1.7. This will be fixed in 1.8 and this block can be - // removed when building with 1.8. - // See https://github.com/golang/go/commit/1b9499b06989d2831e5b156161d6c07642926ee1 - // See https://github.com/docker/docker/issues/27384 - if pageSize > 4096 { - pageSize = 4096 - } - - // Use relative paths and mountFrom when the mount data has exceeded - // the page size. The mount syscall fails if the mount data cannot - // fit within a page and relative links make the mount data much - // smaller at the expense of requiring a fork exec to chroot. - if len(mountData) > pageSize { - opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", string(lowers), path.Join(id, "diff"), path.Join(id, "work")) - mountData = label.FormatMountLabel(opts, mountLabel) - if len(mountData) > pageSize { - return nil, fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData)) - } - - mount = func(source string, target string, mType string, flags uintptr, label string) error { - return mountFrom(d.home, source, target, mType, flags, label) - } - mountTarget = path.Join(id, "merged") - } - - if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil { - return nil, fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err) - } - - // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a - // user namespace requires this to move a directory from lower to upper. 
- if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil { - return nil, err - } - - return containerfs.NewLocalContainerFS(mergedDir), nil -} - -// Put unmounts the mount path created for the give id. -// It also removes the 'merged' directory to force the kernel to unmount the -// overlay mount in other namespaces. -func (d *Driver) Put(id string) error { - d.locker.Lock(id) - defer d.locker.Unlock(id) - dir := d.dir(id) - _, err := ioutil.ReadFile(path.Join(dir, lowerFile)) - if err != nil { - // If no lower, no mount happened and just return directly - if os.IsNotExist(err) { - return nil - } - return err - } - - mountpoint := path.Join(dir, "merged") - logger := logrus.WithField("storage-driver", "overlay2") - if count := d.ctr.Decrement(mountpoint); count > 0 { - return nil - } - if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { - logger.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) - } - // Remove the mountpoint here. Removing the mountpoint (in newer kernels) - // will cause all other instances of this mount in other mount namespaces - // to be unmounted. This is necessary to avoid cases where an overlay mount - // that is present in another namespace will cause subsequent mounts - // operations to fail with ebusy. We ignore any errors here because this may - // fail on older kernels which don't have - // torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied. - if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { - logger.Debugf("Failed to remove %s overlay: %v", id, err) - } - return nil -} - -// Exists checks to see if the id is already mounted. -func (d *Driver) Exists(id string) bool { - _, err := os.Stat(d.dir(id)) - return err == nil -} - -// isParent determines whether the given parent is the direct parent of the -// given layer id -func (d *Driver) isParent(id, parent string) bool { - lowers, err := d.getLowerDirs(id) - if err != nil { - return false - } - if parent == "" && len(lowers) > 0 { - return false - } - - parentDir := d.dir(parent) - var ld string - if len(lowers) > 0 { - ld = filepath.Dir(lowers[0]) - } - if ld == "" && parent == "" { - return true - } - return ld == parentDir -} - -// ApplyDiff applies the new layer into a root -func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) { - if !d.isParent(id, parent) { - return d.naiveDiff.ApplyDiff(id, parent, diff) - } - - applyDir := d.getDiffPath(id) - - logrus.WithField("storage-driver", "overlay2").Debugf("Applying tar in %s", applyDir) - // Overlay doesn't need the parent id to apply the diff - if err := untar(diff, applyDir, &archive.TarOptions{ - UIDMaps: d.uidMaps, - GIDMaps: d.gidMaps, - WhiteoutFormat: archive.OverlayWhiteoutFormat, - InUserNS: rsystem.RunningInUserNS(), - }); err != nil { - return 0, err - } - - return directory.Size(context.TODO(), applyDir) -} - -func (d *Driver) getDiffPath(id string) string { - dir := d.dir(id) - - return path.Join(dir, "diff") -} - -// DiffSize calculates the changes between the specified id -// and its parent and returns the size in bytes of the changes -// relative to its base filesystem directory. 
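-// DiffSize falls back to the naive diff driver whenever native overlay diffs -// are disabled (see useNaiveDiff) or the supplied parent is not the layer's -// direct parent.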
-func (d *Driver) DiffSize(id, parent string) (size int64, err error) { - if useNaiveDiff(d.home) || !d.isParent(id, parent) { - return d.naiveDiff.DiffSize(id, parent) - } - return directory.Size(context.TODO(), d.getDiffPath(id)) -} - -// Diff produces an archive of the changes between the specified -// layer and its parent layer which may be "". -func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) { - if useNaiveDiff(d.home) || !d.isParent(id, parent) { - return d.naiveDiff.Diff(id, parent) - } - - diffPath := d.getDiffPath(id) - logrus.WithField("storage-driver", "overlay2").Debugf("Tar with options on %s", diffPath) - return archive.TarWithOptions(diffPath, &archive.TarOptions{ - Compression: archive.Uncompressed, - UIDMaps: d.uidMaps, - GIDMaps: d.gidMaps, - WhiteoutFormat: archive.OverlayWhiteoutFormat, - }) -} - -// Changes produces a list of changes between the specified layer and its -// parent layer. If parent is "", then all changes will be ADD changes. -func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { - if useNaiveDiff(d.home) || !d.isParent(id, parent) { - return d.naiveDiff.Changes(id, parent) - } - // Overlay doesn't have snapshots, so we need to get changes from all parent - // layers. - diffPath := d.getDiffPath(id) - layers, err := d.getLowerDirs(id) - if err != nil { - return nil, err - } - - return archive.OverlayChanges(layers, diffPath) -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay_unsupported.go deleted file mode 100644 index 68b75a366..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/overlay_unsupported.go +++ /dev/null @@ -1,3 +0,0 @@ -// +build !linux - -package overlay2 // import "github.com/docker/docker/daemon/graphdriver/overlay2" diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/randomid.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/randomid.go deleted file mode 100644 index 842c06127..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/overlay2/randomid.go +++ /dev/null @@ -1,81 +0,0 @@ -// +build linux - -package overlay2 // import "github.com/docker/docker/daemon/graphdriver/overlay2" - -import ( - "crypto/rand" - "encoding/base32" - "fmt" - "io" - "os" - "syscall" - "time" - - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -// generateID creates a new random string identifier with the given length -func generateID(l int) string { - const ( - // ensures we backoff for less than 450ms total. Use the following to - // select new value, in units of 10ms: - // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 - maxretries = 9 - backoff = time.Millisecond * 10 - ) - - var ( - totalBackoff time.Duration - count int - retries int - size = (l*5 + 7) / 8 - u = make([]byte, size) - ) - // TODO: Include time component, counter component, random component - - for { - // This should never block but the read may fail. Because of this, - // we just try to read the random number generator until we get - // something. This is a very rare condition but may happen. - b := time.Duration(retries) * backoff - time.Sleep(b) - totalBackoff += b - - n, err := io.ReadFull(rand.Reader, u[count:]) - if err != nil { - if retryOnError(err) && retries < maxretries { - count += n - retries++ - logrus.Errorf("error generating version 4 uuid, retrying: %v", err) - continue - } - - // Any other errors represent a system problem. 
What did someone - // do to /dev/urandom? - panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) - } - - break - } - - s := base32.StdEncoding.EncodeToString(u) - - return s[:l] -} - -// retryOnError tries to detect whether or not retrying would be fruitful. -func retryOnError(err error) bool { - switch err := err.(type) { - case *os.PathError: - return retryOnError(err.Err) // unpack the target error - case syscall.Errno: - if err == unix.EPERM { - // EPERM represents an entropy pool exhaustion, a condition under - // which we backoff and retry. - return true - } - } - - return false -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlayutils/overlayutils.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlayutils/overlayutils.go deleted file mode 100644 index 71f6d2d46..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/overlayutils/overlayutils.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build linux - -package overlayutils // import "github.com/docker/docker/daemon/graphdriver/overlayutils" - -import ( - "fmt" - - "github.com/docker/docker/daemon/graphdriver" -) - -// ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type. -func ErrDTypeNotSupported(driver, backingFs string) error { - msg := fmt.Sprintf("%s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.", driver, backingFs) - if backingFs == "xfs" { - msg += " Reformat the filesystem with ftype=1 to enable d_type support." - } - - if backingFs == "extfs" { - msg += " Reformat the filesystem (or use tune2fs) with -O filetype flag to enable d_type support." - } - - msg += " Backing filesystems without d_type support are not supported." 
- - return graphdriver.NotSupportedError(msg) -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go b/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go deleted file mode 100644 index b0983c566..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go +++ /dev/null @@ -1,55 +0,0 @@ -package graphdriver // import "github.com/docker/docker/daemon/graphdriver" - -import ( - "fmt" - "path/filepath" - - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/plugingetter" - "github.com/docker/docker/pkg/plugins" - "github.com/docker/docker/plugin/v2" - "github.com/pkg/errors" -) - -func lookupPlugin(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { - if !config.ExperimentalEnabled { - return nil, fmt.Errorf("graphdriver plugins are only supported with experimental mode") - } - pl, err := pg.Get(name, "GraphDriver", plugingetter.Acquire) - if err != nil { - return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err) - } - return newPluginDriver(name, pl, config) -} - -func newPluginDriver(name string, pl plugingetter.CompatPlugin, config Options) (Driver, error) { - home := config.Root - if !pl.IsV1() { - if p, ok := pl.(*v2.Plugin); ok { - if p.PluginObj.Config.PropagatedMount != "" { - home = p.PluginObj.Config.PropagatedMount - } - } - } - - var proxy *graphDriverProxy - - switch pt := pl.(type) { - case plugingetter.PluginWithV1Client: - proxy = &graphDriverProxy{name, pl, Capabilities{}, pt.Client()} - case plugingetter.PluginAddr: - if pt.Protocol() != plugins.ProtocolSchemeHTTPV1 { - return nil, errors.Errorf("plugin protocol not supported: %s", pt.Protocol()) - } - addr := pt.Addr() - client, err := plugins.NewClientWithTimeout(addr.Network()+"://"+addr.String(), nil, pt.Timeout()) - if err != nil { - return nil, errors.Wrap(err, "error creating plugin client") - } - proxy = &graphDriverProxy{name, pl, Capabilities{}, client} - default: - return nil, errdefs.System(errors.Errorf("got unknown plugin type %T", pt)) - } - - return proxy, proxy.Init(filepath.Join(home, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go b/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go deleted file mode 100644 index cb350d807..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go +++ /dev/null @@ -1,264 +0,0 @@ -package graphdriver // import "github.com/docker/docker/daemon/graphdriver" - -import ( - "errors" - "fmt" - "io" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/containerfs" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/plugingetter" - "github.com/docker/docker/pkg/plugins" -) - -type graphDriverProxy struct { - name string - p plugingetter.CompatPlugin - caps Capabilities - client *plugins.Client -} - -type graphDriverRequest struct { - ID string `json:",omitempty"` - Parent string `json:",omitempty"` - MountLabel string `json:",omitempty"` - StorageOpt map[string]string `json:",omitempty"` -} - -type graphDriverResponse struct { - Err string `json:",omitempty"` - Dir string `json:",omitempty"` - Exists bool `json:",omitempty"` - Status [][2]string `json:",omitempty"` - Changes []archive.Change `json:",omitempty"` - Size int64 `json:",omitempty"` - Metadata map[string]string `json:",omitempty"` - Capabilities Capabilities `json:",omitempty"` -} - -type graphDriverInitRequest struct { - Home string - Opts []string 
`json:"Opts"` - UIDMaps []idtools.IDMap `json:"UIDMaps"` - GIDMaps []idtools.IDMap `json:"GIDMaps"` -} - -func (d *graphDriverProxy) Init(home string, opts []string, uidMaps, gidMaps []idtools.IDMap) error { - if !d.p.IsV1() { - if cp, ok := d.p.(plugingetter.CountedPlugin); ok { - // always acquire here, it will be cleaned up on daemon shutdown - cp.Acquire() - } - } - args := &graphDriverInitRequest{ - Home: home, - Opts: opts, - UIDMaps: uidMaps, - GIDMaps: gidMaps, - } - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Init", args, &ret); err != nil { - return err - } - if ret.Err != "" { - return errors.New(ret.Err) - } - caps, err := d.fetchCaps() - if err != nil { - return err - } - d.caps = caps - return nil -} - -func (d *graphDriverProxy) fetchCaps() (Capabilities, error) { - args := &graphDriverRequest{} - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Capabilities", args, &ret); err != nil { - if !plugins.IsNotFound(err) { - return Capabilities{}, err - } - } - return ret.Capabilities, nil -} - -func (d *graphDriverProxy) String() string { - return d.name -} - -func (d *graphDriverProxy) Capabilities() Capabilities { - return d.caps -} - -func (d *graphDriverProxy) CreateReadWrite(id, parent string, opts *CreateOpts) error { - return d.create("GraphDriver.CreateReadWrite", id, parent, opts) -} - -func (d *graphDriverProxy) Create(id, parent string, opts *CreateOpts) error { - return d.create("GraphDriver.Create", id, parent, opts) -} - -func (d *graphDriverProxy) create(method, id, parent string, opts *CreateOpts) error { - args := &graphDriverRequest{ - ID: id, - Parent: parent, - } - if opts != nil { - args.MountLabel = opts.MountLabel - args.StorageOpt = opts.StorageOpt - } - var ret graphDriverResponse - if err := d.client.Call(method, args, &ret); err != nil { - return err - } - if ret.Err != "" { - return errors.New(ret.Err) - } - return nil -} - -func (d *graphDriverProxy) Remove(id string) error { - args := &graphDriverRequest{ID: id} - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Remove", args, &ret); err != nil { - return err - } - if ret.Err != "" { - return errors.New(ret.Err) - } - return nil -} - -func (d *graphDriverProxy) Get(id, mountLabel string) (containerfs.ContainerFS, error) { - args := &graphDriverRequest{ - ID: id, - MountLabel: mountLabel, - } - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Get", args, &ret); err != nil { - return nil, err - } - var err error - if ret.Err != "" { - err = errors.New(ret.Err) - } - return containerfs.NewLocalContainerFS(d.p.ScopedPath(ret.Dir)), err -} - -func (d *graphDriverProxy) Put(id string) error { - args := &graphDriverRequest{ID: id} - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Put", args, &ret); err != nil { - return err - } - if ret.Err != "" { - return errors.New(ret.Err) - } - return nil -} - -func (d *graphDriverProxy) Exists(id string) bool { - args := &graphDriverRequest{ID: id} - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Exists", args, &ret); err != nil { - return false - } - return ret.Exists -} - -func (d *graphDriverProxy) Status() [][2]string { - args := &graphDriverRequest{} - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Status", args, &ret); err != nil { - return nil - } - return ret.Status -} - -func (d *graphDriverProxy) GetMetadata(id string) (map[string]string, error) { - args := &graphDriverRequest{ - ID: id, - } - var ret graphDriverResponse - 
if err := d.client.Call("GraphDriver.GetMetadata", args, &ret); err != nil { - return nil, err - } - if ret.Err != "" { - return nil, errors.New(ret.Err) - } - return ret.Metadata, nil -} - -func (d *graphDriverProxy) Cleanup() error { - if !d.p.IsV1() { - if cp, ok := d.p.(plugingetter.CountedPlugin); ok { - // always release - defer cp.Release() - } - } - - args := &graphDriverRequest{} - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Cleanup", args, &ret); err != nil { - return nil - } - if ret.Err != "" { - return errors.New(ret.Err) - } - return nil -} - -func (d *graphDriverProxy) Diff(id, parent string) (io.ReadCloser, error) { - args := &graphDriverRequest{ - ID: id, - Parent: parent, - } - body, err := d.client.Stream("GraphDriver.Diff", args) - if err != nil { - return nil, err - } - return body, nil -} - -func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) { - args := &graphDriverRequest{ - ID: id, - Parent: parent, - } - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Changes", args, &ret); err != nil { - return nil, err - } - if ret.Err != "" { - return nil, errors.New(ret.Err) - } - - return ret.Changes, nil -} - -func (d *graphDriverProxy) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { - var ret graphDriverResponse - if err := d.client.SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil { - return -1, err - } - if ret.Err != "" { - return -1, errors.New(ret.Err) - } - return ret.Size, nil -} - -func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) { - args := &graphDriverRequest{ - ID: id, - Parent: parent, - } - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.DiffSize", args, &ret); err != nil { - return -1, err - } - if ret.Err != "" { - return -1, errors.New(ret.Err) - } - return ret.Size, nil -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/quota/errors.go b/vendor/github.com/docker/docker/daemon/graphdriver/quota/errors.go deleted file mode 100644 index 68e797470..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/quota/errors.go +++ /dev/null @@ -1,19 +0,0 @@ -package quota // import "github.com/docker/docker/daemon/graphdriver/quota" - -import "github.com/docker/docker/errdefs" - -var ( - _ errdefs.ErrNotImplemented = (*errQuotaNotSupported)(nil) -) - -// ErrQuotaNotSupported indicates if were found the FS didn't have projects quotas available -var ErrQuotaNotSupported = errQuotaNotSupported{} - -type errQuotaNotSupported struct { -} - -func (e errQuotaNotSupported) NotImplemented() {} - -func (e errQuotaNotSupported) Error() string { - return "Filesystem does not support, or has not enabled quotas" -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/quota/projectquota.go b/vendor/github.com/docker/docker/daemon/graphdriver/quota/projectquota.go deleted file mode 100644 index 93e85823a..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/quota/projectquota.go +++ /dev/null @@ -1,384 +0,0 @@ -// +build linux - -// -// projectquota.go - implements XFS project quota controls -// for setting quota limits on a newly created directory. -// It currently supports the legacy XFS specific ioctls. 
-// -// TODO: use generic quota control ioctl FS_IOC_FS{GET,SET}XATTR -// for both xfs/ext4 for kernel version >= v4.5 -// - -package quota // import "github.com/docker/docker/daemon/graphdriver/quota" - -/* -#include -#include -#include -#include -#include - -#ifndef FS_XFLAG_PROJINHERIT -struct fsxattr { - __u32 fsx_xflags; - __u32 fsx_extsize; - __u32 fsx_nextents; - __u32 fsx_projid; - unsigned char fsx_pad[12]; -}; -#define FS_XFLAG_PROJINHERIT 0x00000200 -#endif -#ifndef FS_IOC_FSGETXATTR -#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr) -#endif -#ifndef FS_IOC_FSSETXATTR -#define FS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr) -#endif - -#ifndef PRJQUOTA -#define PRJQUOTA 2 -#endif -#ifndef XFS_PROJ_QUOTA -#define XFS_PROJ_QUOTA 2 -#endif -#ifndef Q_XSETPQLIM -#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA) -#endif -#ifndef Q_XGETPQUOTA -#define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA) -#endif - -const int Q_XGETQSTAT_PRJQUOTA = QCMD(Q_XGETQSTAT, PRJQUOTA); -*/ -import "C" -import ( - "fmt" - "io/ioutil" - "path" - "path/filepath" - "unsafe" - - rsystem "github.com/opencontainers/runc/libcontainer/system" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -// Quota limit params - currently we only control blocks hard limit -type Quota struct { - Size uint64 -} - -// Control - Context to be used by storage driver (e.g. overlay) -// who wants to apply project quotas to container dirs -type Control struct { - backingFsBlockDev string - nextProjectID uint32 - quotas map[string]uint32 -} - -// NewControl - initialize project quota support. -// Test to make sure that quota can be set on a test dir and find -// the first project id to be used for the next container create. -// -// Returns nil (and error) if project quota is not supported. -// -// First get the project id of the home directory. -// This test will fail if the backing fs is not xfs. -// -// xfs_quota tool can be used to assign a project id to the driver home directory, e.g.: -// echo 999:/var/lib/docker/overlay2 >> /etc/projects -// echo docker:999 >> /etc/projid -// xfs_quota -x -c 'project -s docker' / -// -// In that case, the home directory project id will be used as a "start offset" -// and all containers will be assigned larger project ids (e.g. >= 1000). -// This is a way to prevent xfs_quota management from conflicting with docker. -// -// Then try to create a test directory with the next project id and set a quota -// on it. If that works, continue to scan existing containers to map allocated -// project ids. -// -func NewControl(basePath string) (*Control, error) { - // - // If we are running in a user namespace quota won't be supported for - // now since makeBackingFsDev() will try to mknod(). 
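- // (An unprivileged user namespace cannot mknod(2) block device nodes, so - // the quotactl target node could not be created there.)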
- // - if rsystem.RunningInUserNS() { - return nil, ErrQuotaNotSupported - } - - // - // create backing filesystem device node - // - backingFsBlockDev, err := makeBackingFsDev(basePath) - if err != nil { - return nil, err - } - - // check if we can call quotactl with project quotas - // as a mechanism to determine (early) if we have support - hasQuotaSupport, err := hasQuotaSupport(backingFsBlockDev) - if err != nil { - return nil, err - } - if !hasQuotaSupport { - return nil, ErrQuotaNotSupported - } - - // - // Get project id of parent dir as minimal id to be used by driver - // - minProjectID, err := getProjectID(basePath) - if err != nil { - return nil, err - } - minProjectID++ - - // - // Test if filesystem supports project quotas by trying to set - // a quota on the first available project id - // - quota := Quota{ - Size: 0, - } - if err := setProjectQuota(backingFsBlockDev, minProjectID, quota); err != nil { - return nil, err - } - - q := Control{ - backingFsBlockDev: backingFsBlockDev, - nextProjectID: minProjectID + 1, - quotas: make(map[string]uint32), - } - - // - // get first project id to be used for next container - // - err = q.findNextProjectID(basePath) - if err != nil { - return nil, err - } - - logrus.Debugf("NewControl(%s): nextProjectID = %d", basePath, q.nextProjectID) - return &q, nil -} - -// SetQuota - assign a unique project id to directory and set the quota limits -// for that project id -func (q *Control) SetQuota(targetPath string, quota Quota) error { - - projectID, ok := q.quotas[targetPath] - if !ok { - projectID = q.nextProjectID - - // - // assign project id to new container directory - // - err := setProjectID(targetPath, projectID) - if err != nil { - return err - } - - q.quotas[targetPath] = projectID - q.nextProjectID++ - } - - // - // set the quota limit for the container's project id - // - logrus.Debugf("SetQuota(%s, %d): projectID=%d", targetPath, quota.Size, projectID) - return setProjectQuota(q.backingFsBlockDev, projectID, quota) -} - -// setProjectQuota - set the quota for project id on xfs block device -func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) error { - var d C.fs_disk_quota_t - d.d_version = C.FS_DQUOT_VERSION - d.d_id = C.__u32(projectID) - d.d_flags = C.XFS_PROJ_QUOTA - - d.d_fieldmask = C.FS_DQ_BHARD | C.FS_DQ_BSOFT - d.d_blk_hardlimit = C.__u64(quota.Size / 512) - d.d_blk_softlimit = d.d_blk_hardlimit - - var cs = C.CString(backingFsBlockDev) - defer C.free(unsafe.Pointer(cs)) - - _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM, - uintptr(unsafe.Pointer(cs)), uintptr(d.d_id), - uintptr(unsafe.Pointer(&d)), 0, 0) - if errno != 0 { - return fmt.Errorf("Failed to set quota limit for projid %d on %s: %v", - projectID, backingFsBlockDev, errno.Error()) - } - - return nil -} - -// GetQuota - get the quota limits of a directory that was configured with SetQuota -func (q *Control) GetQuota(targetPath string, quota *Quota) error { - - projectID, ok := q.quotas[targetPath] - if !ok { - return fmt.Errorf("quota not found for path : %s", targetPath) - } - - // - // get the quota limit for the container's project id - // - var d C.fs_disk_quota_t - - var cs = C.CString(q.backingFsBlockDev) - defer C.free(unsafe.Pointer(cs)) - - _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA, - uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)), - uintptr(unsafe.Pointer(&d)), 0, 0) - if errno != 0 { - return fmt.Errorf("Failed to get quota limit for projid %d on %s: %v", - projectID, 
q.backingFsBlockDev, errno.Error()) - } - quota.Size = uint64(d.d_blk_hardlimit) * 512 - - return nil -} - -// getProjectID - get the project id of path on xfs -func getProjectID(targetPath string) (uint32, error) { - dir, err := openDir(targetPath) - if err != nil { - return 0, err - } - defer closeDir(dir) - - var fsx C.struct_fsxattr - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, - uintptr(unsafe.Pointer(&fsx))) - if errno != 0 { - return 0, fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error()) - } - - return uint32(fsx.fsx_projid), nil -} - -// setProjectID - set the project id of path on xfs -func setProjectID(targetPath string, projectID uint32) error { - dir, err := openDir(targetPath) - if err != nil { - return err - } - defer closeDir(dir) - - var fsx C.struct_fsxattr - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, - uintptr(unsafe.Pointer(&fsx))) - if errno != 0 { - return fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error()) - } - fsx.fsx_projid = C.__u32(projectID) - fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT - _, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR, - uintptr(unsafe.Pointer(&fsx))) - if errno != 0 { - return fmt.Errorf("Failed to set projid for %s: %v", targetPath, errno.Error()) - } - - return nil -} - -// findNextProjectID - find the next project id to be used for containers -// by scanning driver home directory to find used project ids -func (q *Control) findNextProjectID(home string) error { - files, err := ioutil.ReadDir(home) - if err != nil { - return fmt.Errorf("read directory failed : %s", home) - } - for _, file := range files { - if !file.IsDir() { - continue - } - path := filepath.Join(home, file.Name()) - projid, err := getProjectID(path) - if err != nil { - return err - } - if projid > 0 { - q.quotas[path] = projid - } - if q.nextProjectID <= projid { - q.nextProjectID = projid + 1 - } - } - - return nil -} - -func free(p *C.char) { - C.free(unsafe.Pointer(p)) -} - -func openDir(path string) (*C.DIR, error) { - Cpath := C.CString(path) - defer free(Cpath) - - dir := C.opendir(Cpath) - if dir == nil { - return nil, fmt.Errorf("Can't open dir") - } - return dir, nil -} - -func closeDir(dir *C.DIR) { - if dir != nil { - C.closedir(dir) - } -} - -func getDirFd(dir *C.DIR) uintptr { - return uintptr(C.dirfd(dir)) -} - -// Get the backing block device of the driver home directory -// and create a block device node under the home directory -// to be used by quotactl commands -func makeBackingFsDev(home string) (string, error) { - var stat unix.Stat_t - if err := unix.Stat(home, &stat); err != nil { - return "", err - } - - backingFsBlockDev := path.Join(home, "backingFsBlockDev") - // Re-create just in case someone copied the home directory over to a new device - unix.Unlink(backingFsBlockDev) - err := unix.Mknod(backingFsBlockDev, unix.S_IFBLK|0600, int(stat.Dev)) - switch err { - case nil: - return backingFsBlockDev, nil - - case unix.ENOSYS: - return "", ErrQuotaNotSupported - - default: - return "", fmt.Errorf("Failed to mknod %s: %v", backingFsBlockDev, err) - } -} - -func hasQuotaSupport(backingFsBlockDev string) (bool, error) { - var cs = C.CString(backingFsBlockDev) - defer free(cs) - var qstat C.fs_quota_stat_t - - _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, uintptr(C.Q_XGETQSTAT_PRJQUOTA), uintptr(unsafe.Pointer(cs)), 0, uintptr(unsafe.Pointer(&qstat)), 0, 0) - if errno == 0 && qstat.qs_flags&C.FS_QUOTA_PDQ_ENFD > 
0 && qstat.qs_flags&C.FS_QUOTA_PDQ_ACCT > 0 { - return true, nil - } - - switch errno { - // These are the known fatal errors, consider all other errors (ENOTTY, etc.. not supporting quota) - case unix.EFAULT, unix.ENOENT, unix.ENOTBLK, unix.EPERM: - default: - return false, nil - } - - return false, errno -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_aufs.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_aufs.go deleted file mode 100644 index ec18d1d37..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_aufs.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !exclude_graphdriver_aufs,linux - -package register // import "github.com/docker/docker/daemon/graphdriver/register" - -import ( - // register the aufs graphdriver - _ "github.com/docker/docker/daemon/graphdriver/aufs" -) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_btrfs.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_btrfs.go deleted file mode 100644 index 2f8c67056..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_btrfs.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !exclude_graphdriver_btrfs,linux - -package register // import "github.com/docker/docker/daemon/graphdriver/register" - -import ( - // register the btrfs graphdriver - _ "github.com/docker/docker/daemon/graphdriver/btrfs" -) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_devicemapper.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_devicemapper.go deleted file mode 100644 index ccbb8bfab..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_devicemapper.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !exclude_graphdriver_devicemapper,!static_build,linux - -package register // import "github.com/docker/docker/daemon/graphdriver/register" - -import ( - // register the devmapper graphdriver - _ "github.com/docker/docker/daemon/graphdriver/devmapper" -) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go deleted file mode 100644 index a2e384d54..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !exclude_graphdriver_overlay,linux - -package register // import "github.com/docker/docker/daemon/graphdriver/register" - -import ( - // register the overlay graphdriver - _ "github.com/docker/docker/daemon/graphdriver/overlay" -) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay2.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay2.go deleted file mode 100644 index bcd2cee20..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_overlay2.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !exclude_graphdriver_overlay2,linux - -package register // import "github.com/docker/docker/daemon/graphdriver/register" - -import ( - // register the overlay2 graphdriver - _ "github.com/docker/docker/daemon/graphdriver/overlay2" -) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go deleted file mode 100644 index 26f33a21b..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_vfs.go +++ 
/dev/null @@ -1,6 +0,0 @@ -package register // import "github.com/docker/docker/daemon/graphdriver/register" - -import ( - // register vfs - _ "github.com/docker/docker/daemon/graphdriver/vfs" -) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_windows.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_windows.go deleted file mode 100644 index cd612cbea..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package register // import "github.com/docker/docker/daemon/graphdriver/register" - -import ( - // register the windows graph drivers - _ "github.com/docker/docker/daemon/graphdriver/lcow" - _ "github.com/docker/docker/daemon/graphdriver/windows" -) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_zfs.go b/vendor/github.com/docker/docker/daemon/graphdriver/register/register_zfs.go deleted file mode 100644 index b137ad25b..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/register/register_zfs.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd - -package register // import "github.com/docker/docker/daemon/graphdriver/register" - -import ( - // register the zfs driver - _ "github.com/docker/docker/daemon/graphdriver/zfs" -) diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/vfs/copy_linux.go b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/copy_linux.go deleted file mode 100644 index 7276b3837..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/vfs/copy_linux.go +++ /dev/null @@ -1,7 +0,0 @@ -package vfs // import "github.com/docker/docker/daemon/graphdriver/vfs" - -import "github.com/docker/docker/daemon/graphdriver/copy" - -func dirCopy(srcDir, dstDir string) error { - return copy.DirCopy(srcDir, dstDir, copy.Content, false) -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/vfs/copy_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/copy_unsupported.go deleted file mode 100644 index 894ff02f0..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/vfs/copy_unsupported.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !linux - -package vfs // import "github.com/docker/docker/daemon/graphdriver/vfs" - -import "github.com/docker/docker/pkg/chrootarchive" - -func dirCopy(srcDir, dstDir string) error { - return chrootarchive.NewArchiver(nil).CopyWithTar(srcDir, dstDir) -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/vfs/driver.go b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/driver.go deleted file mode 100644 index e51cb6c25..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/vfs/driver.go +++ /dev/null @@ -1,167 +0,0 @@ -package vfs // import "github.com/docker/docker/daemon/graphdriver/vfs" - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/daemon/graphdriver/quota" - "github.com/docker/docker/pkg/containerfs" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/system" - "github.com/docker/go-units" - "github.com/opencontainers/selinux/go-selinux/label" -) - -var ( - // CopyDir defines the copy method to use. - CopyDir = dirCopy -) - -func init() { - graphdriver.Register("vfs", Init) -} - -// Init returns a new VFS driver. -// This sets the home directory for the driver and returns NaiveDiffDriver. 
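The register_* files removed above exist only for their import side effects: each driver package calls graphdriver.Register from an init function, so a blank import (guarded by a build tag such as !exclude_graphdriver_btrfs) is all it takes to compile a driver in or out. A minimal sketch of that registry pattern; the package name, InitFunc signature, and GetDriver helper here are illustrative, not the actual graphdriver API.

package registry // hypothetical registry package mirroring graphdriver.Register

import (
	"fmt"
	"sync"
)

// InitFunc constructs a driver rooted at home; the concrete signature
// is illustrative only.
type InitFunc func(home string) (interface{}, error)

var (
	mu      sync.Mutex
	drivers = make(map[string]InitFunc)
)

// Register is called from a driver package's init(), so a blank
// import is enough to make the driver available at runtime.
func Register(name string, initFunc InitFunc) {
	mu.Lock()
	defer mu.Unlock()
	if _, exists := drivers[name]; exists {
		panic(fmt.Sprintf("driver %q registered twice", name))
	}
	drivers[name] = initFunc
}

// GetDriver looks a registered driver up by name and initializes it.
func GetDriver(name, home string) (interface{}, error) {
	mu.Lock()
	initFunc, ok := drivers[name]
	mu.Unlock()
	if !ok {
		return nil, fmt.Errorf("driver %q is not registered", name)
	}
	return initFunc(home)
}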
-func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - d := &Driver{ - home: home, - idMappings: idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), - } - rootIDs := d.idMappings.RootPair() - if err := idtools.MkdirAllAndChown(home, 0700, rootIDs); err != nil { - return nil, err - } - - setupDriverQuota(d) - - return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil -} - -// Driver holds information about the driver, home directory of the driver. -// Driver implements graphdriver.ProtoDriver. It uses only basic vfs operations. -// In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support. -// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver -type Driver struct { - driverQuota - home string - idMappings *idtools.IDMappings -} - -func (d *Driver) String() string { - return "vfs" -} - -// Status is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any status information. -func (d *Driver) Status() [][2]string { - return nil -} - -// GetMetadata is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any meta data. -func (d *Driver) GetMetadata(id string) (map[string]string, error) { - return nil, nil -} - -// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. -func (d *Driver) Cleanup() error { - return nil -} - -// CreateReadWrite creates a layer that is writable for use as a container -// file system. -func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - var err error - var size int64 - - if opts != nil { - for key, val := range opts.StorageOpt { - switch key { - case "size": - if !d.quotaSupported() { - return quota.ErrQuotaNotSupported - } - if size, err = units.RAMInBytes(val); err != nil { - return err - } - default: - return fmt.Errorf("Storage opt %s not supported", key) - } - } - } - - return d.create(id, parent, uint64(size)) -} - -// Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent. -func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { - if opts != nil && len(opts.StorageOpt) != 0 { - return fmt.Errorf("--storage-opt is not supported for vfs on read-only layers") - } - - return d.create(id, parent, 0) -} - -func (d *Driver) create(id, parent string, size uint64) error { - dir := d.dir(id) - rootIDs := d.idMappings.RootPair() - if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0700, rootIDs); err != nil { - return err - } - if err := idtools.MkdirAndChown(dir, 0755, rootIDs); err != nil { - return err - } - - if size != 0 { - if err := d.setupQuota(dir, size); err != nil { - return err - } - } - - labelOpts := []string{"level:s0"} - if _, mountLabel, err := label.InitLabels(labelOpts); err == nil { - label.SetFileLabel(dir, mountLabel) - } - if parent == "" { - return nil - } - parentDir, err := d.Get(parent, "") - if err != nil { - return fmt.Errorf("%s: %s", parent, err) - } - return CopyDir(parentDir.Path(), dir) -} - -func (d *Driver) dir(id string) string { - return filepath.Join(d.home, "dir", filepath.Base(id)) -} - -// Remove deletes the content from the directory for a given id. -func (d *Driver) Remove(id string) error { - return system.EnsureRemoveAll(d.dir(id)) -} - -// Get returns the directory for the given id. 
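CreateReadWrite above accepts a single storage option, size, parsing it with go-units and enforcing it through the quota package whose xfs project-quota plumbing was deleted earlier in this patch. A condensed sketch of that path, using the quota.NewControl and SetQuota calls shown in quota_linux.go below; the paths are illustrative, and NewControl fails with quota.ErrQuotaNotSupported unless home sits on xfs mounted with pquota.

package main

import (
	"fmt"

	"github.com/docker/docker/daemon/graphdriver/quota"
	units "github.com/docker/go-units"
)

// applySizeOpt parses a human-readable size such as "10G" and applies
// it as an xfs project quota on layerDir.
func applySizeOpt(home, layerDir, val string) error {
	size, err := units.RAMInBytes(val) // "10G" -> 10737418240
	if err != nil {
		return err
	}
	ctl, err := quota.NewControl(home) // ErrQuotaNotSupported off xfs+pquota
	if err != nil {
		return err
	}
	return ctl.SetQuota(layerDir, quota.Quota{Size: uint64(size)})
}

func main() {
	fmt.Println(applySizeOpt("/var/lib/docker/vfs", "/var/lib/docker/vfs/dir/abc", "10G"))
}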
-func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) { - dir := d.dir(id) - if st, err := os.Stat(dir); err != nil { - return nil, err - } else if !st.IsDir() { - return nil, fmt.Errorf("%s: not a directory", dir) - } - return containerfs.NewLocalContainerFS(dir), nil -} - -// Put is a noop for vfs that return nil for the error, since this driver has no runtime resources to clean up. -func (d *Driver) Put(id string) error { - // The vfs driver has no runtime resources (e.g. mounts) - // to clean up, so we don't need anything here - return nil -} - -// Exists checks to see if the directory exists for the given id. -func (d *Driver) Exists(id string) bool { - _, err := os.Stat(d.dir(id)) - return err == nil -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/vfs/quota_linux.go b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/quota_linux.go deleted file mode 100644 index 0d5c3a7b9..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/vfs/quota_linux.go +++ /dev/null @@ -1,26 +0,0 @@ -package vfs // import "github.com/docker/docker/daemon/graphdriver/vfs" - -import ( - "github.com/docker/docker/daemon/graphdriver/quota" - "github.com/sirupsen/logrus" -) - -type driverQuota struct { - quotaCtl *quota.Control -} - -func setupDriverQuota(driver *Driver) { - if quotaCtl, err := quota.NewControl(driver.home); err == nil { - driver.quotaCtl = quotaCtl - } else if err != quota.ErrQuotaNotSupported { - logrus.Warnf("Unable to setup quota: %v\n", err) - } -} - -func (d *Driver) setupQuota(dir string, size uint64) error { - return d.quotaCtl.SetQuota(dir, quota.Quota{Size: size}) -} - -func (d *Driver) quotaSupported() bool { - return d.quotaCtl != nil -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/vfs/quota_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/vfs/quota_unsupported.go deleted file mode 100644 index 3ae60ac07..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/vfs/quota_unsupported.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build !linux - -package vfs // import "github.com/docker/docker/daemon/graphdriver/vfs" - -import "github.com/docker/docker/daemon/graphdriver/quota" - -type driverQuota struct { -} - -func setupDriverQuota(driver *Driver) error { - return nil -} - -func (d *Driver) setupQuota(dir string, size uint64) error { - return quota.ErrQuotaNotSupported -} - -func (d *Driver) quotaSupported() bool { - return false -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/windows/windows.go b/vendor/github.com/docker/docker/daemon/graphdriver/windows/windows.go deleted file mode 100644 index 16a522920..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/windows/windows.go +++ /dev/null @@ -1,942 +0,0 @@ -//+build windows - -package windows // import "github.com/docker/docker/daemon/graphdriver/windows" - -import ( - "bufio" - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path" - "path/filepath" - "strconv" - "strings" - "sync" - "syscall" - "time" - "unsafe" - - "github.com/Microsoft/go-winio" - "github.com/Microsoft/go-winio/archive/tar" - "github.com/Microsoft/go-winio/backuptar" - "github.com/Microsoft/hcsshim" - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/containerfs" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/longpath" - "github.com/docker/docker/pkg/reexec" - 
"github.com/docker/docker/pkg/system" - units "github.com/docker/go-units" - "github.com/sirupsen/logrus" - "golang.org/x/sys/windows" -) - -// filterDriver is an HCSShim driver type for the Windows Filter driver. -const filterDriver = 1 - -var ( - // mutatedFiles is a list of files that are mutated by the import process - // and must be backed up and restored. - mutatedFiles = map[string]string{ - "UtilityVM/Files/EFI/Microsoft/Boot/BCD": "bcd.bak", - "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG": "bcd.log.bak", - "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG1": "bcd.log1.bak", - "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG2": "bcd.log2.bak", - } - noreexec = false -) - -// init registers the windows graph drivers to the register. -func init() { - graphdriver.Register("windowsfilter", InitFilter) - // DOCKER_WINDOWSFILTER_NOREEXEC allows for inline processing which makes - // debugging issues in the re-exec codepath significantly easier. - if os.Getenv("DOCKER_WINDOWSFILTER_NOREEXEC") != "" { - logrus.Warnf("WindowsGraphDriver is set to not re-exec. This is intended for debugging purposes only.") - noreexec = true - } else { - reexec.Register("docker-windows-write-layer", writeLayerReexec) - } -} - -type checker struct { -} - -func (c *checker) IsMounted(path string) bool { - return false -} - -// Driver represents a windows graph driver. -type Driver struct { - // info stores the shim driver information - info hcsshim.DriverInfo - ctr *graphdriver.RefCounter - // it is safe for windows to use a cache here because it does not support - // restoring containers when the daemon dies. - cacheMu sync.Mutex - cache map[string]string -} - -// InitFilter returns a new Windows storage filter driver. -func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - logrus.Debugf("WindowsGraphDriver InitFilter at %s", home) - - fsType, err := getFileSystemType(string(home[0])) - if err != nil { - return nil, err - } - if strings.ToLower(fsType) == "refs" { - return nil, fmt.Errorf("%s is on an ReFS volume - ReFS volumes are not supported", home) - } - - if err := idtools.MkdirAllAndChown(home, 0700, idtools.IDPair{UID: 0, GID: 0}); err != nil { - return nil, fmt.Errorf("windowsfilter failed to create '%s': %v", home, err) - } - - d := &Driver{ - info: hcsshim.DriverInfo{ - HomeDir: home, - Flavour: filterDriver, - }, - cache: make(map[string]string), - ctr: graphdriver.NewRefCounter(&checker{}), - } - return d, nil -} - -// win32FromHresult is a helper function to get the win32 error code from an HRESULT -func win32FromHresult(hr uintptr) uintptr { - if hr&0x1fff0000 == 0x00070000 { - return hr & 0xffff - } - return hr -} - -// getFileSystemType obtains the type of a file system through GetVolumeInformation -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364993(v=vs.85).aspx -func getFileSystemType(drive string) (fsType string, hr error) { - var ( - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - procGetVolumeInformation = modkernel32.NewProc("GetVolumeInformationW") - buf = make([]uint16, 255) - size = windows.MAX_PATH + 1 - ) - if len(drive) != 1 { - hr = errors.New("getFileSystemType must be called with a drive letter") - return - } - drive += `:\` - n := uintptr(unsafe.Pointer(nil)) - r0, _, _ := syscall.Syscall9(procGetVolumeInformation.Addr(), 8, uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(drive))), n, n, n, n, n, uintptr(unsafe.Pointer(&buf[0])), uintptr(size), 0) - if int32(r0) < 0 { - hr = 
syscall.Errno(win32FromHresult(r0)) - } - fsType = windows.UTF16ToString(buf) - return -} - -// String returns the string representation of a driver. This should match -// the name the graph driver has been registered with. -func (d *Driver) String() string { - return "windowsfilter" -} - -// Status returns the status of the driver. -func (d *Driver) Status() [][2]string { - return [][2]string{ - {"Windows", ""}, - } -} - -// Exists returns true if the given id is registered with this driver. -func (d *Driver) Exists(id string) bool { - rID, err := d.resolveID(id) - if err != nil { - return false - } - result, err := hcsshim.LayerExists(d.info, rID) - if err != nil { - return false - } - return result -} - -// CreateReadWrite creates a layer that is writable for use as a container -// file system. -func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - if opts != nil { - return d.create(id, parent, opts.MountLabel, false, opts.StorageOpt) - } - return d.create(id, parent, "", false, nil) -} - -// Create creates a new read-only layer with the given id. -func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { - if opts != nil { - return d.create(id, parent, opts.MountLabel, true, opts.StorageOpt) - } - return d.create(id, parent, "", true, nil) -} - -func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt map[string]string) error { - rPId, err := d.resolveID(parent) - if err != nil { - return err - } - - parentChain, err := d.getLayerChain(rPId) - if err != nil { - return err - } - - var layerChain []string - - if rPId != "" { - parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) - if err != nil { - return err - } - if _, err := os.Stat(filepath.Join(parentPath, "Files")); err == nil { - // This is a legitimate parent layer (not the empty "-init" layer), - // so include it in the layer chain. - layerChain = []string{parentPath} - } - } - - layerChain = append(layerChain, parentChain...) - - if readOnly { - if err := hcsshim.CreateLayer(d.info, id, rPId); err != nil { - return err - } - } else { - var parentPath string - if len(layerChain) != 0 { - parentPath = layerChain[0] - } - - if err := hcsshim.CreateSandboxLayer(d.info, id, parentPath, layerChain); err != nil { - return err - } - - storageOptions, err := parseStorageOpt(storageOpt) - if err != nil { - return fmt.Errorf("Failed to parse storage options - %s", err) - } - - if storageOptions.size != 0 { - if err := hcsshim.ExpandSandboxSize(d.info, id, storageOptions.size); err != nil { - return err - } - } - } - - if _, err := os.Lstat(d.dir(parent)); err != nil { - if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { - logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) - } - return fmt.Errorf("Cannot create layer with missing parent %s: %s", parent, err) - } - - if err := d.setLayerChain(id, layerChain); err != nil { - if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { - logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) - } - return err - } - - return nil -} - -// dir returns the absolute path to the layer. -func (d *Driver) dir(id string) string { - return filepath.Join(d.info.HomeDir, filepath.Base(id)) -} - -// Remove unmounts and removes the dir information. 
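getFileSystemType above drives GetVolumeInformationW by hand through Syscall9. For comparison, a sketch of the same ReFS probe using the typed GetVolumeInformation wrapper from golang.org/x/sys/windows; this is Windows-only, and the drive-letter input convention is assumed to match the function above.

package main

import (
	"fmt"
	"strings"

	"golang.org/x/sys/windows"
)

// fsTypeOfDrive returns the filesystem name ("NTFS", "ReFS", ...) of
// a drive letter such as "C".
func fsTypeOfDrive(drive string) (string, error) {
	root, err := windows.UTF16PtrFromString(drive + `:\`)
	if err != nil {
		return "", err
	}
	buf := make([]uint16, windows.MAX_PATH+1)
	// Only the filesystem-name output buffer is requested here.
	if err := windows.GetVolumeInformation(root, nil, 0, nil, nil, nil, &buf[0], uint32(len(buf))); err != nil {
		return "", err
	}
	return windows.UTF16ToString(buf), nil
}

func main() {
	fs, err := fsTypeOfDrive("C")
	fmt.Println(fs, err, "refs?", strings.EqualFold(fs, "refs"))
}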
-func (d *Driver) Remove(id string) error { - rID, err := d.resolveID(id) - if err != nil { - return err - } - - // This retry loop is due to a bug in Windows (Internal bug #9432268) - // if GetContainers fails with ErrVmcomputeOperationInvalidState - // it is a transient error. Retry until it succeeds. - var computeSystems []hcsshim.ContainerProperties - retryCount := 0 - osv := system.GetOSVersion() - for { - // Get and terminate any template VMs that are currently using the layer. - // Note: It is unfortunate that we end up in the graphdrivers Remove() call - // for both containers and images, but the logic for template VMs is only - // needed for images - specifically we are looking to see if a base layer - // is in use by a template VM as a result of having started a Hyper-V - // container at some point. - // - // We have a retry loop for ErrVmcomputeOperationInvalidState and - // ErrVmcomputeOperationAccessIsDenied as there is a race condition - // in RS1 and RS2 building during enumeration when a silo is going away - // for example under it, in HCS. AccessIsDenied added to fix 30278. - // - // TODO @jhowardmsft - For RS3, we can remove the retries. Also consider - // using platform APIs (if available) to get this more succinctly. Also - // consider enhancing the Remove() interface to have context of why - // the remove is being called - that could improve efficiency by not - // enumerating compute systems during a remove of a container as it's - // not required. - computeSystems, err = hcsshim.GetContainers(hcsshim.ComputeSystemQuery{}) - if err != nil { - if (osv.Build < 15139) && - ((err == hcsshim.ErrVmcomputeOperationInvalidState) || (err == hcsshim.ErrVmcomputeOperationAccessIsDenied)) { - if retryCount >= 500 { - break - } - retryCount++ - time.Sleep(10 * time.Millisecond) - continue - } - return err - } - break - } - - for _, computeSystem := range computeSystems { - if strings.Contains(computeSystem.RuntimeImagePath, id) && computeSystem.IsRuntimeTemplate { - container, err := hcsshim.OpenContainer(computeSystem.ID) - if err != nil { - return err - } - defer container.Close() - err = container.Terminate() - if hcsshim.IsPending(err) { - err = container.Wait() - } else if hcsshim.IsAlreadyStopped(err) { - err = nil - } - - if err != nil { - return err - } - } - } - - layerPath := filepath.Join(d.info.HomeDir, rID) - tmpID := fmt.Sprintf("%s-removing", rID) - tmpLayerPath := filepath.Join(d.info.HomeDir, tmpID) - if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) { - return err - } - if err := hcsshim.DestroyLayer(d.info, tmpID); err != nil { - logrus.Errorf("Failed to DestroyLayer %s: %s", id, err) - } - - return nil -} - -// GetLayerPath gets the layer path on host -func (d *Driver) GetLayerPath(id string) (string, error) { - return d.dir(id), nil -} - -// Get returns the rootfs path for the id. This will mount the dir at its given path. -func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) { - logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel) - var dir string - - rID, err := d.resolveID(id) - if err != nil { - return nil, err - } - if count := d.ctr.Increment(rID); count > 1 { - return containerfs.NewLocalContainerFS(d.cache[rID]), nil - } - - // Getting the layer paths must be done outside of the lock. 
- layerChain, err := d.getLayerChain(rID) - if err != nil { - d.ctr.Decrement(rID) - return nil, err - } - - if err := hcsshim.ActivateLayer(d.info, rID); err != nil { - d.ctr.Decrement(rID) - return nil, err - } - if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { - d.ctr.Decrement(rID) - if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { - logrus.Warnf("Failed to Deactivate %s: %s", id, err) - } - return nil, err - } - - mountPath, err := hcsshim.GetLayerMountPath(d.info, rID) - if err != nil { - d.ctr.Decrement(rID) - if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { - logrus.Warnf("Failed to Unprepare %s: %s", id, err) - } - if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { - logrus.Warnf("Failed to Deactivate %s: %s", id, err) - } - return nil, err - } - d.cacheMu.Lock() - d.cache[rID] = mountPath - d.cacheMu.Unlock() - - // If the layer has a mount path, use that. Otherwise, use the - // folder path. - if mountPath != "" { - dir = mountPath - } else { - dir = d.dir(id) - } - - return containerfs.NewLocalContainerFS(dir), nil -} - -// Put adds a new layer to the driver. -func (d *Driver) Put(id string) error { - logrus.Debugf("WindowsGraphDriver Put() id %s", id) - - rID, err := d.resolveID(id) - if err != nil { - return err - } - if count := d.ctr.Decrement(rID); count > 0 { - return nil - } - d.cacheMu.Lock() - _, exists := d.cache[rID] - delete(d.cache, rID) - d.cacheMu.Unlock() - - // If the cache was not populated, then the layer was left unprepared and deactivated - if !exists { - return nil - } - - if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { - return err - } - return hcsshim.DeactivateLayer(d.info, rID) -} - -// Cleanup ensures the information the driver stores is properly removed. -// We use this opportunity to cleanup any -removing folders which may be -// still left if the daemon was killed while it was removing a layer. -func (d *Driver) Cleanup() error { - items, err := ioutil.ReadDir(d.info.HomeDir) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - - // Note we don't return an error below - it's possible the files - // are locked. However, next time around after the daemon exits, - // we likely will be able to to cleanup successfully. Instead we log - // warnings if there are errors. - for _, item := range items { - if item.IsDir() && strings.HasSuffix(item.Name(), "-removing") { - if err := hcsshim.DestroyLayer(d.info, item.Name()); err != nil { - logrus.Warnf("Failed to cleanup %s: %s", item.Name(), err) - } else { - logrus.Infof("Cleaned up %s", item.Name()) - } - } - } - - return nil -} - -// Diff produces an archive of the changes between the specified -// layer and its parent layer which may be "". 
-// The layer should be mounted when calling this function -func (d *Driver) Diff(id, parent string) (_ io.ReadCloser, err error) { - rID, err := d.resolveID(id) - if err != nil { - return - } - - layerChain, err := d.getLayerChain(rID) - if err != nil { - return - } - - // this is assuming that the layer is unmounted - if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { - return nil, err - } - prepare := func() { - if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { - logrus.Warnf("Failed to Deactivate %s: %s", rID, err) - } - } - - arch, err := d.exportLayer(rID, layerChain) - if err != nil { - prepare() - return - } - return ioutils.NewReadCloserWrapper(arch, func() error { - err := arch.Close() - prepare() - return err - }), nil -} - -// Changes produces a list of changes between the specified layer -// and its parent layer. If parent is "", then all changes will be ADD changes. -// The layer should not be mounted when calling this function. -func (d *Driver) Changes(id, parent string) ([]archive.Change, error) { - rID, err := d.resolveID(id) - if err != nil { - return nil, err - } - parentChain, err := d.getLayerChain(rID) - if err != nil { - return nil, err - } - - if err := hcsshim.ActivateLayer(d.info, rID); err != nil { - return nil, err - } - defer func() { - if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { - logrus.Errorf("changes() failed to DeactivateLayer %s %s: %s", id, rID, err2) - } - }() - - var changes []archive.Change - err = winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { - r, err := hcsshim.NewLayerReader(d.info, id, parentChain) - if err != nil { - return err - } - defer r.Close() - - for { - name, _, fileInfo, err := r.Next() - if err == io.EOF { - return nil - } - if err != nil { - return err - } - name = filepath.ToSlash(name) - if fileInfo == nil { - changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeDelete}) - } else { - // Currently there is no way to tell between an add and a modify. - changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeModify}) - } - } - }) - if err != nil { - return nil, err - } - - return changes, nil -} - -// ApplyDiff extracts the changeset from the given diff into the -// layer with the specified id and parent, returning the size of the -// new layer in bytes. -// The layer should not be mounted when calling this function -func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { - var layerChain []string - if parent != "" { - rPId, err := d.resolveID(parent) - if err != nil { - return 0, err - } - parentChain, err := d.getLayerChain(rPId) - if err != nil { - return 0, err - } - parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) - if err != nil { - return 0, err - } - layerChain = append(layerChain, parentPath) - layerChain = append(layerChain, parentChain...) - } - - size, err := d.importLayer(id, diff, layerChain) - if err != nil { - return 0, err - } - - if err = d.setLayerChain(id, layerChain); err != nil { - return 0, err - } - - return size, nil -} - -// DiffSize calculates the changes between the specified layer -// and its parent and returns the size in bytes of the changes -// relative to its base filesystem directory. 
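Diff and ApplyDiff round-trip deletions through the whiteout convention implemented by writeTarFromLayer and writeLayerFromTar below: a deleted path travels in the tar stream as an empty entry whose base name carries the .wh. prefix (archive.WhiteoutPrefix). A self-contained sketch of just the naming:

package main

import (
	"fmt"
	"path"
	"strings"
)

// whiteoutPrefix mirrors archive.WhiteoutPrefix (".wh.").
const whiteoutPrefix = ".wh."

// toWhiteout names the tar entry that encodes deletion of name.
func toWhiteout(name string) string {
	return path.Join(path.Dir(name), whiteoutPrefix+path.Base(name))
}

// fromWhiteout reports whether entry encodes a deletion and, if so,
// which path it deletes.
func fromWhiteout(entry string) (string, bool) {
	base := path.Base(entry)
	if !strings.HasPrefix(base, whiteoutPrefix) {
		return "", false
	}
	return path.Join(path.Dir(entry), base[len(whiteoutPrefix):]), true
}

func main() {
	fmt.Println(toWhiteout("Files/Windows/foo.dll")) // Files/Windows/.wh.foo.dll
	fmt.Println(fromWhiteout("Files/Windows/.wh.foo.dll"))
}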
-func (d *Driver) DiffSize(id, parent string) (size int64, err error) { - rPId, err := d.resolveID(parent) - if err != nil { - return - } - - changes, err := d.Changes(id, rPId) - if err != nil { - return - } - - layerFs, err := d.Get(id, "") - if err != nil { - return - } - defer d.Put(id) - - return archive.ChangesSize(layerFs.Path(), changes), nil -} - -// GetMetadata returns custom driver information. -func (d *Driver) GetMetadata(id string) (map[string]string, error) { - m := make(map[string]string) - m["dir"] = d.dir(id) - return m, nil -} - -func writeTarFromLayer(r hcsshim.LayerReader, w io.Writer) error { - t := tar.NewWriter(w) - for { - name, size, fileInfo, err := r.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - if fileInfo == nil { - // Write a whiteout file. - hdr := &tar.Header{ - Name: filepath.ToSlash(filepath.Join(filepath.Dir(name), archive.WhiteoutPrefix+filepath.Base(name))), - } - err := t.WriteHeader(hdr) - if err != nil { - return err - } - } else { - err = backuptar.WriteTarFileFromBackupStream(t, r, name, size, fileInfo) - if err != nil { - return err - } - } - } - return t.Close() -} - -// exportLayer generates an archive from a layer based on the given ID. -func (d *Driver) exportLayer(id string, parentLayerPaths []string) (io.ReadCloser, error) { - archive, w := io.Pipe() - go func() { - err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { - r, err := hcsshim.NewLayerReader(d.info, id, parentLayerPaths) - if err != nil { - return err - } - - err = writeTarFromLayer(r, w) - cerr := r.Close() - if err == nil { - err = cerr - } - return err - }) - w.CloseWithError(err) - }() - - return archive, nil -} - -// writeBackupStreamFromTarAndSaveMutatedFiles reads data from a tar stream and -// writes it to a backup stream, and also saves any files that will be mutated -// by the import layer process to a backup location. 
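exportLayer above hands the caller one end of an io.Pipe and produces the archive from a goroutine, so the tar stream is generated only as fast as it is read and any production error surfaces as the reader's error. The pattern in isolation:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
)

// export returns a ReadCloser whose contents are produced lazily by a
// goroutine; any production error becomes the reader's error.
func export(produce func(w io.Writer) error) io.ReadCloser {
	r, w := io.Pipe()
	go func() {
		// CloseWithError(nil) behaves like Close: readers see io.EOF.
		w.CloseWithError(produce(w))
	}()
	return r
}

func main() {
	rc := export(func(w io.Writer) error {
		_, err := io.WriteString(w, "layer bytes...")
		return err
	})
	defer rc.Close()
	b, err := ioutil.ReadAll(rc)
	fmt.Println(string(b), err)
}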
-func writeBackupStreamFromTarAndSaveMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) { - var bcdBackup *os.File - var bcdBackupWriter *winio.BackupFileWriter - if backupPath, ok := mutatedFiles[hdr.Name]; ok { - bcdBackup, err = os.Create(filepath.Join(root, backupPath)) - if err != nil { - return nil, err - } - defer func() { - cerr := bcdBackup.Close() - if err == nil { - err = cerr - } - }() - - bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false) - defer func() { - cerr := bcdBackupWriter.Close() - if err == nil { - err = cerr - } - }() - - buf.Reset(io.MultiWriter(w, bcdBackupWriter)) - } else { - buf.Reset(w) - } - - defer func() { - ferr := buf.Flush() - if err == nil { - err = ferr - } - }() - - return backuptar.WriteBackupStreamFromTarFile(buf, t, hdr) -} - -func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64, error) { - t := tar.NewReader(r) - hdr, err := t.Next() - totalSize := int64(0) - buf := bufio.NewWriter(nil) - for err == nil { - base := path.Base(hdr.Name) - if strings.HasPrefix(base, archive.WhiteoutPrefix) { - name := path.Join(path.Dir(hdr.Name), base[len(archive.WhiteoutPrefix):]) - err = w.Remove(filepath.FromSlash(name)) - if err != nil { - return 0, err - } - hdr, err = t.Next() - } else if hdr.Typeflag == tar.TypeLink { - err = w.AddLink(filepath.FromSlash(hdr.Name), filepath.FromSlash(hdr.Linkname)) - if err != nil { - return 0, err - } - hdr, err = t.Next() - } else { - var ( - name string - size int64 - fileInfo *winio.FileBasicInfo - ) - name, size, fileInfo, err = backuptar.FileInfoFromHeader(hdr) - if err != nil { - return 0, err - } - err = w.Add(filepath.FromSlash(name), fileInfo) - if err != nil { - return 0, err - } - hdr, err = writeBackupStreamFromTarAndSaveMutatedFiles(buf, w, t, hdr, root) - totalSize += size - } - } - if err != io.EOF { - return 0, err - } - return totalSize, nil -} - -// importLayer adds a new layer to the tag and graph store based on the given data. -func (d *Driver) importLayer(id string, layerData io.Reader, parentLayerPaths []string) (size int64, err error) { - if !noreexec { - cmd := reexec.Command(append([]string{"docker-windows-write-layer", d.info.HomeDir, id}, parentLayerPaths...)...) - output := bytes.NewBuffer(nil) - cmd.Stdin = layerData - cmd.Stdout = output - cmd.Stderr = output - - if err = cmd.Start(); err != nil { - return - } - - if err = cmd.Wait(); err != nil { - return 0, fmt.Errorf("re-exec error: %v: output: %s", err, output) - } - - return strconv.ParseInt(output.String(), 10, 64) - } - return writeLayer(layerData, d.info.HomeDir, id, parentLayerPaths...) -} - -// writeLayerReexec is the re-exec entry point for writing a layer from a tar file -func writeLayerReexec() { - size, err := writeLayer(os.Stdin, os.Args[1], os.Args[2], os.Args[3:]...) - if err != nil { - fmt.Fprint(os.Stderr, err) - os.Exit(1) - } - fmt.Fprint(os.Stdout, size) -} - -// writeLayer writes a layer from a tar file. -func writeLayer(layerData io.Reader, home string, id string, parentLayerPaths ...string) (size int64, retErr error) { - err := winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}) - if err != nil { - return 0, err - } - if noreexec { - defer func() { - if err := winio.DisableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}); err != nil { - // This should never happen, but just in case when in debugging mode. 
- // See https://github.com/docker/docker/pull/28002#discussion_r86259241 for rationale. - panic("Failed to disable process privileges while in non re-exec mode") - } - }() - } - - info := hcsshim.DriverInfo{ - Flavour: filterDriver, - HomeDir: home, - } - - w, err := hcsshim.NewLayerWriter(info, id, parentLayerPaths) - if err != nil { - return 0, err - } - - defer func() { - if err := w.Close(); err != nil { - // This error should not be discarded as a failure here - // could result in an invalid layer on disk - if retErr == nil { - retErr = err - } - } - }() - - return writeLayerFromTar(layerData, w, filepath.Join(home, id)) -} - -// resolveID computes the layerID information based on the given id. -func (d *Driver) resolveID(id string) (string, error) { - content, err := ioutil.ReadFile(filepath.Join(d.dir(id), "layerID")) - if os.IsNotExist(err) { - return id, nil - } else if err != nil { - return "", err - } - return string(content), nil -} - -// setID stores the layerId on disk. -func (d *Driver) setID(id, altID string) error { - return ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600) -} - -// getLayerChain returns the layer chain information. -func (d *Driver) getLayerChain(id string) ([]string, error) { - jPath := filepath.Join(d.dir(id), "layerchain.json") - content, err := ioutil.ReadFile(jPath) - if os.IsNotExist(err) { - return nil, nil - } else if err != nil { - return nil, fmt.Errorf("Unable to read layerchain file - %s", err) - } - - var layerChain []string - err = json.Unmarshal(content, &layerChain) - if err != nil { - return nil, fmt.Errorf("Failed to unmarshal layerchain json - %s", err) - } - - return layerChain, nil -} - -// setLayerChain stores the layer chain information on disk. -func (d *Driver) setLayerChain(id string, chain []string) error { - content, err := json.Marshal(&chain) - if err != nil { - return fmt.Errorf("Failed to marshal layerchain json - %s", err) - } - - jPath := filepath.Join(d.dir(id), "layerchain.json") - err = ioutil.WriteFile(jPath, content, 0600) - if err != nil { - return fmt.Errorf("Unable to write layerchain file - %s", err) - } - - return nil -} - -type fileGetCloserWithBackupPrivileges struct { - path string -} - -func (fg *fileGetCloserWithBackupPrivileges) Get(filename string) (io.ReadCloser, error) { - if backupPath, ok := mutatedFiles[filename]; ok { - return os.Open(filepath.Join(fg.path, backupPath)) - } - - var f *os.File - // Open the file while holding the Windows backup privilege. This ensures that the - // file can be opened even if the caller does not actually have access to it according - // to the security descriptor.
Also use sequential file access to avoid depleting the - // standby list - Microsoft VSO Bug Tracker #9900466 - err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { - path := longpath.AddPrefix(filepath.Join(fg.path, filename)) - p, err := windows.UTF16FromString(path) - if err != nil { - return err - } - const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN - h, err := windows.CreateFile(&p[0], windows.GENERIC_READ, windows.FILE_SHARE_READ, nil, windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS|fileFlagSequentialScan, 0) - if err != nil { - return &os.PathError{Op: "open", Path: path, Err: err} - } - f = os.NewFile(uintptr(h), path) - return nil - }) - return f, err -} - -func (fg *fileGetCloserWithBackupPrivileges) Close() error { - return nil -} - -// DiffGetter returns a FileGetCloser that can read files from the directory that -// contains files for the layer differences. Used for direct access for tar-split. -func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { - id, err := d.resolveID(id) - if err != nil { - return nil, err - } - - return &fileGetCloserWithBackupPrivileges{d.dir(id)}, nil -} - -type storageOptions struct { - size uint64 -} - -func parseStorageOpt(storageOpt map[string]string) (*storageOptions, error) { - options := storageOptions{} - - // Read size to change the block device size per container. - for key, val := range storageOpt { - key := strings.ToLower(key) - switch key { - case "size": - size, err := units.RAMInBytes(val) - if err != nil { - return nil, err - } - options.size = uint64(size) - } - } - return &options, nil -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs.go b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs.go deleted file mode 100644 index 1d9153e17..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs.go +++ /dev/null @@ -1,431 +0,0 @@ -// +build linux freebsd - -package zfs // import "github.com/docker/docker/daemon/graphdriver/zfs" - -import ( - "fmt" - "os" - "os/exec" - "path" - "strconv" - "strings" - "sync" - "time" - - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/containerfs" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/pkg/parsers" - "github.com/mistifyio/go-zfs" - "github.com/opencontainers/selinux/go-selinux/label" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -type zfsOptions struct { - fsName string - mountPath string -} - -func init() { - graphdriver.Register("zfs", Init) -} - -// Logger returns a zfs logger implementation. -type Logger struct{} - -// Log wraps log message from ZFS driver with a prefix '[zfs]'. -func (*Logger) Log(cmd []string) { - logrus.WithField("storage-driver", "zfs").Debugf("[zfs] %s", strings.Join(cmd, " ")) -} - -// Init returns a new ZFS driver. -// It takes base mount path and an array of options which are represented as key value pairs. -// Each option is in the for key=value. 'zfs.fsname' is expected to be a valid key in the options. 
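Options reach Init as key=value strings and are split with docker's parsers helper, as parseOptions below shows. A tiny usage sketch of that helper:

package main

import (
	"fmt"
	"strings"

	"github.com/docker/docker/pkg/parsers"
)

func main() {
	for _, opt := range []string{"zfs.fsname=zroot/docker"} {
		key, val, err := parsers.ParseKeyValueOpt(opt)
		if err != nil {
			fmt.Println(err)
			continue
		}
		// Keys are lower-cased before matching, as in parseOptions.
		fmt.Println(strings.ToLower(key), "=>", val) // zfs.fsname => zroot/docker
	}
}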
-func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - var err error - - logger := logrus.WithField("storage-driver", "zfs") - - if _, err := exec.LookPath("zfs"); err != nil { - logger.Debugf("zfs command is not available: %v", err) - return nil, graphdriver.ErrPrerequisites - } - - file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 600) - if err != nil { - logger.Debugf("cannot open /dev/zfs: %v", err) - return nil, graphdriver.ErrPrerequisites - } - defer file.Close() - - options, err := parseOptions(opt) - if err != nil { - return nil, err - } - options.mountPath = base - - rootdir := path.Dir(base) - - if options.fsName == "" { - err = checkRootdirFs(rootdir) - if err != nil { - return nil, err - } - } - - if options.fsName == "" { - options.fsName, err = lookupZfsDataset(rootdir) - if err != nil { - return nil, err - } - } - - zfs.SetLogger(new(Logger)) - - filesystems, err := zfs.Filesystems(options.fsName) - if err != nil { - return nil, fmt.Errorf("Cannot find root filesystem %s: %v", options.fsName, err) - } - - filesystemsCache := make(map[string]bool, len(filesystems)) - var rootDataset *zfs.Dataset - for _, fs := range filesystems { - if fs.Name == options.fsName { - rootDataset = fs - } - filesystemsCache[fs.Name] = true - } - - if rootDataset == nil { - return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName) - } - - rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) - if err != nil { - return nil, fmt.Errorf("Failed to get root uid/guid: %v", err) - } - if err := idtools.MkdirAllAndChown(base, 0700, idtools.IDPair{UID: rootUID, GID: rootGID}); err != nil { - return nil, fmt.Errorf("Failed to create '%s': %v", base, err) - } - - d := &Driver{ - dataset: rootDataset, - options: options, - filesystemsCache: filesystemsCache, - uidMaps: uidMaps, - gidMaps: gidMaps, - ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), - } - return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil -} - -func parseOptions(opt []string) (zfsOptions, error) { - var options zfsOptions - options.fsName = "" - for _, option := range opt { - key, val, err := parsers.ParseKeyValueOpt(option) - if err != nil { - return options, err - } - key = strings.ToLower(key) - switch key { - case "zfs.fsname": - options.fsName = val - default: - return options, fmt.Errorf("Unknown option %s", key) - } - } - return options, nil -} - -func lookupZfsDataset(rootdir string) (string, error) { - var stat unix.Stat_t - if err := unix.Stat(rootdir, &stat); err != nil { - return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err) - } - wantedDev := stat.Dev - - mounts, err := mount.GetMounts(nil) - if err != nil { - return "", err - } - for _, m := range mounts { - if err := unix.Stat(m.Mountpoint, &stat); err != nil { - logrus.WithField("storage-driver", "zfs").Debugf("failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err) - continue // may fail on fuse file systems - } - - if stat.Dev == wantedDev && m.Fstype == "zfs" { - return m.Source, nil - } - } - - return "", fmt.Errorf("Failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir) -} - -// Driver holds information about the driver, such as zfs dataset, options and cache. 
-type Driver struct { - dataset *zfs.Dataset - options zfsOptions - sync.Mutex // protects filesystem cache against concurrent access - filesystemsCache map[string]bool - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - ctr *graphdriver.RefCounter -} - -func (d *Driver) String() string { - return "zfs" -} - -// Cleanup is called on daemon shutdown; it is a no-op for ZFS. -// TODO(@cpuguy83): Walk layer tree and check mounts? -func (d *Driver) Cleanup() error { - return nil -} - -// Status returns information about the ZFS filesystem. It returns a two-dimensional array of information -// such as pool name, dataset name, disk usage, parent quota and compression used. -// Currently it returns 'Zpool', 'Zpool Health', 'Parent Dataset', 'Space Used By Parent', -// 'Space Available', 'Parent Quota' and 'Compression'. -func (d *Driver) Status() [][2]string { - parts := strings.Split(d.dataset.Name, "/") - pool, err := zfs.GetZpool(parts[0]) - - var poolName, poolHealth string - if err == nil { - poolName = pool.Name - poolHealth = pool.Health - } else { - poolName = fmt.Sprintf("error while getting pool information %v", err) - poolHealth = "not available" - } - - quota := "no" - if d.dataset.Quota != 0 { - quota = strconv.FormatUint(d.dataset.Quota, 10) - } - - return [][2]string{ - {"Zpool", poolName}, - {"Zpool Health", poolHealth}, - {"Parent Dataset", d.dataset.Name}, - {"Space Used By Parent", strconv.FormatUint(d.dataset.Used, 10)}, - {"Space Available", strconv.FormatUint(d.dataset.Avail, 10)}, - {"Parent Quota", quota}, - {"Compression", d.dataset.Compression}, - } -} - -// GetMetadata returns image/container metadata related to the graph driver. -func (d *Driver) GetMetadata(id string) (map[string]string, error) { - return map[string]string{ - "Mountpoint": d.mountPath(id), - "Dataset": d.zfsPath(id), - }, nil -} - -func (d *Driver) cloneFilesystem(name, parentName string) error { - snapshotName := fmt.Sprintf("%d", time.Now().Nanosecond()) - parentDataset := zfs.Dataset{Name: parentName} - snapshot, err := parentDataset.Snapshot(snapshotName /*recursive */, false) - if err != nil { - return err - } - - _, err = snapshot.Clone(name, map[string]string{"mountpoint": "legacy"}) - if err == nil { - d.Lock() - d.filesystemsCache[name] = true - d.Unlock() - } - - if err != nil { - snapshot.Destroy(zfs.DestroyDeferDeletion) - return err - } - return snapshot.Destroy(zfs.DestroyDeferDeletion) -} - -func (d *Driver) zfsPath(id string) string { - return d.options.fsName + "/" + id -} - -func (d *Driver) mountPath(id string) string { - return path.Join(d.options.mountPath, "graph", getMountpoint(id)) -} - -// CreateReadWrite creates a layer that is writable for use as a container -// file system. -func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - return d.Create(id, parent, opts) -} - -// Create prepares the dataset and filesystem for the ZFS driver for the given id under the parent.
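cloneFilesystem above builds a child layer by snapshotting the parent dataset and cloning the snapshot with mountpoint=legacy; destroying the snapshot with deferred deletion lets it disappear once its last clone is gone. The same flow in isolation, using the go-zfs calls from the code above with illustrative dataset names:

package main

import (
	"fmt"
	"time"

	zfs "github.com/mistifyio/go-zfs"
)

// cloneLayer snapshots parent and clones the snapshot as child, then
// defer-destroys the snapshot so it vanishes with its last clone.
func cloneLayer(parent, child string) error {
	snapName := fmt.Sprintf("%d", time.Now().Nanosecond())
	snap, err := (&zfs.Dataset{Name: parent}).Snapshot(snapName, false)
	if err != nil {
		return err
	}
	// mountpoint=legacy keeps ZFS from auto-mounting the clone; the
	// driver mounts it explicitly in Get.
	if _, err := snap.Clone(child, map[string]string{"mountpoint": "legacy"}); err != nil {
		snap.Destroy(zfs.DestroyDeferDeletion)
		return err
	}
	return snap.Destroy(zfs.DestroyDeferDeletion)
}

func main() {
	fmt.Println(cloneLayer("zroot/docker/parent", "zroot/docker/child"))
}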
-func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { - var storageOpt map[string]string - if opts != nil { - storageOpt = opts.StorageOpt - } - - err := d.create(id, parent, storageOpt) - if err == nil { - return nil - } - if zfsError, ok := err.(*zfs.Error); ok { - if !strings.HasSuffix(zfsError.Stderr, "dataset already exists\n") { - return err - } - // aborted build -> cleanup - } else { - return err - } - - dataset := zfs.Dataset{Name: d.zfsPath(id)} - if err := dataset.Destroy(zfs.DestroyRecursiveClones); err != nil { - return err - } - - // retry - return d.create(id, parent, storageOpt) -} - -func (d *Driver) create(id, parent string, storageOpt map[string]string) error { - name := d.zfsPath(id) - quota, err := parseStorageOpt(storageOpt) - if err != nil { - return err - } - if parent == "" { - mountoptions := map[string]string{"mountpoint": "legacy"} - fs, err := zfs.CreateFilesystem(name, mountoptions) - if err == nil { - err = setQuota(name, quota) - if err == nil { - d.Lock() - d.filesystemsCache[fs.Name] = true - d.Unlock() - } - } - return err - } - err = d.cloneFilesystem(name, d.zfsPath(parent)) - if err == nil { - err = setQuota(name, quota) - } - return err -} - -func parseStorageOpt(storageOpt map[string]string) (string, error) { - // Read size to change the disk quota per container - for k, v := range storageOpt { - key := strings.ToLower(k) - switch key { - case "size": - return v, nil - default: - return "0", fmt.Errorf("Unknown option %s", key) - } - } - return "0", nil -} - -func setQuota(name string, quota string) error { - if quota == "0" { - return nil - } - fs, err := zfs.GetDataset(name) - if err != nil { - return err - } - return fs.SetProperty("quota", quota) -} - -// Remove deletes the dataset, filesystem and the cache for the given id. -func (d *Driver) Remove(id string) error { - name := d.zfsPath(id) - dataset := zfs.Dataset{Name: name} - err := dataset.Destroy(zfs.DestroyRecursive) - if err == nil { - d.Lock() - delete(d.filesystemsCache, name) - d.Unlock() - } - return err -} - -// Get returns the mountpoint for the given id after creating the target directories if necessary. 
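Get and Put below are refcounted: only the first Get of a mountpoint actually mounts it, and only the last Put unmounts, with graphdriver.RefCounter keeping the counts. A simplified stand-in for that pattern, with stubbed mount and unmount callbacks:

package main

import (
	"fmt"
	"sync"
)

// refCounter mimics graphdriver.RefCounter: one mount per path,
// however many concurrent Gets are outstanding.
type refCounter struct {
	mu     sync.Mutex
	counts map[string]int
}

func (c *refCounter) get(path string, mount func() error) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.counts[path]++
	if c.counts[path] > 1 {
		return nil // already mounted by an earlier Get
	}
	if err := mount(); err != nil {
		c.counts[path]--
		return err
	}
	return nil
}

func (c *refCounter) put(path string, unmount func() error) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.counts[path]--; c.counts[path] > 0 {
		return nil
	}
	delete(c.counts, path)
	return unmount()
}

func main() {
	c := &refCounter{counts: map[string]int{}}
	mount := func() error { fmt.Println("mount"); return nil }
	unmount := func() error { fmt.Println("unmount"); return nil }
	c.get("/mnt/x", mount)
	c.get("/mnt/x", mount) // no second mount
	c.put("/mnt/x", unmount)
	c.put("/mnt/x", unmount) // unmounts here
}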
-func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, retErr error) { - mountpoint := d.mountPath(id) - if count := d.ctr.Increment(mountpoint); count > 1 { - return containerfs.NewLocalContainerFS(mountpoint), nil - } - defer func() { - if retErr != nil { - if c := d.ctr.Decrement(mountpoint); c <= 0 { - if mntErr := unix.Unmount(mountpoint, 0); mntErr != nil { - logrus.WithField("storage-driver", "zfs").Errorf("Error unmounting %v: %v", mountpoint, mntErr) - } - if rmErr := unix.Rmdir(mountpoint); rmErr != nil && !os.IsNotExist(rmErr) { - logrus.WithField("storage-driver", "zfs").Debugf("Failed to remove %s: %v", id, rmErr) - } - - } - } - }() - - filesystem := d.zfsPath(id) - options := label.FormatMountLabel("", mountLabel) - logrus.WithField("storage-driver", "zfs").Debugf(`mount("%s", "%s", "%s")`, filesystem, mountpoint, options) - - rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { - return nil, err - } - // Create the target directories if they don't exist - if err := idtools.MkdirAllAndChown(mountpoint, 0755, idtools.IDPair{UID: rootUID, GID: rootGID}); err != nil { - return nil, err - } - - if err := mount.Mount(filesystem, mountpoint, "zfs", options); err != nil { - return nil, fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err) - } - - // this could be our first mount after creation of the filesystem, and the root dir may still have root - // permissions instead of the remapped root uid:gid (if user namespaces are enabled): - if err := os.Chown(mountpoint, rootUID, rootGID); err != nil { - return nil, fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err) - } - - return containerfs.NewLocalContainerFS(mountpoint), nil -} - -// Put removes the existing mountpoint for the given id if it exists. -func (d *Driver) Put(id string) error { - mountpoint := d.mountPath(id) - if count := d.ctr.Decrement(mountpoint); count > 0 { - return nil - } - - logger := logrus.WithField("storage-driver", "zfs") - - logger.Debugf(`unmount("%s")`, mountpoint) - - if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { - logger.Warnf("Failed to unmount %s mount %s: %v", id, mountpoint, err) - } - if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) { - logger.Debugf("Failed to remove %s mount point %s: %v", id, mountpoint, err) - } - - return nil -} - -// Exists checks to see if the cache entry exists for the given id. -func (d *Driver) Exists(id string) bool { - d.Lock() - defer d.Unlock() - return d.filesystemsCache[d.zfsPath(id)] -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_freebsd.go b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_freebsd.go deleted file mode 100644 index f15aae059..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_freebsd.go +++ /dev/null @@ -1,38 +0,0 @@ -package zfs // import "github.com/docker/docker/daemon/graphdriver/zfs" - -import ( - "fmt" - "strings" - - "github.com/docker/docker/daemon/graphdriver" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -func checkRootdirFs(rootdir string) error { - var buf unix.Statfs_t - if err := unix.Statfs(rootdir, &buf); err != nil { - return fmt.Errorf("Failed to access '%s': %s", rootdir, err) - } - - // on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... 
] - if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) { - logrus.WithField("storage-driver", "zfs").Debugf("no zfs dataset found for rootdir '%s'", rootdir) - return graphdriver.ErrPrerequisites - } - - return nil -} - -func getMountpoint(id string) string { - maxlen := 12 - - // we need to preserve filesystem suffix - suffix := strings.SplitN(id, "-", 2) - - if len(suffix) > 1 { - return id[:maxlen] + "-" + suffix[1] - } - - return id[:maxlen] -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_linux.go b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_linux.go deleted file mode 100644 index 589ecbd17..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_linux.go +++ /dev/null @@ -1,28 +0,0 @@ -package zfs // import "github.com/docker/docker/daemon/graphdriver/zfs" - -import ( - "github.com/docker/docker/daemon/graphdriver" - "github.com/sirupsen/logrus" -) - -func checkRootdirFs(rootDir string) error { - fsMagic, err := graphdriver.GetFSMagic(rootDir) - if err != nil { - return err - } - backingFS := "unknown" - if fsName, ok := graphdriver.FsNames[fsMagic]; ok { - backingFS = fsName - } - - if fsMagic != graphdriver.FsMagicZfs { - logrus.WithField("root", rootDir).WithField("backingFS", backingFS).WithField("storage-driver", "zfs").Error("No zfs dataset found for root") - return graphdriver.ErrPrerequisites - } - - return nil -} - -func getMountpoint(id string) string { - return id -} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_unsupported.go deleted file mode 100644 index 1b7703068..000000000 --- a/vendor/github.com/docker/docker/daemon/graphdriver/zfs/zfs_unsupported.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !linux,!freebsd - -package zfs // import "github.com/docker/docker/daemon/graphdriver/zfs" - -func checkRootdirFs(rootdir string) error { - return nil -} - -func getMountpoint(id string) string { - return id -} diff --git a/vendor/github.com/docker/docker/daemon/health.go b/vendor/github.com/docker/docker/daemon/health.go deleted file mode 100644 index ae0d7f892..000000000 --- a/vendor/github.com/docker/docker/daemon/health.go +++ /dev/null @@ -1,381 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "bytes" - "context" - "fmt" - "runtime" - "strings" - "sync" - "time" - - "github.com/docker/docker/api/types" - containertypes "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/strslice" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/exec" - "github.com/sirupsen/logrus" -) - -const ( - // Longest healthcheck probe output message to store. Longer messages will be truncated. - maxOutputLen = 4096 - - // Default interval between probe runs (from the end of the first to the start of the second). - // Also the time before the first probe. - defaultProbeInterval = 30 * time.Second - - // The maximum length of time a single probe run should take. If the probe takes longer - // than this, the check is considered to have failed. - defaultProbeTimeout = 30 * time.Second - - // The time given for the container to start before the health check starts considering - // the container unstable. Defaults to none. - defaultStartPeriod = 0 * time.Second - - // Default number of consecutive failures of the health check - // for the container to be considered unhealthy. 
- defaultProbeRetries = 3 - - // Maximum number of entries to record - maxLogEntries = 5 -) - -const ( - // Exit status codes that can be returned by the probe command. - - exitStatusHealthy = 0 // Container is healthy -) - -// probe implementations know how to run a particular type of probe. -type probe interface { - // Perform one run of the check. Returns the exit code and an optional - // short diagnostic string. - run(context.Context, *Daemon, *container.Container) (*types.HealthcheckResult, error) -} - -// cmdProbe implements the "CMD" probe type. -type cmdProbe struct { - // Run the command with the system's default shell instead of execing it directly. - shell bool -} - -// exec the healthcheck command in the container. -// Returns the exit code and probe output (if any) -func (p *cmdProbe) run(ctx context.Context, d *Daemon, cntr *container.Container) (*types.HealthcheckResult, error) { - cmdSlice := strslice.StrSlice(cntr.Config.Healthcheck.Test)[1:] - if p.shell { - cmdSlice = append(getShell(cntr.Config), cmdSlice...) - } - entrypoint, args := d.getEntrypointAndArgs(strslice.StrSlice{}, cmdSlice) - execConfig := exec.NewConfig() - execConfig.OpenStdin = false - execConfig.OpenStdout = true - execConfig.OpenStderr = true - execConfig.ContainerID = cntr.ID - execConfig.DetachKeys = []byte{} - execConfig.Entrypoint = entrypoint - execConfig.Args = args - execConfig.Tty = false - execConfig.Privileged = false - execConfig.User = cntr.Config.User - execConfig.WorkingDir = cntr.Config.WorkingDir - - linkedEnv, err := d.setupLinkedContainers(cntr) - if err != nil { - return nil, err - } - execConfig.Env = container.ReplaceOrAppendEnvValues(cntr.CreateDaemonEnvironment(execConfig.Tty, linkedEnv), execConfig.Env) - - d.registerExecCommand(cntr, execConfig) - attributes := map[string]string{ - "execID": execConfig.ID, - } - d.LogContainerEventWithAttributes(cntr, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " "), attributes) - - output := &limitedBuffer{} - err = d.ContainerExecStart(ctx, execConfig.ID, nil, output, output) - if err != nil { - return nil, err - } - info, err := d.getExecConfig(execConfig.ID) - if err != nil { - return nil, err - } - if info.ExitCode == nil { - return nil, fmt.Errorf("healthcheck for container %s has no exit code", cntr.ID) - } - // Note: Go's json package will handle invalid UTF-8 for us - out := output.String() - return &types.HealthcheckResult{ - End: time.Now(), - ExitCode: *info.ExitCode, - Output: out, - }, nil -} - -// Update the container's Status.Health struct based on the latest probe's result. -func handleProbeResult(d *Daemon, c *container.Container, result *types.HealthcheckResult, done chan struct{}) { - c.Lock() - defer c.Unlock() - - // probe may have been cancelled while waiting on lock. Ignore result then - select { - case <-done: - return - default: - } - - retries := c.Config.Healthcheck.Retries - if retries <= 0 { - retries = defaultProbeRetries - } - - h := c.State.Health - oldStatus := h.Status() - - if len(h.Log) >= maxLogEntries { - h.Log = append(h.Log[len(h.Log)+1-maxLogEntries:], result) - } else { - h.Log = append(h.Log, result) - } - - if result.ExitCode == exitStatusHealthy { - h.FailingStreak = 0 - h.SetStatus(types.Healthy) - } else { // Failure (including invalid exit code) - shouldIncrementStreak := true - - // If the container is starting (i.e. 
we never had a successful health check) - // then we check if we are within the start period of the container in which - // case we do not increment the failure streak. - if h.Status() == types.Starting { - startPeriod := timeoutWithDefault(c.Config.Healthcheck.StartPeriod, defaultStartPeriod) - timeSinceStart := result.Start.Sub(c.State.StartedAt) - - // If still within the start period, then don't increment failing streak. - if timeSinceStart < startPeriod { - shouldIncrementStreak = false - } - } - - if shouldIncrementStreak { - h.FailingStreak++ - - if h.FailingStreak >= retries { - h.SetStatus(types.Unhealthy) - } - } - // Else we're starting or healthy. Stay in that state. - } - - // replicate Health status changes - if err := c.CheckpointTo(d.containersReplica); err != nil { - // queries will be inconsistent until the next probe runs or other state mutations - // checkpoint the container - logrus.Errorf("Error replicating health state for container %s: %v", c.ID, err) - } - - current := h.Status() - if oldStatus != current { - d.LogContainerEvent(c, "health_status: "+current) - } -} - -// Run the container's monitoring thread until notified via "stop". -// There is never more than one monitor thread running per container at a time. -func monitor(d *Daemon, c *container.Container, stop chan struct{}, probe probe) { - probeTimeout := timeoutWithDefault(c.Config.Healthcheck.Timeout, defaultProbeTimeout) - probeInterval := timeoutWithDefault(c.Config.Healthcheck.Interval, defaultProbeInterval) - for { - select { - case <-stop: - logrus.Debugf("Stop healthcheck monitoring for container %s (received while idle)", c.ID) - return - case <-time.After(probeInterval): - logrus.Debugf("Running health check for container %s ...", c.ID) - startTime := time.Now() - ctx, cancelProbe := context.WithTimeout(context.Background(), probeTimeout) - results := make(chan *types.HealthcheckResult, 1) - go func() { - healthChecksCounter.Inc() - result, err := probe.run(ctx, d, c) - if err != nil { - healthChecksFailedCounter.Inc() - logrus.Warnf("Health check for container %s error: %v", c.ID, err) - results <- &types.HealthcheckResult{ - ExitCode: -1, - Output: err.Error(), - Start: startTime, - End: time.Now(), - } - } else { - result.Start = startTime - logrus.Debugf("Health check for container %s done (exitCode=%d)", c.ID, result.ExitCode) - results <- result - } - close(results) - }() - select { - case <-stop: - logrus.Debugf("Stop healthcheck monitoring for container %s (received while probing)", c.ID) - cancelProbe() - // Wait for probe to exit (it might take a while to respond to the TERM - // signal and we don't want dying probes to pile up). - <-results - return - case result := <-results: - handleProbeResult(d, c, result, stop) - // Stop timeout - cancelProbe() - case <-ctx.Done(): - logrus.Debugf("Health check for container %s taking too long", c.ID) - handleProbeResult(d, c, &types.HealthcheckResult{ - ExitCode: -1, - Output: fmt.Sprintf("Health check exceeded timeout (%v)", probeTimeout), - Start: startTime, - End: time.Now(), - }, stop) - cancelProbe() - // Wait for probe to exit (it might take a while to respond to the TERM - // signal and we don't want dying probes to pile up). - <-results - } - } - } -} - -// Get a suitable probe implementation for the container's healthcheck configuration. -// Nil will be returned if no healthcheck was configured or NONE was set. 
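// The monitor loop above multiplexes three channels: a stop signal, an
// interval timer, and the probe result, and it bounds every probe run with
// context.WithTimeout. A minimal runnable sketch of the same pattern;
// probeOnce and the durations are illustrative stand-ins, not the daemon API.

package main

import (
	"context"
	"fmt"
	"time"
)

func probeOnce(ctx context.Context) error {
	select {
	case <-time.After(50 * time.Millisecond): // pretend the check ran
		return nil
	case <-ctx.Done(): // probe exceeded its timeout
		return ctx.Err()
	}
}

func monitorSketch(stop chan struct{}) {
	for {
		select {
		case <-stop: // received while idle
			return
		case <-time.After(200 * time.Millisecond): // probe interval elapsed
			ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
			fmt.Println("probe result:", probeOnce(ctx))
			cancel()
		}
	}
}

func main() {
	stop := make(chan struct{})
	go monitorSketch(stop)
	time.Sleep(time.Second)
	close(stop)
}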
-func getProbe(c *container.Container) probe { - config := c.Config.Healthcheck - if config == nil || len(config.Test) == 0 { - return nil - } - switch config.Test[0] { - case "CMD": - return &cmdProbe{shell: false} - case "CMD-SHELL": - return &cmdProbe{shell: true} - case "NONE": - return nil - default: - logrus.Warnf("Unknown healthcheck type '%s' (expected 'CMD') in container %s", config.Test[0], c.ID) - return nil - } -} - -// Ensure the health-check monitor is running or not, depending on the current -// state of the container. -// Called from monitor.go, with c locked. -func (d *Daemon) updateHealthMonitor(c *container.Container) { - h := c.State.Health - if h == nil { - return // No healthcheck configured - } - - probe := getProbe(c) - wantRunning := c.Running && !c.Paused && probe != nil - if wantRunning { - if stop := h.OpenMonitorChannel(); stop != nil { - go monitor(d, c, stop, probe) - } - } else { - h.CloseMonitorChannel() - } -} - -// Reset the health state for a newly-started, restarted or restored container. -// initHealthMonitor is called from monitor.go and we should never be running -// two instances at once. -// Called with c locked. -func (d *Daemon) initHealthMonitor(c *container.Container) { - // If no healthcheck is setup then don't init the monitor - if getProbe(c) == nil { - return - } - - // This is needed in case we're auto-restarting - d.stopHealthchecks(c) - - if h := c.State.Health; h != nil { - h.SetStatus(types.Starting) - h.FailingStreak = 0 - } else { - h := &container.Health{} - h.SetStatus(types.Starting) - c.State.Health = h - } - - d.updateHealthMonitor(c) -} - -// Called when the container is being stopped (whether because the health check is -// failing or for any other reason). -func (d *Daemon) stopHealthchecks(c *container.Container) { - h := c.State.Health - if h != nil { - h.CloseMonitorChannel() - } -} - -// Buffer up to maxOutputLen bytes. Further data is discarded. -type limitedBuffer struct { - buf bytes.Buffer - mu sync.Mutex - truncated bool // indicates that data has been lost -} - -// Append to limitedBuffer while there is room. -func (b *limitedBuffer) Write(data []byte) (int, error) { - b.mu.Lock() - defer b.mu.Unlock() - - bufLen := b.buf.Len() - dataLen := len(data) - keep := min(maxOutputLen-bufLen, dataLen) - if keep > 0 { - b.buf.Write(data[:keep]) - } - if keep < dataLen { - b.truncated = true - } - return dataLen, nil -} - -// The contents of the buffer, with "..." appended if it overflowed. -func (b *limitedBuffer) String() string { - b.mu.Lock() - defer b.mu.Unlock() - - out := b.buf.String() - if b.truncated { - out = out + "..." - } - return out -} - -// If configuredValue is zero, use defaultValue instead. 
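// limitedBuffer above stores at most maxOutputLen bytes of probe output yet
// reports the full write length, so io.Writer users never see a short write.
// A standalone sketch of the same truncating writer; the type name and the
// 8-byte limit are invented for illustration.

package main

import (
	"bytes"
	"fmt"
)

type cappedWriter struct {
	buf       bytes.Buffer
	limit     int
	truncated bool
}

func (w *cappedWriter) Write(p []byte) (int, error) {
	keep := w.limit - w.buf.Len()
	if keep > len(p) {
		keep = len(p)
	}
	if keep > 0 {
		w.buf.Write(p[:keep])
	}
	if keep < len(p) {
		w.truncated = true // remember that output was dropped
	}
	return len(p), nil // claim the full write so callers keep going
}

func main() {
	w := &cappedWriter{limit: 8}
	fmt.Fprint(w, "hello, world")
	fmt.Println(w.buf.String(), w.truncated) // "hello, w true"
}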
-func timeoutWithDefault(configuredValue time.Duration, defaultValue time.Duration) time.Duration { - if configuredValue == 0 { - return defaultValue - } - return configuredValue -} - -func min(x, y int) int { - if x < y { - return x - } - return y -} - -func getShell(config *containertypes.Config) []string { - if len(config.Shell) != 0 { - return config.Shell - } - if runtime.GOOS != "windows" { - return []string{"/bin/sh", "-c"} - } - return []string{"cmd", "/S", "/C"} -} diff --git a/vendor/github.com/docker/docker/daemon/images/cache.go b/vendor/github.com/docker/docker/daemon/images/cache.go deleted file mode 100644 index 3b433106e..000000000 --- a/vendor/github.com/docker/docker/daemon/images/cache.go +++ /dev/null @@ -1,27 +0,0 @@ -package images // import "github.com/docker/docker/daemon/images" - -import ( - "github.com/docker/docker/builder" - "github.com/docker/docker/image/cache" - "github.com/sirupsen/logrus" -) - -// MakeImageCache creates a stateful image cache. -func (i *ImageService) MakeImageCache(sourceRefs []string) builder.ImageCache { - if len(sourceRefs) == 0 { - return cache.NewLocal(i.imageStore) - } - - cache := cache.New(i.imageStore) - - for _, ref := range sourceRefs { - img, err := i.GetImage(ref) - if err != nil { - logrus.Warnf("Could not look up %s for cache resolution, skipping: %+v", ref, err) - continue - } - cache.Populate(img) - } - - return cache -} diff --git a/vendor/github.com/docker/docker/daemon/images/image.go b/vendor/github.com/docker/docker/daemon/images/image.go deleted file mode 100644 index 79cc07c4f..000000000 --- a/vendor/github.com/docker/docker/daemon/images/image.go +++ /dev/null @@ -1,64 +0,0 @@ -package images // import "github.com/docker/docker/daemon/images" - -import ( - "fmt" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/image" -) - -// ErrImageDoesNotExist is error returned when no image can be found for a reference. -type ErrImageDoesNotExist struct { - ref reference.Reference -} - -func (e ErrImageDoesNotExist) Error() string { - ref := e.ref - if named, ok := ref.(reference.Named); ok { - ref = reference.TagNameOnly(named) - } - return fmt.Sprintf("No such image: %s", reference.FamiliarString(ref)) -} - -// NotFound implements the NotFound interface -func (e ErrImageDoesNotExist) NotFound() {} - -// GetImage returns an image corresponding to the image referred to by refOrID. -func (i *ImageService) GetImage(refOrID string) (*image.Image, error) { - ref, err := reference.ParseAnyReference(refOrID) - if err != nil { - return nil, errdefs.InvalidParameter(err) - } - namedRef, ok := ref.(reference.Named) - if !ok { - digested, ok := ref.(reference.Digested) - if !ok { - return nil, ErrImageDoesNotExist{ref} - } - id := image.IDFromDigest(digested.Digest()) - if img, err := i.imageStore.Get(id); err == nil { - return img, nil - } - return nil, ErrImageDoesNotExist{ref} - } - - if digest, err := i.referenceStore.Get(namedRef); err == nil { - // Search the image stores to get the operating system, defaulting to host OS. 
- id := image.IDFromDigest(digest) - if img, err := i.imageStore.Get(id); err == nil { - return img, nil - } - } - - // Search based on ID - if id, err := i.imageStore.Search(refOrID); err == nil { - img, err := i.imageStore.Get(id) - if err != nil { - return nil, ErrImageDoesNotExist{ref} - } - return img, nil - } - - return nil, ErrImageDoesNotExist{ref} -} diff --git a/vendor/github.com/docker/docker/daemon/images/image_builder.go b/vendor/github.com/docker/docker/daemon/images/image_builder.go deleted file mode 100644 index ca7d0fda4..000000000 --- a/vendor/github.com/docker/docker/daemon/images/image_builder.go +++ /dev/null @@ -1,219 +0,0 @@ -package images // import "github.com/docker/docker/daemon/images" - -import ( - "context" - "io" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/builder" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/containerfs" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/registry" - "github.com/pkg/errors" -) - -type roLayer struct { - released bool - layerStore layer.Store - roLayer layer.Layer -} - -func (l *roLayer) DiffID() layer.DiffID { - if l.roLayer == nil { - return layer.DigestSHA256EmptyTar - } - return l.roLayer.DiffID() -} - -func (l *roLayer) Release() error { - if l.released { - return nil - } - if l.roLayer != nil { - metadata, err := l.layerStore.Release(l.roLayer) - layer.LogReleaseMetadata(metadata) - if err != nil { - return errors.Wrap(err, "failed to release ROLayer") - } - } - l.roLayer = nil - l.released = true - return nil -} - -func (l *roLayer) NewRWLayer() (builder.RWLayer, error) { - var chainID layer.ChainID - if l.roLayer != nil { - chainID = l.roLayer.ChainID() - } - - mountID := stringid.GenerateRandomID() - newLayer, err := l.layerStore.CreateRWLayer(mountID, chainID, nil) - if err != nil { - return nil, errors.Wrap(err, "failed to create rwlayer") - } - - rwLayer := &rwLayer{layerStore: l.layerStore, rwLayer: newLayer} - - fs, err := newLayer.Mount("") - if err != nil { - rwLayer.Release() - return nil, err - } - - rwLayer.fs = fs - - return rwLayer, nil -} - -type rwLayer struct { - released bool - layerStore layer.Store - rwLayer layer.RWLayer - fs containerfs.ContainerFS -} - -func (l *rwLayer) Root() containerfs.ContainerFS { - return l.fs -} - -func (l *rwLayer) Commit() (builder.ROLayer, error) { - stream, err := l.rwLayer.TarStream() - if err != nil { - return nil, err - } - defer stream.Close() - - var chainID layer.ChainID - if parent := l.rwLayer.Parent(); parent != nil { - chainID = parent.ChainID() - } - - newLayer, err := l.layerStore.Register(stream, chainID) - if err != nil { - return nil, err - } - // TODO: An optimization would be to handle empty layers before returning - return &roLayer{layerStore: l.layerStore, roLayer: newLayer}, nil -} - -func (l *rwLayer) Release() error { - if l.released { - return nil - } - - if l.fs != nil { - if err := l.rwLayer.Unmount(); err != nil { - return errors.Wrap(err, "failed to unmount RWLayer") - } - l.fs = nil - } - - metadata, err := l.layerStore.ReleaseRWLayer(l.rwLayer) - layer.LogReleaseMetadata(metadata) - if err != nil { - return errors.Wrap(err, "failed to release RWLayer") - } - l.released = true - return nil -} - -func newROLayerForImage(img *image.Image, layerStore layer.Store) (builder.ROLayer, error) { - if img == nil || 
img.RootFS.ChainID() == "" { - return &roLayer{layerStore: layerStore}, nil - } - // Hold a reference to the image layer so that it can't be removed before - // it is released - layer, err := layerStore.Get(img.RootFS.ChainID()) - if err != nil { - return nil, errors.Wrapf(err, "failed to get layer for image %s", img.ImageID()) - } - return &roLayer{layerStore: layerStore, roLayer: layer}, nil -} - -// TODO: could this use the regular daemon PullImage ? -func (i *ImageService) pullForBuilder(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer, os string) (*image.Image, error) { - ref, err := reference.ParseNormalizedNamed(name) - if err != nil { - return nil, err - } - ref = reference.TagNameOnly(ref) - - pullRegistryAuth := &types.AuthConfig{} - if len(authConfigs) > 0 { - // The request came with a full auth config, use it - repoInfo, err := i.registryService.ResolveRepository(ref) - if err != nil { - return nil, err - } - - resolvedConfig := registry.ResolveAuthConfig(authConfigs, repoInfo.Index) - pullRegistryAuth = &resolvedConfig - } - - if err := i.pullImageWithReference(ctx, ref, os, nil, pullRegistryAuth, output); err != nil { - return nil, err - } - return i.GetImage(name) -} - -// GetImageAndReleasableLayer returns an image and releaseable layer for a reference or ID. -// Every call to GetImageAndReleasableLayer MUST call releasableLayer.Release() to prevent -// leaking of layers. -func (i *ImageService) GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (builder.Image, builder.ROLayer, error) { - if refOrID == "" { - if !system.IsOSSupported(opts.OS) { - return nil, nil, system.ErrNotSupportedOperatingSystem - } - layer, err := newROLayerForImage(nil, i.layerStores[opts.OS]) - return nil, layer, err - } - - if opts.PullOption != backend.PullOptionForcePull { - image, err := i.GetImage(refOrID) - if err != nil && opts.PullOption == backend.PullOptionNoPull { - return nil, nil, err - } - // TODO: shouldn't we error out if error is different from "not found" ? - if image != nil { - if !system.IsOSSupported(image.OperatingSystem()) { - return nil, nil, system.ErrNotSupportedOperatingSystem - } - layer, err := newROLayerForImage(image, i.layerStores[image.OperatingSystem()]) - return image, layer, err - } - } - - image, err := i.pullForBuilder(ctx, refOrID, opts.AuthConfig, opts.Output, opts.OS) - if err != nil { - return nil, nil, err - } - if !system.IsOSSupported(image.OperatingSystem()) { - return nil, nil, system.ErrNotSupportedOperatingSystem - } - layer, err := newROLayerForImage(image, i.layerStores[image.OperatingSystem()]) - return image, layer, err -} - -// CreateImage creates a new image by adding a config and ID to the image store. -// This is similar to LoadImage() except that it receives JSON encoded bytes of -// an image instead of a tar archive. 
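// roLayer and rwLayer above latch a released flag so that Release is
// idempotent: a deferred safety-net release can safely follow an explicit
// one. The latch in isolation; resource is a hypothetical stand-in for a
// layer handle.

package main

import "fmt"

type resource struct{ released bool }

func (r *resource) Release() error {
	if r.released {
		return nil // second and later calls are no-ops
	}
	// ... free the underlying layer here ...
	r.released = true
	return nil
}

func main() {
	r := &resource{}
	defer r.Release()        // safety net, runs last and no-ops
	fmt.Println(r.Release()) // explicit release: <nil>
	fmt.Println(r.released)  // true
}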
-func (i *ImageService) CreateImage(config []byte, parent string) (builder.Image, error) { - id, err := i.imageStore.Create(config) - if err != nil { - return nil, errors.Wrapf(err, "failed to create image") - } - - if parent != "" { - if err := i.imageStore.SetParent(id, image.ID(parent)); err != nil { - return nil, errors.Wrapf(err, "failed to set parent %s", parent) - } - } - - return i.imageStore.Get(id) -} diff --git a/vendor/github.com/docker/docker/daemon/images/image_commit.go b/vendor/github.com/docker/docker/daemon/images/image_commit.go deleted file mode 100644 index 4caba9f27..000000000 --- a/vendor/github.com/docker/docker/daemon/images/image_commit.go +++ /dev/null @@ -1,127 +0,0 @@ -package images // import "github.com/docker/docker/daemon/images" - -import ( - "encoding/json" - "io" - - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/system" - "github.com/pkg/errors" -) - -// CommitImage creates a new image from a commit config -func (i *ImageService) CommitImage(c backend.CommitConfig) (image.ID, error) { - layerStore, ok := i.layerStores[c.ContainerOS] - if !ok { - return "", system.ErrNotSupportedOperatingSystem - } - rwTar, err := exportContainerRw(layerStore, c.ContainerID, c.ContainerMountLabel) - if err != nil { - return "", err - } - defer func() { - if rwTar != nil { - rwTar.Close() - } - }() - - var parent *image.Image - if c.ParentImageID == "" { - parent = new(image.Image) - parent.RootFS = image.NewRootFS() - } else { - parent, err = i.imageStore.Get(image.ID(c.ParentImageID)) - if err != nil { - return "", err - } - } - - l, err := layerStore.Register(rwTar, parent.RootFS.ChainID()) - if err != nil { - return "", err - } - defer layer.ReleaseAndLog(layerStore, l) - - cc := image.ChildConfig{ - ContainerID: c.ContainerID, - Author: c.Author, - Comment: c.Comment, - ContainerConfig: c.ContainerConfig, - Config: c.Config, - DiffID: l.DiffID(), - } - config, err := json.Marshal(image.NewChildImage(parent, cc, c.ContainerOS)) - if err != nil { - return "", err - } - - id, err := i.imageStore.Create(config) - if err != nil { - return "", err - } - - if c.ParentImageID != "" { - if err := i.imageStore.SetParent(id, image.ID(c.ParentImageID)); err != nil { - return "", err - } - } - return id, nil -} - -func exportContainerRw(layerStore layer.Store, id, mountLabel string) (arch io.ReadCloser, err error) { - rwlayer, err := layerStore.GetRWLayer(id) - if err != nil { - return nil, err - } - defer func() { - if err != nil { - layerStore.ReleaseRWLayer(rwlayer) - } - }() - - // TODO: this mount call is not necessary as we assume that TarStream() should - // mount the layer if needed. But the Diff() function for windows requests that - // the layer should be mounted when calling it. So we reserve this mount call - // until windows driver can implement Diff() interface correctly. - _, err = rwlayer.Mount(mountLabel) - if err != nil { - return nil, err - } - - archive, err := rwlayer.TarStream() - if err != nil { - rwlayer.Unmount() - return nil, err - } - return ioutils.NewReadCloserWrapper(archive, func() error { - archive.Close() - err = rwlayer.Unmount() - layerStore.ReleaseRWLayer(rwlayer) - return err - }), - nil -} - -// CommitBuildStep is used by the builder to create an image for each step in -// the build. 
-// -// This method is different from CreateImageFromContainer: -// * it doesn't attempt to validate container state -// * it doesn't send a commit action to metrics -// * it doesn't log a container commit event -// -// This is a temporary shim. Should be removed when builder stops using commit. -func (i *ImageService) CommitBuildStep(c backend.CommitConfig) (image.ID, error) { - container := i.containers.Get(c.ContainerID) - if container == nil { - // TODO: use typed error - return "", errors.Errorf("container not found: %s", c.ContainerID) - } - c.ContainerMountLabel = container.MountLabel - c.ContainerOS = container.OS - c.ParentImageID = string(container.ImageID) - return i.CommitImage(c) -} diff --git a/vendor/github.com/docker/docker/daemon/images/image_delete.go b/vendor/github.com/docker/docker/daemon/images/image_delete.go deleted file mode 100644 index 94d6f872d..000000000 --- a/vendor/github.com/docker/docker/daemon/images/image_delete.go +++ /dev/null @@ -1,414 +0,0 @@ -package images // import "github.com/docker/docker/daemon/images" - -import ( - "fmt" - "strings" - "time" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/container" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/image" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/system" - "github.com/pkg/errors" -) - -type conflictType int - -const ( - conflictDependentChild conflictType = 1 << iota - conflictRunningContainer - conflictActiveReference - conflictStoppedContainer - conflictHard = conflictDependentChild | conflictRunningContainer - conflictSoft = conflictActiveReference | conflictStoppedContainer -) - -// ImageDelete deletes the image referenced by the given imageRef from this -// daemon. The given imageRef can be an image ID, ID prefix, or a repository -// reference (with an optional tag or digest, defaulting to the tag name -// "latest"). There is differing behavior depending on whether the given -// imageRef is a repository reference or not. -// -// If the given imageRef is a repository reference then that repository -// reference will be removed. However, if there exists any containers which -// were created using the same image reference then the repository reference -// cannot be removed unless either there are other repository references to the -// same image or force is true. Following removal of the repository reference, -// the referenced image itself will attempt to be deleted as described below -// but quietly, meaning any image delete conflicts will cause the image to not -// be deleted and the conflict will not be reported. -// -// There may be conflicts preventing deletion of an image and these conflicts -// are divided into two categories grouped by their severity: -// -// Hard Conflict: -// - a pull or build using the image. -// - any descendant image. -// - any running container using the image. -// -// Soft Conflict: -// - any stopped container using the image. -// - any repository tag or digest references to the image. -// -// The image cannot be removed if there are any hard conflicts and can be -// removed if there are soft conflicts only if force is true. -// -// If prune is true, ancestor images will each attempt to be deleted quietly, -// meaning any delete conflicts will cause the image to not be deleted and the -// conflict will not be reported. 
-// -func (i *ImageService) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error) { - start := time.Now() - records := []types.ImageDeleteResponseItem{} - - img, err := i.GetImage(imageRef) - if err != nil { - return nil, err - } - if !system.IsOSSupported(img.OperatingSystem()) { - return nil, errors.Errorf("unable to delete image: %q", system.ErrNotSupportedOperatingSystem) - } - - imgID := img.ID() - repoRefs := i.referenceStore.References(imgID.Digest()) - - using := func(c *container.Container) bool { - return c.ImageID == imgID - } - - var removedRepositoryRef bool - if !isImageIDPrefix(imgID.String(), imageRef) { - // A repository reference was given and should be removed - // first. We can only remove this reference if either force is - // true, there are multiple repository references to this - // image, or there are no containers using the given reference. - if !force && isSingleReference(repoRefs) { - if container := i.containers.First(using); container != nil { - // If we removed the repository reference then - // this image would remain "dangling" and since - // we really want to avoid that the client must - // explicitly force its removal. - err := errors.Errorf("conflict: unable to remove repository reference %q (must force) - container %s is using its referenced image %s", imageRef, stringid.TruncateID(container.ID), stringid.TruncateID(imgID.String())) - return nil, errdefs.Conflict(err) - } - } - - parsedRef, err := reference.ParseNormalizedNamed(imageRef) - if err != nil { - return nil, err - } - - parsedRef, err = i.removeImageRef(parsedRef) - if err != nil { - return nil, err - } - - untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(parsedRef)} - - i.LogImageEvent(imgID.String(), imgID.String(), "untag") - records = append(records, untaggedRecord) - - repoRefs = i.referenceStore.References(imgID.Digest()) - - // If a tag reference was removed and the only remaining - // references to the same repository are digest references, - // then clean up those digest references. - if _, isCanonical := parsedRef.(reference.Canonical); !isCanonical { - foundRepoTagRef := false - for _, repoRef := range repoRefs { - if _, repoRefIsCanonical := repoRef.(reference.Canonical); !repoRefIsCanonical && parsedRef.Name() == repoRef.Name() { - foundRepoTagRef = true - break - } - } - if !foundRepoTagRef { - // Remove canonical references from same repository - var remainingRefs []reference.Named - for _, repoRef := range repoRefs { - if _, repoRefIsCanonical := repoRef.(reference.Canonical); repoRefIsCanonical && parsedRef.Name() == repoRef.Name() { - if _, err := i.removeImageRef(repoRef); err != nil { - return records, err - } - - untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(repoRef)} - records = append(records, untaggedRecord) - } else { - remainingRefs = append(remainingRefs, repoRef) - - } - } - repoRefs = remainingRefs - } - } - - // If it has remaining references then the untag finished the remove - if len(repoRefs) > 0 { - return records, nil - } - - removedRepositoryRef = true - } else { - // If an ID reference was given AND there is at most one tag - // reference to the image AND all references are within one - // repository, then remove all references. 
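// The mask arithmetic just below (c := conflictHard, then
// c |= conflictSoft &^ conflictActiveReference) works because conflictType is
// a bitmask built from iota shifts. The same pattern standalone, with
// illustrative flag names:

package main

import "fmt"

type conflict int

const (
	dependentChild conflict = 1 << iota // 1
	runningContainer                    // 2
	activeReference                     // 4
	stoppedContainer                    // 8

	hard = dependentChild | runningContainer
	soft = activeReference | stoppedContainer
)

func main() {
	mask := hard
	mask |= soft &^ activeReference         // add soft conflicts, minus one flag
	fmt.Println(mask&runningContainer != 0) // true: still checked
	fmt.Println(mask&activeReference != 0)  // false: cleared by &^
}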
- if isSingleReference(repoRefs) { - c := conflictHard - if !force { - c |= conflictSoft &^ conflictActiveReference - } - if conflict := i.checkImageDeleteConflict(imgID, c); conflict != nil { - return nil, conflict - } - - for _, repoRef := range repoRefs { - parsedRef, err := i.removeImageRef(repoRef) - if err != nil { - return nil, err - } - - untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(parsedRef)} - - i.LogImageEvent(imgID.String(), imgID.String(), "untag") - records = append(records, untaggedRecord) - } - } - } - - if err := i.imageDeleteHelper(imgID, &records, force, prune, removedRepositoryRef); err != nil { - return nil, err - } - - imageActions.WithValues("delete").UpdateSince(start) - - return records, nil -} - -// isSingleReference returns true when all references are from one repository -// and there is at most one tag. Returns false for empty input. -func isSingleReference(repoRefs []reference.Named) bool { - if len(repoRefs) <= 1 { - return len(repoRefs) == 1 - } - var singleRef reference.Named - canonicalRefs := map[string]struct{}{} - for _, repoRef := range repoRefs { - if _, isCanonical := repoRef.(reference.Canonical); isCanonical { - canonicalRefs[repoRef.Name()] = struct{}{} - } else if singleRef == nil { - singleRef = repoRef - } else { - return false - } - } - if singleRef == nil { - // Just use first canonical ref - singleRef = repoRefs[0] - } - _, ok := canonicalRefs[singleRef.Name()] - return len(canonicalRefs) == 1 && ok -} - -// isImageIDPrefix returns whether the given possiblePrefix is a prefix of the -// given imageID. -func isImageIDPrefix(imageID, possiblePrefix string) bool { - if strings.HasPrefix(imageID, possiblePrefix) { - return true - } - - if i := strings.IndexRune(imageID, ':'); i >= 0 { - return strings.HasPrefix(imageID[i+1:], possiblePrefix) - } - - return false -} - -// removeImageRef attempts to parse and remove the given image reference from -// this daemon's store of repository tag/digest references. The given -// repositoryRef must not be an image ID but a repository name followed by an -// optional tag or digest reference. If tag or digest is omitted, the default -// tag is used. Returns the resolved image reference and an error. -func (i *ImageService) removeImageRef(ref reference.Named) (reference.Named, error) { - ref = reference.TagNameOnly(ref) - - // Ignore the boolean value returned, as far as we're concerned, this - // is an idempotent operation and it's okay if the reference didn't - // exist in the first place. - _, err := i.referenceStore.Delete(ref) - - return ref, err -} - -// removeAllReferencesToImageID attempts to remove every reference to the given -// imgID from this daemon's store of repository tag/digest references. Returns -// on the first encountered error. Removed references are logged to this -// daemon's event service. An "Untagged" types.ImageDeleteResponseItem is added to the -// given list of records. 
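// isImageIDPrefix above accepts a prefix of either the full ID, including the
// algorithm, or of just the hex portion after "sha256:". A quick standalone
// check of both forms; the digest value is made up.

package main

import (
	"fmt"
	"strings"
)

func isIDPrefix(imageID, possiblePrefix string) bool {
	if strings.HasPrefix(imageID, possiblePrefix) {
		return true
	}
	if i := strings.IndexRune(imageID, ':'); i >= 0 {
		return strings.HasPrefix(imageID[i+1:], possiblePrefix)
	}
	return false
}

func main() {
	id := "sha256:4bcdffca9d7e"
	fmt.Println(isIDPrefix(id, "sha256:4bcd")) // true: full-form prefix
	fmt.Println(isIDPrefix(id, "4bcd"))        // true: bare hex prefix
	fmt.Println(isIDPrefix(id, "ffca"))        // false: substring, not prefix
}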
-func (i *ImageService) removeAllReferencesToImageID(imgID image.ID, records *[]types.ImageDeleteResponseItem) error { - imageRefs := i.referenceStore.References(imgID.Digest()) - - for _, imageRef := range imageRefs { - parsedRef, err := i.removeImageRef(imageRef) - if err != nil { - return err - } - - untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(parsedRef)} - - i.LogImageEvent(imgID.String(), imgID.String(), "untag") - *records = append(*records, untaggedRecord) - } - - return nil -} - -// ImageDeleteConflict holds a soft or hard conflict and an associated error. -// Implements the error interface. -type imageDeleteConflict struct { - hard bool - used bool - imgID image.ID - message string -} - -func (idc *imageDeleteConflict) Error() string { - var forceMsg string - if idc.hard { - forceMsg = "cannot be forced" - } else { - forceMsg = "must be forced" - } - - return fmt.Sprintf("conflict: unable to delete %s (%s) - %s", stringid.TruncateID(idc.imgID.String()), forceMsg, idc.message) -} - -func (idc *imageDeleteConflict) Conflict() {} - -// imageDeleteHelper attempts to delete the given image from this daemon. If -// the image has any hard delete conflicts (child images or running containers -// using the image) then it cannot be deleted. If the image has any soft delete -// conflicts (any tags/digests referencing the image or any stopped container -// using the image) then it can only be deleted if force is true. If the delete -// succeeds and prune is true, the parent images are also deleted if they do -// not have any soft or hard delete conflicts themselves. Any deleted images -// and untagged references are appended to the given records. If any error or -// conflict is encountered, it will be returned immediately without deleting -// the image. If quiet is true, any encountered conflicts will be ignored and -// the function will return nil immediately without deleting the image. -func (i *ImageService) imageDeleteHelper(imgID image.ID, records *[]types.ImageDeleteResponseItem, force, prune, quiet bool) error { - // First, determine if this image has any conflicts. Ignore soft conflicts - // if force is true. - c := conflictHard - if !force { - c |= conflictSoft - } - if conflict := i.checkImageDeleteConflict(imgID, c); conflict != nil { - if quiet && (!i.imageIsDangling(imgID) || conflict.used) { - // Ignore conflicts UNLESS the image is "dangling" or not being used in - // which case we want the user to know. - return nil - } - - // There was a conflict and it's either a hard conflict OR we are not - // forcing deletion on soft conflicts. - return conflict - } - - parent, err := i.imageStore.GetParent(imgID) - if err != nil { - // There may be no parent - parent = "" - } - - // Delete all repository tag/digest references to this image. - if err := i.removeAllReferencesToImageID(imgID, records); err != nil { - return err - } - - removedLayers, err := i.imageStore.Delete(imgID) - if err != nil { - return err - } - - i.LogImageEvent(imgID.String(), imgID.String(), "delete") - *records = append(*records, types.ImageDeleteResponseItem{Deleted: imgID.String()}) - for _, removedLayer := range removedLayers { - *records = append(*records, types.ImageDeleteResponseItem{Deleted: removedLayer.ChainID.String()}) - } - - if !prune || parent == "" { - return nil - } - - // We need to prune the parent image. This means delete it if there are - // no tags/digests referencing it and there are no containers using it ( - // either running or stopped). 
- // Do not force prunings, but do so quietly (stopping on any encountered - // conflicts). - return i.imageDeleteHelper(parent, records, false, true, true) -} - -// checkImageDeleteConflict determines whether there are any conflicts -// preventing deletion of the given image from this daemon. A hard conflict is -// any image which has the given image as a parent or any running container -// using the image. A soft conflict is any tags/digest referencing the given -// image or any stopped container using the image. If ignoreSoftConflicts is -// true, this function will not check for soft conflict conditions. -func (i *ImageService) checkImageDeleteConflict(imgID image.ID, mask conflictType) *imageDeleteConflict { - // Check if the image has any descendant images. - if mask&conflictDependentChild != 0 && len(i.imageStore.Children(imgID)) > 0 { - return &imageDeleteConflict{ - hard: true, - imgID: imgID, - message: "image has dependent child images", - } - } - - if mask&conflictRunningContainer != 0 { - // Check if any running container is using the image. - running := func(c *container.Container) bool { - return c.IsRunning() && c.ImageID == imgID - } - if container := i.containers.First(running); container != nil { - return &imageDeleteConflict{ - imgID: imgID, - hard: true, - used: true, - message: fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(container.ID)), - } - } - } - - // Check if any repository tags/digest reference this image. - if mask&conflictActiveReference != 0 && len(i.referenceStore.References(imgID.Digest())) > 0 { - return &imageDeleteConflict{ - imgID: imgID, - message: "image is referenced in multiple repositories", - } - } - - if mask&conflictStoppedContainer != 0 { - // Check if any stopped containers reference this image. - stopped := func(c *container.Container) bool { - return !c.IsRunning() && c.ImageID == imgID - } - if container := i.containers.First(stopped); container != nil { - return &imageDeleteConflict{ - imgID: imgID, - used: true, - message: fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(container.ID)), - } - } - } - - return nil -} - -// imageIsDangling returns whether the given image is "dangling" which means -// that there are no repository references to the given image and it has no -// child images. -func (i *ImageService) imageIsDangling(imgID image.ID) bool { - return !(len(i.referenceStore.References(imgID.Digest())) > 0 || len(i.imageStore.Children(imgID)) > 0) -} diff --git a/vendor/github.com/docker/docker/daemon/images/image_events.go b/vendor/github.com/docker/docker/daemon/images/image_events.go deleted file mode 100644 index d0b3064d7..000000000 --- a/vendor/github.com/docker/docker/daemon/images/image_events.go +++ /dev/null @@ -1,39 +0,0 @@ -package images // import "github.com/docker/docker/daemon/images" - -import ( - "github.com/docker/docker/api/types/events" -) - -// LogImageEvent generates an event related to an image with only the default attributes. -func (i *ImageService) LogImageEvent(imageID, refName, action string) { - i.LogImageEventWithAttributes(imageID, refName, action, map[string]string{}) -} - -// LogImageEventWithAttributes generates an event related to an image with specific given attributes. -func (i *ImageService) LogImageEventWithAttributes(imageID, refName, action string, attributes map[string]string) { - img, err := i.GetImage(imageID) - if err == nil && img.Config != nil { - // image has not been removed yet. 
- // it could be missing if the event is `delete`. - copyAttributes(attributes, img.Config.Labels) - } - if refName != "" { - attributes["name"] = refName - } - actor := events.Actor{ - ID: imageID, - Attributes: attributes, - } - - i.eventsService.Log(action, events.ImageEventType, actor) -} - -// copyAttributes guarantees that labels are not mutated by event triggers. -func copyAttributes(attributes, labels map[string]string) { - if labels == nil { - return - } - for k, v := range labels { - attributes[k] = v - } -} diff --git a/vendor/github.com/docker/docker/daemon/images/image_exporter.go b/vendor/github.com/docker/docker/daemon/images/image_exporter.go deleted file mode 100644 index 58105dcb7..000000000 --- a/vendor/github.com/docker/docker/daemon/images/image_exporter.go +++ /dev/null @@ -1,25 +0,0 @@ -package images // import "github.com/docker/docker/daemon/images" - -import ( - "io" - - "github.com/docker/docker/image/tarexport" -) - -// ExportImage exports a list of images to the given output stream. The -// exported images are archived into a tar when written to the output -// stream. All images with the given tag and all versions containing -// the same tag are exported. names is the set of tags to export, and -// outStream is the writer which the images are written to. -func (i *ImageService) ExportImage(names []string, outStream io.Writer) error { - imageExporter := tarexport.NewTarExporter(i.imageStore, i.layerStores, i.referenceStore, i) - return imageExporter.Save(names, outStream) -} - -// LoadImage uploads a set of images into the repository. This is the -// complement of ImageExport. The input stream is an uncompressed tar -// ball containing images and metadata. -func (i *ImageService) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { - imageExporter := tarexport.NewTarExporter(i.imageStore, i.layerStores, i.referenceStore, i) - return imageExporter.Load(inTar, outStream, quiet) -} diff --git a/vendor/github.com/docker/docker/daemon/images/image_history.go b/vendor/github.com/docker/docker/daemon/images/image_history.go deleted file mode 100644 index b4ca25b1b..000000000 --- a/vendor/github.com/docker/docker/daemon/images/image_history.go +++ /dev/null @@ -1,87 +0,0 @@ -package images // import "github.com/docker/docker/daemon/images" - -import ( - "fmt" - "time" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/system" -) - -// ImageHistory returns a slice of ImageHistory structures for the specified image -// name by walking the image lineage. 
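// copyAttributes above is needed because Go maps are reference types: handing
// img.Config.Labels straight to an event actor would let later writes to the
// attributes map silently mutate the image's labels. A self-contained
// illustration (the sample keys are invented):

package main

import "fmt"

func main() {
	labels := map[string]string{"maintainer": "diana"}

	aliased := labels // same backing map, not a copy
	copied := map[string]string{}
	for k, v := range labels { // what copyAttributes does
		copied[k] = v
	}

	aliased["exitCode"] = "1"

	fmt.Println(labels["exitCode"]) // "1": the alias mutated labels
	fmt.Println(copied["exitCode"]) // "": the copy is isolated
}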
-func (i *ImageService) ImageHistory(name string) ([]*image.HistoryResponseItem, error) { - start := time.Now() - img, err := i.GetImage(name) - if err != nil { - return nil, err - } - - history := []*image.HistoryResponseItem{} - - layerCounter := 0 - rootFS := *img.RootFS - rootFS.DiffIDs = nil - - for _, h := range img.History { - var layerSize int64 - - if !h.EmptyLayer { - if len(img.RootFS.DiffIDs) <= layerCounter { - return nil, fmt.Errorf("too many non-empty layers in History section") - } - if !system.IsOSSupported(img.OperatingSystem()) { - return nil, system.ErrNotSupportedOperatingSystem - } - rootFS.Append(img.RootFS.DiffIDs[layerCounter]) - l, err := i.layerStores[img.OperatingSystem()].Get(rootFS.ChainID()) - if err != nil { - return nil, err - } - layerSize, err = l.DiffSize() - layer.ReleaseAndLog(i.layerStores[img.OperatingSystem()], l) - if err != nil { - return nil, err - } - - layerCounter++ - } - - history = append([]*image.HistoryResponseItem{{ - ID: "", - Created: h.Created.Unix(), - CreatedBy: h.CreatedBy, - Comment: h.Comment, - Size: layerSize, - }}, history...) - } - - // Fill in image IDs and tags - histImg := img - id := img.ID() - for _, h := range history { - h.ID = id.String() - - var tags []string - for _, r := range i.referenceStore.References(id.Digest()) { - if _, ok := r.(reference.NamedTagged); ok { - tags = append(tags, reference.FamiliarString(r)) - } - } - - h.Tags = tags - - id = histImg.Parent - if id == "" { - break - } - histImg, err = i.GetImage(id.String()) - if err != nil { - break - } - } - imageActions.WithValues("history").UpdateSince(start) - return history, nil -} diff --git a/vendor/github.com/docker/docker/daemon/images/image_import.go b/vendor/github.com/docker/docker/daemon/images/image_import.go deleted file mode 100644 index 8d54e0704..000000000 --- a/vendor/github.com/docker/docker/daemon/images/image_import.go +++ /dev/null @@ -1,138 +0,0 @@ -package images // import "github.com/docker/docker/daemon/images" - -import ( - "encoding/json" - "io" - "net/http" - "net/url" - "runtime" - "strings" - "time" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/builder/dockerfile" - "github.com/docker/docker/builder/remotecontext" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/streamformatter" - "github.com/pkg/errors" -) - -// ImportImage imports an image, getting the archived layer data either from -// inConfig (if src is "-"), or from a URI specified in src. Progress output is -// written to outStream. Repository and tag names can optionally be given in -// the repo and tag arguments, respectively. -func (i *ImageService) ImportImage(src string, repository, os string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error { - var ( - rc io.ReadCloser - resp *http.Response - newRef reference.Named - ) - - // Default the operating system if not supplied. 
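// ImageHistory above iterates img.History oldest-first but prepends each
// response item, so the returned slice comes out newest-first. The prepend
// idiom in isolation:

package main

import "fmt"

func main() {
	var history []string
	for _, step := range []string{"FROM", "RUN", "CMD"} { // oldest to newest
		history = append([]string{step}, history...) // prepend
	}
	fmt.Println(history) // [CMD RUN FROM]: newest first
}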
- if os == "" { - os = runtime.GOOS - } - - if repository != "" { - var err error - newRef, err = reference.ParseNormalizedNamed(repository) - if err != nil { - return errdefs.InvalidParameter(err) - } - if _, isCanonical := newRef.(reference.Canonical); isCanonical { - return errdefs.InvalidParameter(errors.New("cannot import digest reference")) - } - - if tag != "" { - newRef, err = reference.WithTag(newRef, tag) - if err != nil { - return errdefs.InvalidParameter(err) - } - } - } - - config, err := dockerfile.BuildFromConfig(&container.Config{}, changes, os) - if err != nil { - return err - } - if src == "-" { - rc = inConfig - } else { - inConfig.Close() - if len(strings.Split(src, "://")) == 1 { - src = "http://" + src - } - u, err := url.Parse(src) - if err != nil { - return errdefs.InvalidParameter(err) - } - - resp, err = remotecontext.GetWithStatusError(u.String()) - if err != nil { - return err - } - outStream.Write(streamformatter.FormatStatus("", "Downloading from %s", u)) - progressOutput := streamformatter.NewJSONProgressOutput(outStream, true) - rc = progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Importing") - } - - defer rc.Close() - if len(msg) == 0 { - msg = "Imported from " + src - } - - inflatedLayerData, err := archive.DecompressStream(rc) - if err != nil { - return err - } - l, err := i.layerStores[os].Register(inflatedLayerData, "") - if err != nil { - return err - } - defer layer.ReleaseAndLog(i.layerStores[os], l) - - created := time.Now().UTC() - imgConfig, err := json.Marshal(&image.Image{ - V1Image: image.V1Image{ - DockerVersion: dockerversion.Version, - Config: config, - Architecture: runtime.GOARCH, - OS: os, - Created: created, - Comment: msg, - }, - RootFS: &image.RootFS{ - Type: "layers", - DiffIDs: []layer.DiffID{l.DiffID()}, - }, - History: []image.History{{ - Created: created, - Comment: msg, - }}, - }) - if err != nil { - return err - } - - id, err := i.imageStore.Create(imgConfig) - if err != nil { - return err - } - - // FIXME: connect with commit code and call refstore directly - if newRef != nil { - if err := i.TagImageWithReference(id, newRef); err != nil { - return err - } - } - - i.LogImageEvent(id.String(), id.String(), "import") - outStream.Write(streamformatter.FormatStatus("", id.String())) - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/images/image_inspect.go b/vendor/github.com/docker/docker/daemon/images/image_inspect.go deleted file mode 100644 index 16c4c9b2d..000000000 --- a/vendor/github.com/docker/docker/daemon/images/image_inspect.go +++ /dev/null @@ -1,104 +0,0 @@ -package images // import "github.com/docker/docker/daemon/images" - -import ( - "time" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/system" - "github.com/pkg/errors" -) - -// LookupImage looks up an image by name and returns it as an ImageInspect -// structure. 
-func (i *ImageService) LookupImage(name string) (*types.ImageInspect, error) { - img, err := i.GetImage(name) - if err != nil { - return nil, errors.Wrapf(err, "no such image: %s", name) - } - if !system.IsOSSupported(img.OperatingSystem()) { - return nil, system.ErrNotSupportedOperatingSystem - } - refs := i.referenceStore.References(img.ID().Digest()) - repoTags := []string{} - repoDigests := []string{} - for _, ref := range refs { - switch ref.(type) { - case reference.NamedTagged: - repoTags = append(repoTags, reference.FamiliarString(ref)) - case reference.Canonical: - repoDigests = append(repoDigests, reference.FamiliarString(ref)) - } - } - - var size int64 - var layerMetadata map[string]string - layerID := img.RootFS.ChainID() - if layerID != "" { - l, err := i.layerStores[img.OperatingSystem()].Get(layerID) - if err != nil { - return nil, err - } - defer layer.ReleaseAndLog(i.layerStores[img.OperatingSystem()], l) - size, err = l.Size() - if err != nil { - return nil, err - } - - layerMetadata, err = l.Metadata() - if err != nil { - return nil, err - } - } - - comment := img.Comment - if len(comment) == 0 && len(img.History) > 0 { - comment = img.History[len(img.History)-1].Comment - } - - lastUpdated, err := i.imageStore.GetLastUpdated(img.ID()) - if err != nil { - return nil, err - } - - imageInspect := &types.ImageInspect{ - ID: img.ID().String(), - RepoTags: repoTags, - RepoDigests: repoDigests, - Parent: img.Parent.String(), - Comment: comment, - Created: img.Created.Format(time.RFC3339Nano), - Container: img.Container, - ContainerConfig: &img.ContainerConfig, - DockerVersion: img.DockerVersion, - Author: img.Author, - Config: img.Config, - Architecture: img.Architecture, - Os: img.OperatingSystem(), - OsVersion: img.OSVersion, - Size: size, - VirtualSize: size, // TODO: field unused, deprecate - RootFS: rootFSToAPIType(img.RootFS), - Metadata: types.ImageMetadata{ - LastTagTime: lastUpdated, - }, - } - - imageInspect.GraphDriver.Name = i.layerStores[img.OperatingSystem()].DriverName() - imageInspect.GraphDriver.Data = layerMetadata - - return imageInspect, nil -} - -func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { - var layers []string - for _, l := range rootfs.DiffIDs { - layers = append(layers, l.String()) - } - return types.RootFS{ - Type: rootfs.Type, - Layers: layers, - } -} diff --git a/vendor/github.com/docker/docker/daemon/images/image_prune.go b/vendor/github.com/docker/docker/daemon/images/image_prune.go deleted file mode 100644 index 313494f2f..000000000 --- a/vendor/github.com/docker/docker/daemon/images/image_prune.go +++ /dev/null @@ -1,211 +0,0 @@ -package images // import "github.com/docker/docker/daemon/images" - -import ( - "context" - "fmt" - "sync/atomic" - "time" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - timetypes "github.com/docker/docker/api/types/time" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -var imagesAcceptedFilters = map[string]bool{ - "dangling": true, - "label": true, - "label!": true, - "until": true, -} - -// errPruneRunning is returned when a prune request is received while -// one is in progress -var errPruneRunning = errdefs.Conflict(errors.New("a prune operation is already running")) - -// ImagesPrune removes unused images -func (i *ImageService) ImagesPrune(ctx 
context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) { - if !atomic.CompareAndSwapInt32(&i.pruneRunning, 0, 1) { - return nil, errPruneRunning - } - defer atomic.StoreInt32(&i.pruneRunning, 0) - - // make sure that only accepted filters have been received - err := pruneFilters.Validate(imagesAcceptedFilters) - if err != nil { - return nil, err - } - - rep := &types.ImagesPruneReport{} - - danglingOnly := true - if pruneFilters.Contains("dangling") { - if pruneFilters.ExactMatch("dangling", "false") || pruneFilters.ExactMatch("dangling", "0") { - danglingOnly = false - } else if !pruneFilters.ExactMatch("dangling", "true") && !pruneFilters.ExactMatch("dangling", "1") { - return nil, invalidFilter{"dangling", pruneFilters.Get("dangling")} - } - } - - until, err := getUntilFromPruneFilters(pruneFilters) - if err != nil { - return nil, err - } - - var allImages map[image.ID]*image.Image - if danglingOnly { - allImages = i.imageStore.Heads() - } else { - allImages = i.imageStore.Map() - } - - // Filter intermediary images and get their unique size - allLayers := make(map[layer.ChainID]layer.Layer) - for _, ls := range i.layerStores { - for k, v := range ls.Map() { - allLayers[k] = v - } - } - topImages := map[image.ID]*image.Image{} - for id, img := range allImages { - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - dgst := digest.Digest(id) - if len(i.referenceStore.References(dgst)) == 0 && len(i.imageStore.Children(id)) != 0 { - continue - } - if !until.IsZero() && img.Created.After(until) { - continue - } - if img.Config != nil && !matchLabels(pruneFilters, img.Config.Labels) { - continue - } - topImages[id] = img - } - } - - canceled := false -deleteImagesLoop: - for id := range topImages { - select { - case <-ctx.Done(): - // we still want to calculate freed size and return the data - canceled = true - break deleteImagesLoop - default: - } - - deletedImages := []types.ImageDeleteResponseItem{} - refs := i.referenceStore.References(id.Digest()) - if len(refs) > 0 { - shouldDelete := !danglingOnly - if !shouldDelete { - hasTag := false - for _, ref := range refs { - if _, ok := ref.(reference.NamedTagged); ok { - hasTag = true - break - } - } - - // Only delete if it's untagged (i.e. repo:<none>) - shouldDelete = !hasTag - } - - if shouldDelete { - for _, ref := range refs { - imgDel, err := i.ImageDelete(ref.String(), false, true) - if imageDeleteFailed(ref.String(), err) { - continue - } - deletedImages = append(deletedImages, imgDel...) - } - } - } else { - hex := id.Digest().Hex() - imgDel, err := i.ImageDelete(hex, false, true) - if imageDeleteFailed(hex, err) { - continue - } - deletedImages = append(deletedImages, imgDel...) - } - - rep.ImagesDeleted = append(rep.ImagesDeleted, deletedImages...)
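// ImagesPrune above rejects overlapping prune requests with an atomic
// compare-and-swap on an int32 flag instead of holding a mutex for the whole
// image walk. The guard in isolation:

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

var pruneRunning int32

func prune() error {
	if !atomic.CompareAndSwapInt32(&pruneRunning, 0, 1) {
		return errors.New("a prune operation is already running")
	}
	defer atomic.StoreInt32(&pruneRunning, 0) // always clear the flag
	// ... long-running prune work ...
	return nil
}

func main() {
	fmt.Println(prune()) // <nil>
	fmt.Println(prune()) // <nil>: sequential calls are fine; only a call that
	// overlaps a running prune sees the "already running" error
}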
- } - - // Compute how much space was freed - for _, d := range rep.ImagesDeleted { - if d.Deleted != "" { - chid := layer.ChainID(d.Deleted) - if l, ok := allLayers[chid]; ok { - diffSize, err := l.DiffSize() - if err != nil { - logrus.Warnf("failed to get layer %s size: %v", chid, err) - continue - } - rep.SpaceReclaimed += uint64(diffSize) - } - } - } - - if canceled { - logrus.Debugf("ImagesPrune operation cancelled: %#v", *rep) - } - - return rep, nil -} - -func imageDeleteFailed(ref string, err error) bool { - switch { - case err == nil: - return false - case errdefs.IsConflict(err): - return true - default: - logrus.Warnf("failed to prune image %s: %v", ref, err) - return true - } -} - -func matchLabels(pruneFilters filters.Args, labels map[string]string) bool { - if !pruneFilters.MatchKVList("label", labels) { - return false - } - // By default MatchKVList will return true if field (like 'label!') does not exist - // So we have to add additional Contains("label!") check - if pruneFilters.Contains("label!") { - if pruneFilters.MatchKVList("label!", labels) { - return false - } - } - return true -} - -func getUntilFromPruneFilters(pruneFilters filters.Args) (time.Time, error) { - until := time.Time{} - if !pruneFilters.Contains("until") { - return until, nil - } - untilFilters := pruneFilters.Get("until") - if len(untilFilters) > 1 { - return until, fmt.Errorf("more than one until filter specified") - } - ts, err := timetypes.GetTimestamp(untilFilters[0], time.Now()) - if err != nil { - return until, err - } - seconds, nanoseconds, err := timetypes.ParseTimestamps(ts, 0) - if err != nil { - return until, err - } - until = time.Unix(seconds, nanoseconds) - return until, nil -} diff --git a/vendor/github.com/docker/docker/daemon/images/image_pull.go b/vendor/github.com/docker/docker/daemon/images/image_pull.go deleted file mode 100644 index 238c38b6b..000000000 --- a/vendor/github.com/docker/docker/daemon/images/image_pull.go +++ /dev/null @@ -1,131 +0,0 @@ -package images // import "github.com/docker/docker/daemon/images" - -import ( - "context" - "io" - "runtime" - "strings" - "time" - - dist "github.com/docker/distribution" - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/distribution" - progressutils "github.com/docker/docker/distribution/utils" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/registry" - "github.com/opencontainers/go-digest" -) - -// PullImage initiates a pull operation. image is the repository name to pull, and -// tag may be either empty, or indicate a specific tag to pull. -func (i *ImageService) PullImage(ctx context.Context, image, tag, os string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { - start := time.Now() - // Special case: "pull -a" may send an image name with a - // trailing :. This is ugly, but let's not break API - // compatibility. - image = strings.TrimSuffix(image, ":") - - ref, err := reference.ParseNormalizedNamed(image) - if err != nil { - return errdefs.InvalidParameter(err) - } - - if tag != "" { - // The "tag" could actually be a digest. 
- var dgst digest.Digest - dgst, err = digest.Parse(tag) - if err == nil { - ref, err = reference.WithDigest(reference.TrimNamed(ref), dgst) - } else { - ref, err = reference.WithTag(ref, tag) - } - if err != nil { - return errdefs.InvalidParameter(err) - } - } - - err = i.pullImageWithReference(ctx, ref, os, metaHeaders, authConfig, outStream) - imageActions.WithValues("pull").UpdateSince(start) - return err -} - -func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference.Named, os string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { - // Include a buffer so that slow client connections don't affect - // transfer performance. - progressChan := make(chan progress.Progress, 100) - - writesDone := make(chan struct{}) - - ctx, cancelFunc := context.WithCancel(ctx) - - go func() { - progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) - close(writesDone) - }() - - // Default to the host OS platform in case it hasn't been populated with an explicit value. - if os == "" { - os = runtime.GOOS - } - - imagePullConfig := &distribution.ImagePullConfig{ - Config: distribution.Config{ - MetaHeaders: metaHeaders, - AuthConfig: authConfig, - ProgressOutput: progress.ChanOutput(progressChan), - RegistryService: i.registryService, - ImageEventLogger: i.LogImageEvent, - MetadataStore: i.distributionMetadataStore, - ImageStore: distribution.NewImageConfigStoreFromStore(i.imageStore), - ReferenceStore: i.referenceStore, - }, - DownloadManager: i.downloadManager, - Schema2Types: distribution.ImageTypes, - OS: os, - } - - err := distribution.Pull(ctx, ref, imagePullConfig) - close(progressChan) - <-writesDone - return err -} - -// GetRepository returns a repository from the registry. 
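GetRepository, reproduced next, walks the candidate pull endpoints in order, skips protocol versions it cannot use, remembers the last failure, and stops at the first confirmed success. A stdlib-only sketch of that fallback loop, with an illustrative dial callback standing in for NewV2Repository:

package main

import (
	"errors"
	"fmt"
)

// firstWorking tries candidates in order, remembers the last failure, and
// stops at the first success, mirroring GetRepository's endpoint loop.
func firstWorking(endpoints []string, dial func(string) error) (string, error) {
	var lastErr error
	for _, ep := range endpoints {
		if err := dial(ep); err != nil {
			lastErr = err
			continue
		}
		return ep, nil
	}
	if lastErr == nil {
		lastErr = errors.New("no endpoints to try")
	}
	return "", lastErr
}

func main() {
	ep, err := firstWorking(
		[]string{"https://mirror.example", "https://registry-1.docker.io"},
		func(ep string) error {
			if ep == "https://mirror.example" {
				return errors.New("mirror unreachable") // simulate a dead mirror
			}
			return nil
		})
	fmt.Println(ep, err)
}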
-func (i *ImageService) GetRepository(ctx context.Context, ref reference.Named, authConfig *types.AuthConfig) (dist.Repository, bool, error) { - // get repository info - repoInfo, err := i.registryService.ResolveRepository(ref) - if err != nil { - return nil, false, err - } - // makes sure name is not empty or `scratch` - if err := distribution.ValidateRepoName(repoInfo.Name); err != nil { - return nil, false, errdefs.InvalidParameter(err) - } - - // get endpoints - endpoints, err := i.registryService.LookupPullEndpoints(reference.Domain(repoInfo.Name)) - if err != nil { - return nil, false, err - } - - // retrieve repository - var ( - confirmedV2 bool - repository dist.Repository - lastError error - ) - - for _, endpoint := range endpoints { - if endpoint.Version == registry.APIVersion1 { - continue - } - - repository, confirmedV2, lastError = distribution.NewV2Repository(ctx, repoInfo, endpoint, nil, authConfig, "pull") - if lastError == nil && confirmedV2 { - break - } - } - return repository, confirmedV2, lastError -} diff --git a/vendor/github.com/docker/docker/daemon/images/image_push.go b/vendor/github.com/docker/docker/daemon/images/image_push.go deleted file mode 100644 index 4c7be8d2e..000000000 --- a/vendor/github.com/docker/docker/daemon/images/image_push.go +++ /dev/null @@ -1,66 +0,0 @@ -package images // import "github.com/docker/docker/daemon/images" - -import ( - "context" - "io" - "time" - - "github.com/docker/distribution/manifest/schema2" - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/distribution" - progressutils "github.com/docker/docker/distribution/utils" - "github.com/docker/docker/pkg/progress" -) - -// PushImage initiates a push operation on the repository named localName. -func (i *ImageService) PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { - start := time.Now() - ref, err := reference.ParseNormalizedNamed(image) - if err != nil { - return err - } - if tag != "" { - // Push by digest is not supported, so only tags are supported. - ref, err = reference.WithTag(ref, tag) - if err != nil { - return err - } - } - - // Include a buffer so that slow client connections don't affect - // transfer performance. 
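That buffering note applies to both the pull and push paths: progress messages go through a 100-entry channel, a dedicated goroutine drains it to the client, and a done channel lets the caller block until the last write is flushed. A stdlib-only sketch of the pattern (the real code additionally hands the drain goroutine a cancel func so a broken client connection aborts the transfer):

package main

import (
	"fmt"
	"time"
)

// progressPump decouples producers from a slow writer with a buffered
// channel, drains it on a goroutine, and waits for the final flush.
func progressPump(produce func(chan<- string)) {
	progressChan := make(chan string, 100) // buffered: slow clients must not stall the transfer
	writesDone := make(chan struct{})

	go func() {
		defer close(writesDone)
		for msg := range progressChan {
			fmt.Println(msg) // stands in for WriteDistributionProgress
		}
	}()

	produce(progressChan)
	close(progressChan) // producer finished; let the drain loop exit
	<-writesDone        // and wait for the last write before returning
}

func main() {
	progressPump(func(ch chan<- string) {
		for i := 0; i < 3; i++ {
			ch <- fmt.Sprintf("layer %d: pull complete", i)
			time.Sleep(10 * time.Millisecond)
		}
	})
}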
- progressChan := make(chan progress.Progress, 100) - - writesDone := make(chan struct{}) - - ctx, cancelFunc := context.WithCancel(ctx) - - go func() { - progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) - close(writesDone) - }() - - imagePushConfig := &distribution.ImagePushConfig{ - Config: distribution.Config{ - MetaHeaders: metaHeaders, - AuthConfig: authConfig, - ProgressOutput: progress.ChanOutput(progressChan), - RegistryService: i.registryService, - ImageEventLogger: i.LogImageEvent, - MetadataStore: i.distributionMetadataStore, - ImageStore: distribution.NewImageConfigStoreFromStore(i.imageStore), - ReferenceStore: i.referenceStore, - }, - ConfigMediaType: schema2.MediaTypeImageConfig, - LayerStores: distribution.NewLayerProvidersFromStores(i.layerStores), - TrustKey: i.trustKey, - UploadManager: i.uploadManager, - } - - err = distribution.Push(ctx, ref, imagePushConfig) - close(progressChan) - <-writesDone - imageActions.WithValues("push").UpdateSince(start) - return err -} diff --git a/vendor/github.com/docker/docker/daemon/images/image_search.go b/vendor/github.com/docker/docker/daemon/images/image_search.go deleted file mode 100644 index 8b65ec709..000000000 --- a/vendor/github.com/docker/docker/daemon/images/image_search.go +++ /dev/null @@ -1,95 +0,0 @@ -package images // import "github.com/docker/docker/daemon/images" - -import ( - "context" - "strconv" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/dockerversion" -) - -var acceptedSearchFilterTags = map[string]bool{ - "is-automated": true, - "is-official": true, - "stars": true, -} - -// SearchRegistryForImages queries the registry for images matching -// term. authConfig is used to login. -// -// TODO: this could be implemented in a registry service instead of the image -// service. 
-func (i *ImageService) SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int, - authConfig *types.AuthConfig, - headers map[string][]string) (*registrytypes.SearchResults, error) { - - searchFilters, err := filters.FromJSON(filtersArgs) - if err != nil { - return nil, err - } - if err := searchFilters.Validate(acceptedSearchFilterTags); err != nil { - return nil, err - } - - var isAutomated, isOfficial bool - var hasStarFilter = 0 - if searchFilters.Contains("is-automated") { - if searchFilters.UniqueExactMatch("is-automated", "true") { - isAutomated = true - } else if !searchFilters.UniqueExactMatch("is-automated", "false") { - return nil, invalidFilter{"is-automated", searchFilters.Get("is-automated")} - } - } - if searchFilters.Contains("is-official") { - if searchFilters.UniqueExactMatch("is-official", "true") { - isOfficial = true - } else if !searchFilters.UniqueExactMatch("is-official", "false") { - return nil, invalidFilter{"is-official", searchFilters.Get("is-official")} - } - } - if searchFilters.Contains("stars") { - hasStars := searchFilters.Get("stars") - for _, hasStar := range hasStars { - iHasStar, err := strconv.Atoi(hasStar) - if err != nil { - return nil, invalidFilter{"stars", hasStar} - } - if iHasStar > hasStarFilter { - hasStarFilter = iHasStar - } - } - } - - unfilteredResult, err := i.registryService.Search(ctx, term, limit, authConfig, dockerversion.DockerUserAgent(ctx), headers) - if err != nil { - return nil, err - } - - filteredResults := []registrytypes.SearchResult{} - for _, result := range unfilteredResult.Results { - if searchFilters.Contains("is-automated") { - if isAutomated != result.IsAutomated { - continue - } - } - if searchFilters.Contains("is-official") { - if isOfficial != result.IsOfficial { - continue - } - } - if searchFilters.Contains("stars") { - if result.StarCount < hasStarFilter { - continue - } - } - filteredResults = append(filteredResults, result) - } - - return ®istrytypes.SearchResults{ - Query: unfilteredResult.Query, - NumResults: len(filteredResults), - Results: filteredResults, - }, nil -} diff --git a/vendor/github.com/docker/docker/daemon/images/image_tag.go b/vendor/github.com/docker/docker/daemon/images/image_tag.go deleted file mode 100644 index 4693611c3..000000000 --- a/vendor/github.com/docker/docker/daemon/images/image_tag.go +++ /dev/null @@ -1,41 +0,0 @@ -package images // import "github.com/docker/docker/daemon/images" - -import ( - "github.com/docker/distribution/reference" - "github.com/docker/docker/image" -) - -// TagImage creates the tag specified by newTag, pointing to the image named -// imageName (alternatively, imageName can also be an image ID). -func (i *ImageService) TagImage(imageName, repository, tag string) (string, error) { - img, err := i.GetImage(imageName) - if err != nil { - return "", err - } - - newTag, err := reference.ParseNormalizedNamed(repository) - if err != nil { - return "", err - } - if tag != "" { - if newTag, err = reference.WithTag(reference.TrimNamed(newTag), tag); err != nil { - return "", err - } - } - - err = i.TagImageWithReference(img.ID(), newTag) - return reference.FamiliarString(newTag), err -} - -// TagImageWithReference adds the given reference to the image ID provided. 
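The search implementation above can only pass term and limit to the registry, so every other predicate (stars, is-official, is-automated) is applied client-side after the results come back. A stdlib-only sketch of that post-fetch narrowing; the searchResult type and the *bool encoding of a present-or-absent filter are illustrative:

package main

import "fmt"

type searchResult struct {
	Name        string
	StarCount   int
	IsOfficial  bool
	IsAutomated bool
}

// filterResults drops entries that fail the requested predicates, the same
// narrowing SearchRegistryForImages performs on unfilteredResult.Results.
// A nil official means the filter was not supplied at all.
func filterResults(in []searchResult, minStars int, official *bool) []searchResult {
	out := make([]searchResult, 0, len(in))
	for _, r := range in {
		if r.StarCount < minStars {
			continue
		}
		if official != nil && r.IsOfficial != *official {
			continue
		}
		out = append(out, r)
	}
	return out
}

func main() {
	official := true
	results := filterResults([]searchResult{
		{Name: "alpine", StarCount: 9000, IsOfficial: true},
		{Name: "someone/alpine", StarCount: 12},
	}, 100, &official)
	fmt.Println(results)
}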
-func (i *ImageService) TagImageWithReference(imageID image.ID, newTag reference.Named) error { - if err := i.referenceStore.AddTag(newTag, imageID.Digest(), true); err != nil { - return err - } - - if err := i.imageStore.SetLastUpdated(imageID); err != nil { - return err - } - i.LogImageEvent(imageID.String(), reference.FamiliarString(newTag), "tag") - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/images/image_unix.go b/vendor/github.com/docker/docker/daemon/images/image_unix.go deleted file mode 100644 index 3f577271a..000000000 --- a/vendor/github.com/docker/docker/daemon/images/image_unix.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build linux freebsd - -package images // import "github.com/docker/docker/daemon/images" - -import ( - "runtime" - - "github.com/sirupsen/logrus" -) - -// GetContainerLayerSize returns the real size & virtual size of the container. -func (i *ImageService) GetContainerLayerSize(containerID string) (int64, int64) { - var ( - sizeRw, sizeRootfs int64 - err error - ) - - // Safe to index by runtime.GOOS as Unix hosts don't support multiple - // container operating systems. - rwlayer, err := i.layerStores[runtime.GOOS].GetRWLayer(containerID) - if err != nil { - logrus.Errorf("Failed to compute size of container rootfs %v: %v", containerID, err) - return sizeRw, sizeRootfs - } - defer i.layerStores[runtime.GOOS].ReleaseRWLayer(rwlayer) - - sizeRw, err = rwlayer.Size() - if err != nil { - logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", - i.layerStores[runtime.GOOS].DriverName(), containerID, err) - // FIXME: GetSize should return an error. Not changing it now in case - // there is a side-effect. - sizeRw = -1 - } - - if parent := rwlayer.Parent(); parent != nil { - sizeRootfs, err = parent.Size() - if err != nil { - sizeRootfs = -1 - } else if sizeRw != -1 { - sizeRootfs += sizeRw - } - } - return sizeRw, sizeRootfs -} diff --git a/vendor/github.com/docker/docker/daemon/images/image_windows.go b/vendor/github.com/docker/docker/daemon/images/image_windows.go deleted file mode 100644 index 6f4be4973..000000000 --- a/vendor/github.com/docker/docker/daemon/images/image_windows.go +++ /dev/null @@ -1,41 +0,0 @@ -package images - -import ( - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/system" - "github.com/pkg/errors" -) - -// GetContainerLayerSize returns real size & virtual size -func (i *ImageService) GetContainerLayerSize(containerID string) (int64, int64) { - // TODO Windows - return 0, 0 -} - -// GetLayerFolders returns the layer folders from an image RootFS -func (i *ImageService) GetLayerFolders(img *image.Image, rwLayer layer.RWLayer) ([]string, error) { - folders := []string{} - max := len(img.RootFS.DiffIDs) - for index := 1; index <= max; index++ { - // FIXME: why does this mutate the RootFS? - img.RootFS.DiffIDs = img.RootFS.DiffIDs[:index] - if !system.IsOSSupported(img.OperatingSystem()) { - return nil, errors.Wrapf(system.ErrNotSupportedOperatingSystem, "cannot get layerpath for ImageID %s", img.RootFS.ChainID()) - } - layerPath, err := layer.GetLayerPath(i.layerStores[img.OperatingSystem()], img.RootFS.ChainID()) - if err != nil { - return nil, errors.Wrapf(err, "failed to get layer path from graphdriver %s for ImageID %s", i.layerStores[img.OperatingSystem()], img.RootFS.ChainID()) - } - // Reverse order, expecting parent first - folders = append([]string{layerPath}, folders...) 
- } - if rwLayer == nil { - return nil, errors.New("RWLayer is unexpectedly nil") - } - m, err := rwLayer.Metadata() - if err != nil { - return nil, errors.Wrap(err, "failed to get layer metadata") - } - return append(folders, m["dir"]), nil -} diff --git a/vendor/github.com/docker/docker/daemon/images/images.go b/vendor/github.com/docker/docker/daemon/images/images.go deleted file mode 100644 index 49212341c..000000000 --- a/vendor/github.com/docker/docker/daemon/images/images.go +++ /dev/null @@ -1,348 +0,0 @@ -package images // import "github.com/docker/docker/daemon/images" - -import ( - "encoding/json" - "fmt" - "sort" - "time" - - "github.com/pkg/errors" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/container" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/system" -) - -var acceptedImageFilterTags = map[string]bool{ - "dangling": true, - "label": true, - "before": true, - "since": true, - "reference": true, -} - -// byCreated is a temporary type used to sort a list of images by creation -// time. -type byCreated []*types.ImageSummary - -func (r byCreated) Len() int { return len(r) } -func (r byCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r byCreated) Less(i, j int) bool { return r[i].Created < r[j].Created } - -// Map returns a map of all images in the ImageStore -func (i *ImageService) Map() map[image.ID]*image.Image { - return i.imageStore.Map() -} - -// Images returns a filtered list of images. filterArgs is a JSON-encoded set -// of filter arguments which will be interpreted by api/types/filters. -// filter is a shell glob string applied to repository names. The argument -// named all controls whether all images in the graph are filtered, or just -// the heads. 
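byCreated above is the whole sorting story for image listing: an ascending Less over unix timestamps, wrapped in sort.Reverse so the newest image comes first. A runnable sketch of the same arrangement with an illustrative imageSummary type:

package main

import (
	"fmt"
	"sort"
)

type imageSummary struct {
	ID      string
	Created int64 // unix seconds, as in types.ImageSummary
}

// byCreated sorts ascending by creation time; callers reverse it so the
// newest image is listed first.
type byCreated []*imageSummary

func (r byCreated) Len() int           { return len(r) }
func (r byCreated) Swap(i, j int)      { r[i], r[j] = r[j], r[i] }
func (r byCreated) Less(i, j int) bool { return r[i].Created < r[j].Created }

func main() {
	images := byCreated{
		{ID: "sha256:aaa", Created: 100},
		{ID: "sha256:ccc", Created: 300},
		{ID: "sha256:bbb", Created: 200},
	}
	sort.Sort(sort.Reverse(images))
	for _, img := range images {
		fmt.Println(img.ID, img.Created)
	}
}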
-func (i *ImageService) Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) { - var ( - allImages map[image.ID]*image.Image - err error - danglingOnly = false - ) - - if err := imageFilters.Validate(acceptedImageFilterTags); err != nil { - return nil, err - } - - if imageFilters.Contains("dangling") { - if imageFilters.ExactMatch("dangling", "true") { - danglingOnly = true - } else if !imageFilters.ExactMatch("dangling", "false") { - return nil, invalidFilter{"dangling", imageFilters.Get("dangling")} - } - } - if danglingOnly { - allImages = i.imageStore.Heads() - } else { - allImages = i.imageStore.Map() - } - - var beforeFilter, sinceFilter *image.Image - err = imageFilters.WalkValues("before", func(value string) error { - beforeFilter, err = i.GetImage(value) - return err - }) - if err != nil { - return nil, err - } - - err = imageFilters.WalkValues("since", func(value string) error { - sinceFilter, err = i.GetImage(value) - return err - }) - if err != nil { - return nil, err - } - - images := []*types.ImageSummary{} - var imagesMap map[*image.Image]*types.ImageSummary - var layerRefs map[layer.ChainID]int - var allLayers map[layer.ChainID]layer.Layer - var allContainers []*container.Container - - for id, img := range allImages { - if beforeFilter != nil { - if img.Created.Equal(beforeFilter.Created) || img.Created.After(beforeFilter.Created) { - continue - } - } - - if sinceFilter != nil { - if img.Created.Equal(sinceFilter.Created) || img.Created.Before(sinceFilter.Created) { - continue - } - } - - if imageFilters.Contains("label") { - // Very old image that do not have image.Config (or even labels) - if img.Config == nil { - continue - } - // We are now sure image.Config is not nil - if !imageFilters.MatchKVList("label", img.Config.Labels) { - continue - } - } - - // Skip any images with an unsupported operating system to avoid a potential - // panic when indexing through the layerstore. Don't error as we want to list - // the other images. This should never happen, but here as a safety precaution. 
- if !system.IsOSSupported(img.OperatingSystem()) { - continue - } - - layerID := img.RootFS.ChainID() - var size int64 - if layerID != "" { - l, err := i.layerStores[img.OperatingSystem()].Get(layerID) - if err != nil { - // The layer may have been deleted between the call to `Map()` or - // `Heads()` and the call to `Get()`, so we just ignore this error - if err == layer.ErrLayerDoesNotExist { - continue - } - return nil, err - } - - size, err = l.Size() - layer.ReleaseAndLog(i.layerStores[img.OperatingSystem()], l) - if err != nil { - return nil, err - } - } - - newImage := newImage(img, size) - - for _, ref := range i.referenceStore.References(id.Digest()) { - if imageFilters.Contains("reference") { - var found bool - var matchErr error - for _, pattern := range imageFilters.Get("reference") { - found, matchErr = reference.FamiliarMatch(pattern, ref) - if matchErr != nil { - return nil, matchErr - } - } - if !found { - continue - } - } - if _, ok := ref.(reference.Canonical); ok { - newImage.RepoDigests = append(newImage.RepoDigests, reference.FamiliarString(ref)) - } - if _, ok := ref.(reference.NamedTagged); ok { - newImage.RepoTags = append(newImage.RepoTags, reference.FamiliarString(ref)) - } - } - if newImage.RepoDigests == nil && newImage.RepoTags == nil { - if all || len(i.imageStore.Children(id)) == 0 { - - if imageFilters.Contains("dangling") && !danglingOnly { - //dangling=false case, so dangling image is not needed - continue - } - if imageFilters.Contains("reference") { // skip images with no references if filtering by reference - continue - } - newImage.RepoDigests = []string{"@"} - newImage.RepoTags = []string{":"} - } else { - continue - } - } else if danglingOnly && len(newImage.RepoTags) > 0 { - continue - } - - if withExtraAttrs { - // lazily init variables - if imagesMap == nil { - allContainers = i.containers.List() - allLayers = i.layerStores[img.OperatingSystem()].Map() - imagesMap = make(map[*image.Image]*types.ImageSummary) - layerRefs = make(map[layer.ChainID]int) - } - - // Get container count - newImage.Containers = 0 - for _, c := range allContainers { - if c.ImageID == id { - newImage.Containers++ - } - } - - // count layer references - rootFS := *img.RootFS - rootFS.DiffIDs = nil - for _, id := range img.RootFS.DiffIDs { - rootFS.Append(id) - chid := rootFS.ChainID() - layerRefs[chid]++ - if _, ok := allLayers[chid]; !ok { - return nil, fmt.Errorf("layer %v was not found (corruption?)", chid) - } - } - imagesMap[img] = newImage - } - - images = append(images, newImage) - } - - if withExtraAttrs { - // Get Shared sizes - for img, newImage := range imagesMap { - rootFS := *img.RootFS - rootFS.DiffIDs = nil - - newImage.SharedSize = 0 - for _, id := range img.RootFS.DiffIDs { - rootFS.Append(id) - chid := rootFS.ChainID() - - diffSize, err := allLayers[chid].DiffSize() - if err != nil { - return nil, err - } - - if layerRefs[chid] > 1 { - newImage.SharedSize += diffSize - } - } - } - } - - sort.Sort(sort.Reverse(byCreated(images))) - - return images, nil -} - -// SquashImage creates a new image with the diff of the specified image and the specified parent. -// This new image contains only the layers from it's parent + 1 extra layer which contains the diff of all the layers in between. -// The existing image(s) is not destroyed. -// If no parent is specified, a new image with the diff of all the specified image's layers merged into a new layer that has no parents. 
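The withExtraAttrs branch above computes SharedSize by first counting, per layer chain ID, how many images reference it, and then charging a layer's diff size to an image only when that count exceeds one. A stdlib-only sketch of the accounting; for brevity it uses flat string IDs where the real code rebuilds cumulative ChainIDs with rootFS.Append:

package main

import "fmt"

type img struct{ chain []string } // chain stands in for a RootFS's chain IDs

// sharedSizes counts references per layer, then sums the diff sizes of
// layers used by more than one image, keyed by image index.
func sharedSizes(images []img, diffSize map[string]int64) map[int]int64 {
	layerRefs := map[string]int{}
	for _, im := range images {
		for _, id := range im.chain {
			layerRefs[id]++
		}
	}
	out := map[int]int64{}
	for i, im := range images {
		for _, id := range im.chain {
			if layerRefs[id] > 1 {
				out[i] += diffSize[id]
			}
		}
	}
	return out
}

func main() {
	base := "sha256:base"
	images := []img{
		{chain: []string{base, "sha256:app1"}},
		{chain: []string{base, "sha256:app2"}},
	}
	sizes := map[string]int64{base: 50, "sha256:app1": 10, "sha256:app2": 20}
	fmt.Println(sharedSizes(images, sizes)) // both images share the 50-byte base
}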
-func (i *ImageService) SquashImage(id, parent string) (string, error) { - - var ( - img *image.Image - err error - ) - if img, err = i.imageStore.Get(image.ID(id)); err != nil { - return "", err - } - - var parentImg *image.Image - var parentChainID layer.ChainID - if len(parent) != 0 { - parentImg, err = i.imageStore.Get(image.ID(parent)) - if err != nil { - return "", errors.Wrap(err, "error getting specified parent layer") - } - parentChainID = parentImg.RootFS.ChainID() - } else { - rootFS := image.NewRootFS() - parentImg = &image.Image{RootFS: rootFS} - } - if !system.IsOSSupported(img.OperatingSystem()) { - return "", errors.Wrap(err, system.ErrNotSupportedOperatingSystem.Error()) - } - l, err := i.layerStores[img.OperatingSystem()].Get(img.RootFS.ChainID()) - if err != nil { - return "", errors.Wrap(err, "error getting image layer") - } - defer i.layerStores[img.OperatingSystem()].Release(l) - - ts, err := l.TarStreamFrom(parentChainID) - if err != nil { - return "", errors.Wrapf(err, "error getting tar stream to parent") - } - defer ts.Close() - - newL, err := i.layerStores[img.OperatingSystem()].Register(ts, parentChainID) - if err != nil { - return "", errors.Wrap(err, "error registering layer") - } - defer i.layerStores[img.OperatingSystem()].Release(newL) - - newImage := *img - newImage.RootFS = nil - - rootFS := *parentImg.RootFS - rootFS.DiffIDs = append(rootFS.DiffIDs, newL.DiffID()) - newImage.RootFS = &rootFS - - for i, hi := range newImage.History { - if i >= len(parentImg.History) { - hi.EmptyLayer = true - } - newImage.History[i] = hi - } - - now := time.Now() - var historyComment string - if len(parent) > 0 { - historyComment = fmt.Sprintf("merge %s to %s", id, parent) - } else { - historyComment = fmt.Sprintf("create new from %s", id) - } - - newImage.History = append(newImage.History, image.History{ - Created: now, - Comment: historyComment, - }) - newImage.Created = now - - b, err := json.Marshal(&newImage) - if err != nil { - return "", errors.Wrap(err, "error marshalling image config") - } - - newImgID, err := i.imageStore.Create(b) - if err != nil { - return "", errors.Wrap(err, "error creating new image after squash") - } - return string(newImgID), nil -} - -func newImage(image *image.Image, size int64) *types.ImageSummary { - newImage := new(types.ImageSummary) - newImage.ParentID = image.Parent.String() - newImage.ID = image.ID().String() - newImage.Created = image.Created.Unix() - newImage.Size = size - newImage.VirtualSize = size - newImage.SharedSize = -1 - newImage.Containers = -1 - if image.Config != nil { - newImage.Labels = image.Config.Labels - } - return newImage -} diff --git a/vendor/github.com/docker/docker/daemon/images/locals.go b/vendor/github.com/docker/docker/daemon/images/locals.go deleted file mode 100644 index 5ffc460a0..000000000 --- a/vendor/github.com/docker/docker/daemon/images/locals.go +++ /dev/null @@ -1,32 +0,0 @@ -package images // import "github.com/docker/docker/daemon/images" - -import ( - "fmt" - - "github.com/docker/go-metrics" -) - -type invalidFilter struct { - filter string - value interface{} -} - -func (e invalidFilter) Error() string { - msg := "Invalid filter '" + e.filter - if e.value != nil { - msg += fmt.Sprintf("=%s", e.value) - } - return msg + "'" -} - -func (e invalidFilter) InvalidParameter() {} - -var imageActions metrics.LabeledTimer - -func init() { - ns := metrics.NewNamespace("engine", "daemon", nil) - imageActions = ns.NewLabeledTimer("image_actions", "The number of seconds it takes to process each image 
action", "action") - // TODO: is it OK to register a namespace with the same name? Or does this - // need to be exported from somewhere? - metrics.Register(ns) -} diff --git a/vendor/github.com/docker/docker/daemon/images/service.go b/vendor/github.com/docker/docker/daemon/images/service.go deleted file mode 100644 index 4af48959b..000000000 --- a/vendor/github.com/docker/docker/daemon/images/service.go +++ /dev/null @@ -1,229 +0,0 @@ -package images // import "github.com/docker/docker/daemon/images" - -import ( - "context" - "os" - - "github.com/docker/docker/container" - daemonevents "github.com/docker/docker/daemon/events" - "github.com/docker/docker/distribution/metadata" - "github.com/docker/docker/distribution/xfer" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - dockerreference "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/docker/libtrust" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -type containerStore interface { - // used by image delete - First(container.StoreFilter) *container.Container - // used by image prune, and image list - List() []*container.Container - // TODO: remove, only used for CommitBuildStep - Get(string) *container.Container -} - -// ImageServiceConfig is the configuration used to create a new ImageService -type ImageServiceConfig struct { - ContainerStore containerStore - DistributionMetadataStore metadata.Store - EventsService *daemonevents.Events - ImageStore image.Store - LayerStores map[string]layer.Store - MaxConcurrentDownloads int - MaxConcurrentUploads int - ReferenceStore dockerreference.Store - RegistryService registry.Service - TrustKey libtrust.PrivateKey -} - -// NewImageService returns a new ImageService from a configuration -func NewImageService(config ImageServiceConfig) *ImageService { - logrus.Debugf("Max Concurrent Downloads: %d", config.MaxConcurrentDownloads) - logrus.Debugf("Max Concurrent Uploads: %d", config.MaxConcurrentUploads) - return &ImageService{ - containers: config.ContainerStore, - distributionMetadataStore: config.DistributionMetadataStore, - downloadManager: xfer.NewLayerDownloadManager(config.LayerStores, config.MaxConcurrentDownloads), - eventsService: config.EventsService, - imageStore: config.ImageStore, - layerStores: config.LayerStores, - referenceStore: config.ReferenceStore, - registryService: config.RegistryService, - trustKey: config.TrustKey, - uploadManager: xfer.NewLayerUploadManager(config.MaxConcurrentUploads), - } -} - -// ImageService provides a backend for image management -type ImageService struct { - containers containerStore - distributionMetadataStore metadata.Store - downloadManager *xfer.LayerDownloadManager - eventsService *daemonevents.Events - imageStore image.Store - layerStores map[string]layer.Store // By operating system - pruneRunning int32 - referenceStore dockerreference.Store - registryService registry.Service - trustKey libtrust.PrivateKey - uploadManager *xfer.LayerUploadManager -} - -// CountImages returns the number of images stored by ImageService -// called from info.go -func (i *ImageService) CountImages() int { - return i.imageStore.Len() -} - -// Children returns the children image.IDs for a parent image. -// called from list.go to filter containers -// TODO: refactor to expose an ancestry for image.ID? -func (i *ImageService) Children(id image.ID) []image.ID { - return i.imageStore.Children(id) -} - -// CreateLayer creates a filesystem layer for a container. 
-// called from create.go -// TODO: accept an opt struct instead of container? -func (i *ImageService) CreateLayer(container *container.Container, initFunc layer.MountInit) (layer.RWLayer, error) { - var layerID layer.ChainID - if container.ImageID != "" { - img, err := i.imageStore.Get(container.ImageID) - if err != nil { - return nil, err - } - layerID = img.RootFS.ChainID() - } - - rwLayerOpts := &layer.CreateRWLayerOpts{ - MountLabel: container.MountLabel, - InitFunc: initFunc, - StorageOpt: container.HostConfig.StorageOpt, - } - - // Indexing by OS is safe here as validation of OS has already been performed in create() (the only - // caller), and guaranteed non-nil - return i.layerStores[container.OS].CreateRWLayer(container.ID, layerID, rwLayerOpts) -} - -// GetLayerByID returns a layer by ID and operating system -// called from daemon.go Daemon.restore(), and Daemon.containerExport() -func (i *ImageService) GetLayerByID(cid string, os string) (layer.RWLayer, error) { - return i.layerStores[os].GetRWLayer(cid) -} - -// LayerStoreStatus returns the status for each layer store -// called from info.go -func (i *ImageService) LayerStoreStatus() map[string][][2]string { - result := make(map[string][][2]string) - for os, store := range i.layerStores { - result[os] = store.DriverStatus() - } - return result -} - -// GetLayerMountID returns the mount ID for a layer -// called from daemon.go Daemon.Shutdown(), and Daemon.Cleanup() (cleanup is actually continerCleanup) -// TODO: needs to be refactored to Unmount (see callers), or removed and replaced -// with GetLayerByID -func (i *ImageService) GetLayerMountID(cid string, os string) (string, error) { - return i.layerStores[os].GetMountID(cid) -} - -// Cleanup resources before the process is shutdown. -// called from daemon.go Daemon.Shutdown() -func (i *ImageService) Cleanup() { - for os, ls := range i.layerStores { - if ls != nil { - if err := ls.Cleanup(); err != nil { - logrus.Errorf("Error during layer Store.Cleanup(): %v %s", err, os) - } - } - } -} - -// GraphDriverForOS returns the name of the graph drvier -// moved from Daemon.GraphDriverName, used by: -// - newContainer -// - to report an error in Daemon.Mount(container) -func (i *ImageService) GraphDriverForOS(os string) string { - return i.layerStores[os].DriverName() -} - -// ReleaseLayer releases a layer allowing it to be removed -// called from delete.go Daemon.cleanupContainer(), and Daemon.containerExport() -func (i *ImageService) ReleaseLayer(rwlayer layer.RWLayer, containerOS string) error { - metadata, err := i.layerStores[containerOS].ReleaseRWLayer(rwlayer) - layer.LogReleaseMetadata(metadata) - if err != nil && err != layer.ErrMountDoesNotExist && !os.IsNotExist(errors.Cause(err)) { - return errors.Wrapf(err, "driver %q failed to remove root filesystem", - i.layerStores[containerOS].DriverName()) - } - return nil -} - -// LayerDiskUsage returns the number of bytes used by layer stores -// called from disk_usage.go -func (i *ImageService) LayerDiskUsage(ctx context.Context) (int64, error) { - var allLayersSize int64 - layerRefs := i.getLayerRefs() - for _, ls := range i.layerStores { - allLayers := ls.Map() - for _, l := range allLayers { - select { - case <-ctx.Done(): - return allLayersSize, ctx.Err() - default: - size, err := l.DiffSize() - if err == nil { - if _, ok := layerRefs[l.ChainID()]; ok { - allLayersSize += size - } else { - logrus.Warnf("found leaked image layer %v", l.ChainID()) - } - } else { - logrus.Warnf("failed to get diff size for layer %v", 
l.ChainID()) - } - } - } - } - return allLayersSize, nil -} - -func (i *ImageService) getLayerRefs() map[layer.ChainID]int { - tmpImages := i.imageStore.Map() - layerRefs := map[layer.ChainID]int{} - for id, img := range tmpImages { - dgst := digest.Digest(id) - if len(i.referenceStore.References(dgst)) == 0 && len(i.imageStore.Children(id)) != 0 { - continue - } - - rootFS := *img.RootFS - rootFS.DiffIDs = nil - for _, id := range img.RootFS.DiffIDs { - rootFS.Append(id) - chid := rootFS.ChainID() - layerRefs[chid]++ - } - } - - return layerRefs -} - -// UpdateConfig values -// -// called from reload.go -func (i *ImageService) UpdateConfig(maxDownloads, maxUploads *int) { - if i.downloadManager != nil && maxDownloads != nil { - i.downloadManager.SetConcurrency(*maxDownloads) - } - if i.uploadManager != nil && maxUploads != nil { - i.uploadManager.SetConcurrency(*maxUploads) - } -} diff --git a/vendor/github.com/docker/docker/daemon/info.go b/vendor/github.com/docker/docker/daemon/info.go deleted file mode 100644 index 7b011fe32..000000000 --- a/vendor/github.com/docker/docker/daemon/info.go +++ /dev/null @@ -1,206 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "fmt" - "os" - "runtime" - "strings" - "time" - - "github.com/docker/docker/api" - "github.com/docker/docker/api/types" - "github.com/docker/docker/cli/debug" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/parsers/kernel" - "github.com/docker/docker/pkg/parsers/operatingsystem" - "github.com/docker/docker/pkg/platform" - "github.com/docker/docker/pkg/sysinfo" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/registry" - "github.com/docker/go-connections/sockets" - "github.com/sirupsen/logrus" -) - -// SystemInfo returns information about the host server the daemon is running on. 
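LayerDiskUsage above shows the daemon's standard shape for long accumulation loops: a select on ctx.Done() with an empty default on every iteration, so a cancelled API request returns promptly instead of walking every remaining layer. A stdlib-only sketch:

package main

import (
	"context"
	"fmt"
	"time"
)

// sumWithCtx accumulates sizes but checks for cancellation each iteration,
// returning the partial total and ctx.Err() if the caller gave up.
func sumWithCtx(ctx context.Context, sizes []int64) (int64, error) {
	var total int64
	for _, s := range sizes {
		select {
		case <-ctx.Done():
			return total, ctx.Err()
		default:
			total += s
		}
	}
	return total, nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	total, err := sumWithCtx(ctx, []int64{5 << 20, 3 << 20, 9 << 20})
	fmt.Println(total, err)
}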
-func (daemon *Daemon) SystemInfo() (*types.Info, error) { - kernelVersion := "" - if kv, err := kernel.GetKernelVersion(); err != nil { - logrus.Warnf("Could not get kernel version: %v", err) - } else { - kernelVersion = kv.String() - } - - operatingSystem := "" - if s, err := operatingsystem.GetOperatingSystem(); err != nil { - logrus.Warnf("Could not get operating system name: %v", err) - } else { - operatingSystem = s - } - - // Don't do containerized check on Windows - if runtime.GOOS != "windows" { - if inContainer, err := operatingsystem.IsContainerized(); err != nil { - logrus.Errorf("Could not determine if daemon is containerized: %v", err) - operatingSystem += " (error determining if containerized)" - } else if inContainer { - operatingSystem += " (containerized)" - } - } - - meminfo, err := system.ReadMemInfo() - if err != nil { - logrus.Errorf("Could not read system memory info: %v", err) - meminfo = &system.MemInfo{} - } - - sysInfo := sysinfo.New(true) - cRunning, cPaused, cStopped := stateCtr.get() - - securityOptions := []string{} - if sysInfo.AppArmor { - securityOptions = append(securityOptions, "name=apparmor") - } - if sysInfo.Seccomp && supportsSeccomp { - profile := daemon.seccompProfilePath - if profile == "" { - profile = "default" - } - securityOptions = append(securityOptions, fmt.Sprintf("name=seccomp,profile=%s", profile)) - } - if selinuxEnabled() { - securityOptions = append(securityOptions, "name=selinux") - } - rootIDs := daemon.idMappings.RootPair() - if rootIDs.UID != 0 || rootIDs.GID != 0 { - securityOptions = append(securityOptions, "name=userns") - } - - var ds [][2]string - drivers := "" - statuses := daemon.imageService.LayerStoreStatus() - for os, gd := range daemon.graphDrivers { - ds = append(ds, statuses[os]...) 
- drivers += gd - if len(daemon.graphDrivers) > 1 { - drivers += fmt.Sprintf(" (%s) ", os) - } - } - drivers = strings.TrimSpace(drivers) - - v := &types.Info{ - ID: daemon.ID, - Containers: cRunning + cPaused + cStopped, - ContainersRunning: cRunning, - ContainersPaused: cPaused, - ContainersStopped: cStopped, - Images: daemon.imageService.CountImages(), - Driver: drivers, - DriverStatus: ds, - Plugins: daemon.showPluginsInfo(), - IPv4Forwarding: !sysInfo.IPv4ForwardingDisabled, - BridgeNfIptables: !sysInfo.BridgeNFCallIPTablesDisabled, - BridgeNfIP6tables: !sysInfo.BridgeNFCallIP6TablesDisabled, - Debug: debug.IsEnabled(), - NFd: fileutils.GetTotalUsedFds(), - NGoroutines: runtime.NumGoroutine(), - SystemTime: time.Now().Format(time.RFC3339Nano), - LoggingDriver: daemon.defaultLogConfig.Type, - CgroupDriver: daemon.getCgroupDriver(), - NEventsListener: daemon.EventsService.SubscribersCount(), - KernelVersion: kernelVersion, - OperatingSystem: operatingSystem, - IndexServerAddress: registry.IndexServer, - OSType: platform.OSType, - Architecture: platform.Architecture, - RegistryConfig: daemon.RegistryService.ServiceConfig(), - NCPU: sysinfo.NumCPU(), - MemTotal: meminfo.MemTotal, - GenericResources: daemon.genericResources, - DockerRootDir: daemon.configStore.Root, - Labels: daemon.configStore.Labels, - ExperimentalBuild: daemon.configStore.Experimental, - ServerVersion: dockerversion.Version, - ClusterStore: daemon.configStore.ClusterStore, - ClusterAdvertise: daemon.configStore.ClusterAdvertise, - HTTPProxy: sockets.GetProxyEnv("http_proxy"), - HTTPSProxy: sockets.GetProxyEnv("https_proxy"), - NoProxy: sockets.GetProxyEnv("no_proxy"), - LiveRestoreEnabled: daemon.configStore.LiveRestoreEnabled, - SecurityOptions: securityOptions, - Isolation: daemon.defaultIsolation, - } - - // Retrieve platform specific info - daemon.FillPlatformInfo(v, sysInfo) - - hostname := "" - if hn, err := os.Hostname(); err != nil { - logrus.Warnf("Could not get hostname: %v", err) - } else { - hostname = hn - } - v.Name = hostname - - return v, nil -} - -// SystemVersion returns version information about the daemon. 
-func (daemon *Daemon) SystemVersion() types.Version { - kernelVersion := "" - if kv, err := kernel.GetKernelVersion(); err != nil { - logrus.Warnf("Could not get kernel version: %v", err) - } else { - kernelVersion = kv.String() - } - - v := types.Version{ - Components: []types.ComponentVersion{ - { - Name: "Engine", - Version: dockerversion.Version, - Details: map[string]string{ - "GitCommit": dockerversion.GitCommit, - "ApiVersion": api.DefaultVersion, - "MinAPIVersion": api.MinVersion, - "GoVersion": runtime.Version(), - "Os": runtime.GOOS, - "Arch": runtime.GOARCH, - "BuildTime": dockerversion.BuildTime, - "KernelVersion": kernelVersion, - "Experimental": fmt.Sprintf("%t", daemon.configStore.Experimental), - }, - }, - }, - - // Populate deprecated fields for older clients - Version: dockerversion.Version, - GitCommit: dockerversion.GitCommit, - APIVersion: api.DefaultVersion, - MinAPIVersion: api.MinVersion, - GoVersion: runtime.Version(), - Os: runtime.GOOS, - Arch: runtime.GOARCH, - BuildTime: dockerversion.BuildTime, - KernelVersion: kernelVersion, - Experimental: daemon.configStore.Experimental, - } - - v.Platform.Name = dockerversion.PlatformName - - return v -} - -func (daemon *Daemon) showPluginsInfo() types.PluginsInfo { - var pluginsInfo types.PluginsInfo - - pluginsInfo.Volume = daemon.volumes.GetDriverList() - pluginsInfo.Network = daemon.GetNetworkDriverList() - // The authorization plugins are returned in the order they are - // used as they constitute a request/response modification chain. - pluginsInfo.Authorization = daemon.configStore.AuthorizationPlugins - pluginsInfo.Log = logger.ListDrivers() - - return pluginsInfo -} diff --git a/vendor/github.com/docker/docker/daemon/info_unix.go b/vendor/github.com/docker/docker/daemon/info_unix.go deleted file mode 100644 index 56be9c06f..000000000 --- a/vendor/github.com/docker/docker/daemon/info_unix.go +++ /dev/null @@ -1,93 +0,0 @@ -// +build !windows - -package daemon // import "github.com/docker/docker/daemon" - -import ( - "context" - "os/exec" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/pkg/sysinfo" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// FillPlatformInfo fills the platform related info. 
-func (daemon *Daemon) FillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) { - v.MemoryLimit = sysInfo.MemoryLimit - v.SwapLimit = sysInfo.SwapLimit - v.KernelMemory = sysInfo.KernelMemory - v.OomKillDisable = sysInfo.OomKillDisable - v.CPUCfsPeriod = sysInfo.CPUCfsPeriod - v.CPUCfsQuota = sysInfo.CPUCfsQuota - v.CPUShares = sysInfo.CPUShares - v.CPUSet = sysInfo.Cpuset - v.Runtimes = daemon.configStore.GetAllRuntimes() - v.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName() - v.InitBinary = daemon.configStore.GetInitPath() - - v.RuncCommit.Expected = dockerversion.RuncCommitID - defaultRuntimeBinary := daemon.configStore.GetRuntime(v.DefaultRuntime).Path - if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil { - parts := strings.Split(strings.TrimSpace(string(rv)), "\n") - if len(parts) == 3 { - parts = strings.Split(parts[1], ": ") - if len(parts) == 2 { - v.RuncCommit.ID = strings.TrimSpace(parts[1]) - } - } - - if v.RuncCommit.ID == "" { - logrus.Warnf("failed to retrieve %s version: unknown output format: %s", defaultRuntimeBinary, string(rv)) - v.RuncCommit.ID = "N/A" - } - } else { - logrus.Warnf("failed to retrieve %s version: %v", defaultRuntimeBinary, err) - v.RuncCommit.ID = "N/A" - } - - v.ContainerdCommit.Expected = dockerversion.ContainerdCommitID - if rv, err := daemon.containerd.Version(context.Background()); err == nil { - v.ContainerdCommit.ID = rv.Revision - } else { - logrus.Warnf("failed to retrieve containerd version: %v", err) - v.ContainerdCommit.ID = "N/A" - } - - defaultInitBinary := daemon.configStore.GetInitPath() - if rv, err := exec.Command(defaultInitBinary, "--version").Output(); err == nil { - ver, err := parseInitVersion(string(rv)) - - if err != nil { - logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err) - } - v.InitCommit = ver - } else { - logrus.Warnf("failed to retrieve %s version: %s", defaultInitBinary, err) - v.InitCommit.ID = "N/A" - } -} - -// parseInitVersion parses a Tini version string, and extracts the version. -func parseInitVersion(v string) (types.Commit, error) { - version := types.Commit{ID: "", Expected: dockerversion.InitCommitID} - parts := strings.Split(strings.TrimSpace(v), " - ") - - if len(parts) >= 2 { - gitParts := strings.Split(parts[1], ".") - if len(gitParts) == 2 && gitParts[0] == "git" { - version.ID = gitParts[1] - version.Expected = dockerversion.InitCommitID[0:len(version.ID)] - } - } - if version.ID == "" && strings.HasPrefix(parts[0], "tini version ") { - version.ID = "v" + strings.TrimPrefix(parts[0], "tini version ") - } - if version.ID == "" { - version.ID = "N/A" - return version, errors.Errorf("unknown output format: %s", v) - } - return version, nil -} diff --git a/vendor/github.com/docker/docker/daemon/info_windows.go b/vendor/github.com/docker/docker/daemon/info_windows.go deleted file mode 100644 index e452369fc..000000000 --- a/vendor/github.com/docker/docker/daemon/info_windows.go +++ /dev/null @@ -1,10 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/sysinfo" -) - -// FillPlatformInfo fills the platform related info. 
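parseInitVersion above targets tini's banner, which looks like "tini version 0.18.0 - git.fec3683": the commit comes from splitting on " - " and the version from the "tini version " prefix, with N/A as the fallback. A runnable stdlib sketch of the same parsing (parseTini is an illustrative name):

package main

import (
	"errors"
	"fmt"
	"strings"
)

// parseTini extracts the version and git commit from tini's --version output.
func parseTini(out string) (version, commit string, err error) {
	parts := strings.Split(strings.TrimSpace(out), " - ")
	if len(parts) >= 2 {
		if git := strings.Split(parts[1], "."); len(git) == 2 && git[0] == "git" {
			commit = git[1]
		}
	}
	if strings.HasPrefix(parts[0], "tini version ") {
		version = strings.TrimPrefix(parts[0], "tini version ")
	}
	if version == "" && commit == "" {
		return "N/A", "N/A", errors.New("unknown output format: " + out)
	}
	return version, commit, nil
}

func main() {
	fmt.Println(parseTini("tini version 0.18.0 - git.fec3683"))
}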
-func (daemon *Daemon) FillPlatformInfo(v *types.Info, sysInfo *sysinfo.SysInfo) { -} diff --git a/vendor/github.com/docker/docker/daemon/initlayer/setup_unix.go b/vendor/github.com/docker/docker/daemon/initlayer/setup_unix.go deleted file mode 100644 index 035f62075..000000000 --- a/vendor/github.com/docker/docker/daemon/initlayer/setup_unix.go +++ /dev/null @@ -1,73 +0,0 @@ -// +build linux freebsd - -package initlayer // import "github.com/docker/docker/daemon/initlayer" - -import ( - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/containerfs" - "github.com/docker/docker/pkg/idtools" - "golang.org/x/sys/unix" -) - -// Setup populates a directory with mountpoints suitable -// for bind-mounting things into the container. -// -// This extra layer is used by all containers as the top-most ro layer. It protects -// the container from unwanted side-effects on the rw layer. -func Setup(initLayerFs containerfs.ContainerFS, rootIDs idtools.IDPair) error { - // Since all paths are local to the container, we can just extract initLayerFs.Path() - initLayer := initLayerFs.Path() - - for pth, typ := range map[string]string{ - "/dev/pts": "dir", - "/dev/shm": "dir", - "/proc": "dir", - "/sys": "dir", - "/.dockerenv": "file", - "/etc/resolv.conf": "file", - "/etc/hosts": "file", - "/etc/hostname": "file", - "/dev/console": "file", - "/etc/mtab": "/proc/mounts", - } { - parts := strings.Split(pth, "/") - prev := "/" - for _, p := range parts[1:] { - prev = filepath.Join(prev, p) - unix.Unlink(filepath.Join(initLayer, prev)) - } - - if _, err := os.Stat(filepath.Join(initLayer, pth)); err != nil { - if os.IsNotExist(err) { - if err := idtools.MkdirAllAndChownNew(filepath.Join(initLayer, filepath.Dir(pth)), 0755, rootIDs); err != nil { - return err - } - switch typ { - case "dir": - if err := idtools.MkdirAllAndChownNew(filepath.Join(initLayer, pth), 0755, rootIDs); err != nil { - return err - } - case "file": - f, err := os.OpenFile(filepath.Join(initLayer, pth), os.O_CREATE, 0755) - if err != nil { - return err - } - f.Chown(rootIDs.UID, rootIDs.GID) - f.Close() - default: - if err := os.Symlink(typ, filepath.Join(initLayer, pth)); err != nil { - return err - } - } - } else { - return err - } - } - } - - // Layer is ready to use, if it wasn't before. - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/initlayer/setup_windows.go b/vendor/github.com/docker/docker/daemon/initlayer/setup_windows.go deleted file mode 100644 index 1032092e6..000000000 --- a/vendor/github.com/docker/docker/daemon/initlayer/setup_windows.go +++ /dev/null @@ -1,16 +0,0 @@ -package initlayer // import "github.com/docker/docker/daemon/initlayer" - -import ( - "github.com/docker/docker/pkg/containerfs" - "github.com/docker/docker/pkg/idtools" -) - -// Setup populates a directory with mountpoints suitable -// for bind-mounting dockerinit into the container. The mountpoint is simply an -// empty file at /.dockerinit -// -// This extra layer is used by all containers as the top-most ro layer. It protects -// the container from unwanted side-effects on the rw layer. 
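The unix Setup above drives everything from one map of path to kind: "dir" and "file" create placeholders, and any other value is taken as a symlink target (hence the /etc/mtab entry pointing at /proc/mounts). A simplified stdlib-only sketch of that population step; it omits the idtools chown handling and the unlink pass of the real code:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// populate creates a directory, empty file, or symlink for each entry so
// that bind mounts inside the container always have a target.
func populate(root string, entries map[string]string) error {
	for pth, typ := range entries {
		full := filepath.Join(root, pth)
		if err := os.MkdirAll(filepath.Dir(full), 0755); err != nil {
			return err
		}
		switch typ {
		case "dir":
			if err := os.MkdirAll(full, 0755); err != nil {
				return err
			}
		case "file":
			f, err := os.OpenFile(full, os.O_CREATE, 0755)
			if err != nil {
				return err
			}
			f.Close()
		default: // anything else is a symlink target, e.g. /etc/mtab -> /proc/mounts
			if err := os.Symlink(typ, full); err != nil && !os.IsExist(err) {
				return err
			}
		}
	}
	return nil
}

func main() {
	root, _ := os.MkdirTemp("", "initlayer")
	defer os.RemoveAll(root)
	err := populate(root, map[string]string{
		"/proc":       "dir",
		"/.dockerenv": "file",
		"/etc/mtab":   "/proc/mounts",
	})
	fmt.Println(err)
}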
-func Setup(initLayer containerfs.ContainerFS, rootIDs idtools.IDPair) error { - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/inspect.go b/vendor/github.com/docker/docker/daemon/inspect.go deleted file mode 100644 index 45a215425..000000000 --- a/vendor/github.com/docker/docker/daemon/inspect.go +++ /dev/null @@ -1,273 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "errors" - "fmt" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/backend" - networktypes "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/versions" - "github.com/docker/docker/api/types/versions/v1p20" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/network" - "github.com/docker/docker/errdefs" - "github.com/docker/go-connections/nat" -) - -// ContainerInspect returns low-level information about a -// container. Returns an error if the container cannot be found, or if -// there is an error getting the data. -func (daemon *Daemon) ContainerInspect(name string, size bool, version string) (interface{}, error) { - switch { - case versions.LessThan(version, "1.20"): - return daemon.containerInspectPre120(name) - case versions.Equal(version, "1.20"): - return daemon.containerInspect120(name) - } - return daemon.ContainerInspectCurrent(name, size) -} - -// ContainerInspectCurrent returns low-level information about a -// container in a most recent api version. -func (daemon *Daemon) ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) { - container, err := daemon.GetContainer(name) - if err != nil { - return nil, err - } - - container.Lock() - - base, err := daemon.getInspectData(container) - if err != nil { - container.Unlock() - return nil, err - } - - apiNetworks := make(map[string]*networktypes.EndpointSettings) - for name, epConf := range container.NetworkSettings.Networks { - if epConf.EndpointSettings != nil { - // We must make a copy of this pointer object otherwise it can race with other operations - apiNetworks[name] = epConf.EndpointSettings.Copy() - } - } - - mountPoints := container.GetMountPoints() - networkSettings := &types.NetworkSettings{ - NetworkSettingsBase: types.NetworkSettingsBase{ - Bridge: container.NetworkSettings.Bridge, - SandboxID: container.NetworkSettings.SandboxID, - HairpinMode: container.NetworkSettings.HairpinMode, - LinkLocalIPv6Address: container.NetworkSettings.LinkLocalIPv6Address, - LinkLocalIPv6PrefixLen: container.NetworkSettings.LinkLocalIPv6PrefixLen, - SandboxKey: container.NetworkSettings.SandboxKey, - SecondaryIPAddresses: container.NetworkSettings.SecondaryIPAddresses, - SecondaryIPv6Addresses: container.NetworkSettings.SecondaryIPv6Addresses, - }, - DefaultNetworkSettings: daemon.getDefaultNetworkSettings(container.NetworkSettings.Networks), - Networks: apiNetworks, - } - - ports := make(nat.PortMap, len(container.NetworkSettings.Ports)) - for k, pm := range container.NetworkSettings.Ports { - ports[k] = pm - } - networkSettings.NetworkSettingsBase.Ports = ports - - container.Unlock() - - if size { - sizeRw, sizeRootFs := daemon.imageService.GetContainerLayerSize(base.ID) - base.SizeRw = &sizeRw - base.SizeRootFs = &sizeRootFs - } - - return &types.ContainerJSON{ - ContainerJSONBase: base, - Mounts: mountPoints, - Config: container.Config, - NetworkSettings: networkSettings, - }, nil -} - -// containerInspect120 serializes the master version of a container into a json type. 
-func (daemon *Daemon) containerInspect120(name string) (*v1p20.ContainerJSON, error) { - container, err := daemon.GetContainer(name) - if err != nil { - return nil, err - } - - container.Lock() - defer container.Unlock() - - base, err := daemon.getInspectData(container) - if err != nil { - return nil, err - } - - mountPoints := container.GetMountPoints() - config := &v1p20.ContainerConfig{ - Config: container.Config, - MacAddress: container.Config.MacAddress, - NetworkDisabled: container.Config.NetworkDisabled, - ExposedPorts: container.Config.ExposedPorts, - VolumeDriver: container.HostConfig.VolumeDriver, - } - networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings) - - return &v1p20.ContainerJSON{ - ContainerJSONBase: base, - Mounts: mountPoints, - Config: config, - NetworkSettings: networkSettings, - }, nil -} - -func (daemon *Daemon) getInspectData(container *container.Container) (*types.ContainerJSONBase, error) { - // make a copy to play with - hostConfig := *container.HostConfig - - children := daemon.children(container) - hostConfig.Links = nil // do not expose the internal structure - for linkAlias, child := range children { - hostConfig.Links = append(hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias)) - } - - // We merge the Ulimits from hostConfig with daemon default - daemon.mergeUlimits(&hostConfig) - - var containerHealth *types.Health - if container.State.Health != nil { - containerHealth = &types.Health{ - Status: container.State.Health.Status(), - FailingStreak: container.State.Health.FailingStreak, - Log: append([]*types.HealthcheckResult{}, container.State.Health.Log...), - } - } - - containerState := &types.ContainerState{ - Status: container.State.StateString(), - Running: container.State.Running, - Paused: container.State.Paused, - Restarting: container.State.Restarting, - OOMKilled: container.State.OOMKilled, - Dead: container.State.Dead, - Pid: container.State.Pid, - ExitCode: container.State.ExitCode(), - Error: container.State.ErrorMsg, - StartedAt: container.State.StartedAt.Format(time.RFC3339Nano), - FinishedAt: container.State.FinishedAt.Format(time.RFC3339Nano), - Health: containerHealth, - } - - contJSONBase := &types.ContainerJSONBase{ - ID: container.ID, - Created: container.Created.Format(time.RFC3339Nano), - Path: container.Path, - Args: container.Args, - State: containerState, - Image: container.ImageID.String(), - LogPath: container.LogPath, - Name: container.Name, - RestartCount: container.RestartCount, - Driver: container.Driver, - Platform: container.OS, - MountLabel: container.MountLabel, - ProcessLabel: container.ProcessLabel, - ExecIDs: container.GetExecIDs(), - HostConfig: &hostConfig, - } - - // Now set any platform-specific fields - contJSONBase = setPlatformSpecificContainerFields(container, contJSONBase) - - contJSONBase.GraphDriver.Name = container.Driver - - if container.RWLayer == nil { - if container.Dead { - return contJSONBase, nil - } - return nil, errdefs.System(errors.New("RWLayer of container " + container.ID + " is unexpectedly nil")) - } - - graphDriverData, err := container.RWLayer.Metadata() - // If container is marked as Dead, the container's graphdriver metadata - // could have been removed, it will cause error if we try to get the metadata, - // we can ignore the error if the container is dead. 
- if err != nil { - if !container.Dead { - return nil, errdefs.System(err) - } - } else { - contJSONBase.GraphDriver.Data = graphDriverData - } - - return contJSONBase, nil -} - -// ContainerExecInspect returns low-level information about the exec -// command. An error is returned if the exec cannot be found. -func (daemon *Daemon) ContainerExecInspect(id string) (*backend.ExecInspect, error) { - e := daemon.execCommands.Get(id) - if e == nil { - return nil, errExecNotFound(id) - } - - if container := daemon.containers.Get(e.ContainerID); container == nil { - return nil, errExecNotFound(id) - } - - pc := inspectExecProcessConfig(e) - - return &backend.ExecInspect{ - ID: e.ID, - Running: e.Running, - ExitCode: e.ExitCode, - ProcessConfig: pc, - OpenStdin: e.OpenStdin, - OpenStdout: e.OpenStdout, - OpenStderr: e.OpenStderr, - CanRemove: e.CanRemove, - ContainerID: e.ContainerID, - DetachKeys: e.DetachKeys, - Pid: e.Pid, - }, nil -} - -func (daemon *Daemon) getBackwardsCompatibleNetworkSettings(settings *network.Settings) *v1p20.NetworkSettings { - result := &v1p20.NetworkSettings{ - NetworkSettingsBase: types.NetworkSettingsBase{ - Bridge: settings.Bridge, - SandboxID: settings.SandboxID, - HairpinMode: settings.HairpinMode, - LinkLocalIPv6Address: settings.LinkLocalIPv6Address, - LinkLocalIPv6PrefixLen: settings.LinkLocalIPv6PrefixLen, - Ports: settings.Ports, - SandboxKey: settings.SandboxKey, - SecondaryIPAddresses: settings.SecondaryIPAddresses, - SecondaryIPv6Addresses: settings.SecondaryIPv6Addresses, - }, - DefaultNetworkSettings: daemon.getDefaultNetworkSettings(settings.Networks), - } - - return result -} - -// getDefaultNetworkSettings creates the deprecated structure that holds the information -// about the bridge network for a container. -func (daemon *Daemon) getDefaultNetworkSettings(networks map[string]*network.EndpointSettings) types.DefaultNetworkSettings { - var settings types.DefaultNetworkSettings - - if defaultNetwork, ok := networks["bridge"]; ok && defaultNetwork.EndpointSettings != nil { - settings.EndpointID = defaultNetwork.EndpointID - settings.Gateway = defaultNetwork.Gateway - settings.GlobalIPv6Address = defaultNetwork.GlobalIPv6Address - settings.GlobalIPv6PrefixLen = defaultNetwork.GlobalIPv6PrefixLen - settings.IPAddress = defaultNetwork.IPAddress - settings.IPPrefixLen = defaultNetwork.IPPrefixLen - settings.IPv6Gateway = defaultNetwork.IPv6Gateway - settings.MacAddress = defaultNetwork.MacAddress - } - return settings -} diff --git a/vendor/github.com/docker/docker/daemon/inspect_linux.go b/vendor/github.com/docker/docker/daemon/inspect_linux.go deleted file mode 100644 index 77a4c44d7..000000000 --- a/vendor/github.com/docker/docker/daemon/inspect_linux.go +++ /dev/null @@ -1,73 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/api/types/versions/v1p19" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/exec" -) - -// This sets platform-specific fields -func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { - contJSONBase.AppArmorProfile = container.AppArmorProfile - contJSONBase.ResolvConfPath = container.ResolvConfPath - contJSONBase.HostnamePath = container.HostnamePath - contJSONBase.HostsPath = container.HostsPath - - return contJSONBase -} - -// containerInspectPre120 gets containers for pre 1.20 APIs. 
-func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, error) { - container, err := daemon.GetContainer(name) - if err != nil { - return nil, err - } - - container.Lock() - defer container.Unlock() - - base, err := daemon.getInspectData(container) - if err != nil { - return nil, err - } - - volumes := make(map[string]string) - volumesRW := make(map[string]bool) - for _, m := range container.MountPoints { - volumes[m.Destination] = m.Path() - volumesRW[m.Destination] = m.RW - } - - config := &v1p19.ContainerConfig{ - Config: container.Config, - MacAddress: container.Config.MacAddress, - NetworkDisabled: container.Config.NetworkDisabled, - ExposedPorts: container.Config.ExposedPorts, - VolumeDriver: container.HostConfig.VolumeDriver, - Memory: container.HostConfig.Memory, - MemorySwap: container.HostConfig.MemorySwap, - CPUShares: container.HostConfig.CPUShares, - CPUSet: container.HostConfig.CpusetCpus, - } - networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings) - - return &v1p19.ContainerJSON{ - ContainerJSONBase: base, - Volumes: volumes, - VolumesRW: volumesRW, - Config: config, - NetworkSettings: networkSettings, - }, nil -} - -func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { - return &backend.ExecProcessConfig{ - Tty: e.Tty, - Entrypoint: e.Entrypoint, - Arguments: e.Args, - Privileged: &e.Privileged, - User: e.User, - } -} diff --git a/vendor/github.com/docker/docker/daemon/inspect_windows.go b/vendor/github.com/docker/docker/daemon/inspect_windows.go deleted file mode 100644 index 12fda670d..000000000 --- a/vendor/github.com/docker/docker/daemon/inspect_windows.go +++ /dev/null @@ -1,26 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/exec" -) - -// This sets platform-specific fields -func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { - return contJSONBase -} - -// containerInspectPre120 get containers for pre 1.20 APIs. -func (daemon *Daemon) containerInspectPre120(name string) (*types.ContainerJSON, error) { - return daemon.ContainerInspectCurrent(name, false) -} - -func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { - return &backend.ExecProcessConfig{ - Tty: e.Tty, - Entrypoint: e.Entrypoint, - Arguments: e.Args, - } -} diff --git a/vendor/github.com/docker/docker/daemon/keys.go b/vendor/github.com/docker/docker/daemon/keys.go deleted file mode 100644 index 946eaaab1..000000000 --- a/vendor/github.com/docker/docker/daemon/keys.go +++ /dev/null @@ -1,59 +0,0 @@ -// +build linux - -package daemon // import "github.com/docker/docker/daemon" - -import ( - "fmt" - "io/ioutil" - "os" - "strconv" - "strings" -) - -const ( - rootKeyFile = "/proc/sys/kernel/keys/root_maxkeys" - rootBytesFile = "/proc/sys/kernel/keys/root_maxbytes" - rootKeyLimit = 1000000 - // it is standard configuration to allocate 25 bytes per key - rootKeyByteMultiplier = 25 -) - -// ModifyRootKeyLimit checks to see if the root key limit is set to -// at least 1000000 and changes it to that limit along with the maxbytes -// allocated to the keys at a 25 to 1 multiplier. 
-func ModifyRootKeyLimit() error { - value, err := readRootKeyLimit(rootKeyFile) - if err != nil { - return err - } - if value < rootKeyLimit { - return setRootKeyLimit(rootKeyLimit) - } - return nil -} - -func setRootKeyLimit(limit int) error { - keys, err := os.OpenFile(rootKeyFile, os.O_WRONLY, 0) - if err != nil { - return err - } - defer keys.Close() - if _, err := fmt.Fprintf(keys, "%d", limit); err != nil { - return err - } - bytes, err := os.OpenFile(rootBytesFile, os.O_WRONLY, 0) - if err != nil { - return err - } - defer bytes.Close() - _, err = fmt.Fprintf(bytes, "%d", limit*rootKeyByteMultiplier) - return err -} - -func readRootKeyLimit(path string) (int, error) { - data, err := ioutil.ReadFile(path) - if err != nil { - return -1, err - } - return strconv.Atoi(strings.Trim(string(data), "\n")) -} diff --git a/vendor/github.com/docker/docker/daemon/keys_unsupported.go b/vendor/github.com/docker/docker/daemon/keys_unsupported.go deleted file mode 100644 index 2ccdb576d..000000000 --- a/vendor/github.com/docker/docker/daemon/keys_unsupported.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !linux - -package daemon // import "github.com/docker/docker/daemon" - -// ModifyRootKeyLimit is a noop on unsupported platforms. -func ModifyRootKeyLimit() error { - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/kill.go b/vendor/github.com/docker/docker/daemon/kill.go deleted file mode 100644 index 5034c4df3..000000000 --- a/vendor/github.com/docker/docker/daemon/kill.go +++ /dev/null @@ -1,180 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "context" - "fmt" - "runtime" - "syscall" - "time" - - containerpkg "github.com/docker/docker/container" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/pkg/signal" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -type errNoSuchProcess struct { - pid int - signal int -} - -func (e errNoSuchProcess) Error() string { - return fmt.Sprintf("Cannot kill process (pid=%d) with signal %d: no such process.", e.pid, e.signal) -} - -func (errNoSuchProcess) NotFound() {} - -// isErrNoSuchProcess returns true if the error -// is an instance of errNoSuchProcess. -func isErrNoSuchProcess(err error) bool { - _, ok := err.(errNoSuchProcess) - return ok -} - -// ContainerKill sends signal to the container -// If no signal is given (sig 0), then Kill with SIGKILL and wait -// for the container to exit. -// If a signal is given, then just send it to the container and return. -func (daemon *Daemon) ContainerKill(name string, sig uint64) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - - if sig != 0 && !signal.ValidSignalForPlatform(syscall.Signal(sig)) { - return fmt.Errorf("The %s daemon does not support signal %d", runtime.GOOS, sig) - } - - // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait()) - if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL { - return daemon.Kill(container) - } - return daemon.killWithSignal(container, int(sig)) -} - -// killWithSignal sends the container the given signal. This wrapper for the -// host specific kill command prepares the container before attempting -// to send the signal. An error is returned if the container is paused -// or not running, or if there is a problem returned from the -// underlying kill command. 
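errNoSuchProcess above classifies itself through a NotFound() marker method rather than a sentinel error value, the same behavior-based convention errdefs matches on. A minimal sketch of that pattern with illustrative names:

```go
package errdemo

import "fmt"

// notFound is the behavior we match on; callers never import the concrete type.
type notFound interface{ NotFound() }

type errNoSuchThing struct{ id string }

func (e errNoSuchThing) Error() string { return fmt.Sprintf("no such thing: %s", e.id) }

// NotFound is a marker method: implementing it is what classifies the error.
func (errNoSuchThing) NotFound() {}

// IsNotFound reports whether err carries the not-found behavior.
func IsNotFound(err error) bool {
	_, ok := err.(notFound)
	return ok
}
```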
-func (daemon *Daemon) killWithSignal(container *containerpkg.Container, sig int) error { - logrus.Debugf("Sending kill signal %d to container %s", sig, container.ID) - container.Lock() - defer container.Unlock() - - daemon.stopHealthchecks(container) - - if !container.Running { - return errNotRunning(container.ID) - } - - var unpause bool - if container.Config.StopSignal != "" && syscall.Signal(sig) != syscall.SIGKILL { - containerStopSignal, err := signal.ParseSignal(container.Config.StopSignal) - if err != nil { - return err - } - if containerStopSignal == syscall.Signal(sig) { - container.ExitOnNext() - unpause = container.Paused - } - } else { - container.ExitOnNext() - unpause = container.Paused - } - - if !daemon.IsShuttingDown() { - container.HasBeenManuallyStopped = true - } - - // if the container is currently restarting we do not need to send the signal - // to the process. Telling the monitor that it should exit on its next event - // loop is enough - if container.Restarting { - return nil - } - - if err := daemon.kill(container, sig); err != nil { - if errdefs.IsNotFound(err) { - unpause = false - logrus.WithError(err).WithField("container", container.ID).WithField("action", "kill").Debug("container kill failed because of 'container not found' or 'no such process'") - } else { - return errors.Wrapf(err, "Cannot kill container %s", container.ID) - } - } - - if unpause { - // above kill signal will be sent once resume is finished - if err := daemon.containerd.Resume(context.Background(), container.ID); err != nil { - logrus.Warnf("Cannot unpause container %s: %s", container.ID, err) - } - } - - attributes := map[string]string{ - "signal": fmt.Sprintf("%d", sig), - } - daemon.LogContainerEventWithAttributes(container, "kill", attributes) - return nil -} - -// Kill forcefully terminates a container. -func (daemon *Daemon) Kill(container *containerpkg.Container) error { - if !container.IsRunning() { - return errNotRunning(container.ID) - } - - // 1. Send SIGKILL - if err := daemon.killPossiblyDeadProcess(container, int(syscall.SIGKILL)); err != nil { - // While normally we might "return err" here we're not going to - // because if we can't stop the container by this point then - // it's probably because it's already stopped. Meaning, between - // the time of the IsRunning() call above and now it stopped. - // Also, since the err return will be environment specific we can't - // look for any particular (common) error that would indicate - // that the process is already dead vs something else going wrong. - // So, instead we'll give it up to 2 more seconds to complete and if - // by that time the container is still running, then the error - // we got is probably valid and so we return it to the caller. - if isErrNoSuchProcess(err) { - return nil - } - - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - - if status := <-container.Wait(ctx, containerpkg.WaitConditionNotRunning); status.Err() != nil { - return err - } - } - - // 2. Wait for the process to die, in last resort, try to kill the process directly - if err := killProcessDirectly(container); err != nil { - if isErrNoSuchProcess(err) { - return nil - } - return err - } - - // Wait for exit with no timeout. - // Ignore returned status. - <-container.Wait(context.Background(), containerpkg.WaitConditionNotRunning) - - return nil -} - -// killPossiblyDeadProcess is a wrapper around killWithSignal() suppressing "no such process" errors.
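Kill above deliberately refuses to trust the SIGKILL error right away, giving the container a bounded window to be observed as stopped first. The same control flow distilled, with stand-in function values instead of the daemon's types:

```go
package killdemo

import (
	"context"
	"time"
)

// forceStop sends a kill and, on failure, waits up to two seconds for the
// process to be observed as stopped before surfacing the original error.
// waitNotRunning yields nil once the stop is observed, or an error when
// the supplied context expires first.
func forceStop(kill func() error, waitNotRunning func(context.Context) <-chan error) error {
	if err := kill(); err != nil {
		// The process may have exited between the liveness check and the
		// kill; only return the error if it is still running afterwards.
		ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
		defer cancel()
		if werr := <-waitNotRunning(ctx); werr != nil {
			return err
		}
		return nil
	}
	// Kill was delivered: block until the exit is actually observed.
	<-waitNotRunning(context.Background())
	return nil
}
```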
-func (daemon *Daemon) killPossiblyDeadProcess(container *containerpkg.Container, sig int) error { - err := daemon.killWithSignal(container, sig) - if errdefs.IsNotFound(err) { - e := errNoSuchProcess{container.GetPID(), sig} - logrus.Debug(e) - return e - } - return err -} - -func (daemon *Daemon) kill(c *containerpkg.Container, sig int) error { - return daemon.containerd.SignalProcess(context.Background(), c.ID, libcontainerd.InitProcessName, sig) -} diff --git a/vendor/github.com/docker/docker/daemon/links.go b/vendor/github.com/docker/docker/daemon/links.go deleted file mode 100644 index 1639572fa..000000000 --- a/vendor/github.com/docker/docker/daemon/links.go +++ /dev/null @@ -1,91 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "sync" - - "github.com/docker/docker/container" -) - -// linkIndex stores link relationships between containers, including their specified alias -// The alias is the name the parent uses to reference the child -type linkIndex struct { - // idx maps a parent->alias->child relationship - idx map[*container.Container]map[string]*container.Container - // childIdx maps child->parent->aliases - childIdx map[*container.Container]map[*container.Container]map[string]struct{} - mu sync.Mutex -} - -func newLinkIndex() *linkIndex { - return &linkIndex{ - idx: make(map[*container.Container]map[string]*container.Container), - childIdx: make(map[*container.Container]map[*container.Container]map[string]struct{}), - } -} - -// link adds indexes for the passed in parent/child/alias relationships -func (l *linkIndex) link(parent, child *container.Container, alias string) { - l.mu.Lock() - - if l.idx[parent] == nil { - l.idx[parent] = make(map[string]*container.Container) - } - l.idx[parent][alias] = child - if l.childIdx[child] == nil { - l.childIdx[child] = make(map[*container.Container]map[string]struct{}) - } - if l.childIdx[child][parent] == nil { - l.childIdx[child][parent] = make(map[string]struct{}) - } - l.childIdx[child][parent][alias] = struct{}{} - - l.mu.Unlock() -} - -// unlink removes the requested alias for the given parent/child -func (l *linkIndex) unlink(alias string, child, parent *container.Container) { - l.mu.Lock() - delete(l.idx[parent], alias) - delete(l.childIdx[child], parent) - l.mu.Unlock() -} - -// children maps all the aliases-> children for the passed in parent -// aliases here are the aliases the parent uses to refer to the child -func (l *linkIndex) children(parent *container.Container) map[string]*container.Container { - l.mu.Lock() - children := l.idx[parent] - l.mu.Unlock() - return children -} - -// parents maps all the aliases->parent for the passed in child -// aliases here are the aliases the parents use to refer to the child -func (l *linkIndex) parents(child *container.Container) map[string]*container.Container { - l.mu.Lock() - - parents := make(map[string]*container.Container) - for parent, aliases := range l.childIdx[child] { - for alias := range aliases { - parents[alias] = parent - } - } - - l.mu.Unlock() - return parents -} - -// delete deletes all link relationships referencing this container -func (l *linkIndex) delete(container *container.Container) []string { - l.mu.Lock() - - var aliases []string - for alias, child := range l.idx[container] { - aliases = append(aliases, alias) - delete(l.childIdx[child], container) - } - delete(l.idx, container) - delete(l.childIdx, container) - l.mu.Unlock() - return aliases -} diff --git a/vendor/github.com/docker/docker/daemon/links/links.go 
b/vendor/github.com/docker/docker/daemon/links/links.go deleted file mode 100644 index 2bcb48325..000000000 --- a/vendor/github.com/docker/docker/daemon/links/links.go +++ /dev/null @@ -1,141 +0,0 @@ -package links // import "github.com/docker/docker/daemon/links" - -import ( - "fmt" - "path" - "strings" - - "github.com/docker/go-connections/nat" -) - -// Link struct holds information about parent/child linked containers -type Link struct { - // Parent container IP address - ParentIP string - // Child container IP address - ChildIP string - // Link name - Name string - // Child environment variables - ChildEnvironment []string - // Child exposed ports - Ports []nat.Port -} - -// NewLink initializes a new Link struct with the provided options. -func NewLink(parentIP, childIP, name string, env []string, exposedPorts map[nat.Port]struct{}) *Link { - var ( - i int - ports = make([]nat.Port, len(exposedPorts)) - ) - - for p := range exposedPorts { - ports[i] = p - i++ - } - - return &Link{ - Name: name, - ChildIP: childIP, - ParentIP: parentIP, - ChildEnvironment: env, - Ports: ports, - } -} - -// ToEnv creates a slice of strings containing child container information in -// the form of environment variables which will be later exported on container -// startup. -func (l *Link) ToEnv() []string { - env := []string{} - - _, n := path.Split(l.Name) - alias := strings.Replace(strings.ToUpper(n), "-", "_", -1) - - if p := l.getDefaultPort(); p != nil { - env = append(env, fmt.Sprintf("%s_PORT=%s://%s:%s", alias, p.Proto(), l.ChildIP, p.Port())) - } - - // sort the ports so that we can group the contiguous ports together - nat.Sort(l.Ports, func(ip, jp nat.Port) bool { - // If the two ports have the same number, tcp takes priority - // Sort in ascending order - return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") - }) - - for i := 0; i < len(l.Ports); { - p := l.Ports[i] - j := nextContiguous(l.Ports, p.Int(), i) - if j > i+1 { - env = append(env, fmt.Sprintf("%s_PORT_%s_%s_START=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) - env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) - env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto())) - env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_START=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) - - q := l.Ports[j] - env = append(env, fmt.Sprintf("%s_PORT_%s_%s_END=%s://%s:%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Proto(), l.ChildIP, q.Port())) - env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_END=%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Port())) - - i = j + 1 - continue - } else { - i++ - } - } - for _, p := range l.Ports { - env = append(env, fmt.Sprintf("%s_PORT_%s_%s=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) - env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) - env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) - env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto())) - } - - // Load the linked container's name into the environment - env = append(env, fmt.Sprintf("%s_NAME=%s", alias, l.Name)) - - if l.ChildEnvironment != nil { - for _, v := range l.ChildEnvironment { - parts := strings.SplitN(v, "=",
2) - if len(parts) < 2 { - continue - } - // Ignore a few variables that are added during docker build (and not really relevant to linked containers) - if parts[0] == "HOME" || parts[0] == "PATH" { - continue - } - env = append(env, fmt.Sprintf("%s_ENV_%s=%s", alias, parts[0], parts[1])) - } - } - return env -} - -func nextContiguous(ports []nat.Port, value int, index int) int { - if index+1 == len(ports) { - return index - } - for i := index + 1; i < len(ports); i++ { - if ports[i].Int() > value+1 { - return i - 1 - } - - value++ - } - return len(ports) - 1 -} - -// getDefaultPort returns the lowest-numbered exposed port, preferring tcp on ties -func (l *Link) getDefaultPort() *nat.Port { - var p nat.Port - i := len(l.Ports) - - if i == 0 { - return nil - } else if i > 1 { - nat.Sort(l.Ports, func(ip, jp nat.Port) bool { - // If the two ports have the same number, tcp takes priority - // Sort in ascending order - return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") - }) - } - p = l.Ports[0] - return &p -} diff --git a/vendor/github.com/docker/docker/daemon/list.go b/vendor/github.com/docker/docker/daemon/list.go deleted file mode 100644 index 750079f96..000000000 --- a/vendor/github.com/docker/docker/daemon/list.go +++ /dev/null @@ -1,607 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "fmt" - "sort" - "strconv" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/images" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/image" - "github.com/docker/go-connections/nat" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -var acceptedPsFilterTags = map[string]bool{ - "ancestor": true, - "before": true, - "exited": true, - "id": true, - "isolation": true, - "label": true, - "name": true, - "status": true, - "health": true, - "since": true, - "volume": true, - "network": true, - "is-task": true, - "publish": true, - "expose": true, -} - -// iterationAction represents the possible outcomes during container iteration. -type iterationAction int - -// containerReducer represents a reducer for a container. -// It returns the object to be serialized by the API. -type containerReducer func(*container.Snapshot, *listContext) (*types.Container, error) - -const ( - // includeContainer is the action to include a container in the reducer. - includeContainer iterationAction = iota - // excludeContainer is the action to exclude a container in the reducer. - excludeContainer - // stopIteration is the action to stop iterating over the list of containers. - stopIteration -) - -// errStopIteration makes the iterator stop without returning an error. -var errStopIteration = errors.New("container list iteration stopped") - -// List returns an array of all containers registered in the daemon. -func (daemon *Daemon) List() []*container.Container { - return daemon.containers.List() -} - -// listContext is the daemon generated filtering to iterate over containers. -// This is created based on the user specification from types.ContainerListOptions.
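The list code that follows folds user-supplied filters.Args into a listContext. For orientation, a sketch of how those filters are built on the client side, using the same api/types packages this vendor tree ships (endpoint wiring omitted):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
)

func main() {
	f := filters.NewArgs()
	f.Add("status", "exited")
	f.Add("exited", "137") // only containers that died with exit code 137

	opts := types.ContainerListOptions{All: true, Filters: f}
	fmt.Printf("%+v\n", opts) // would be passed to ContainerList on a client
}
```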
-type listContext struct { - // idx is the container iteration index for this context - idx int - // ancestorFilter tells whether it should check ancestors or not - ancestorFilter bool - // names is a list of container names to filter with - names map[string][]string - // images is a list of images to filter with - images map[image.ID]bool - // filters is a collection of arguments to filter with, specified by the user - filters filters.Args - // exitAllowed is a list of exit codes allowed to filter with - exitAllowed []int - - // beforeFilter is a filter to ignore containers that appear before the one given - beforeFilter *container.Snapshot - // sinceFilter is a filter to stop the filtering when the iterator arrives at the given container - sinceFilter *container.Snapshot - - // taskFilter tells if we should filter based on whether a container is part of a task - taskFilter bool - // isTask tells us if we should filter containers that are a task (true) or not (false) - isTask bool - - // publish is a list of published ports to filter with - publish map[nat.Port]bool - // expose is a list of exposed ports to filter with - expose map[nat.Port]bool - - // ContainerListOptions holds the options set by the user - *types.ContainerListOptions -} - -// byCreatedDescending is a temporary type used to sort a list of containers by creation time. -type byCreatedDescending []container.Snapshot - -func (r byCreatedDescending) Len() int { return len(r) } -func (r byCreatedDescending) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r byCreatedDescending) Less(i, j int) bool { - return r[j].CreatedAt.UnixNano() < r[i].CreatedAt.UnixNano() -} - -// Containers returns the list of containers to show given the user's filtering. -func (daemon *Daemon) Containers(config *types.ContainerListOptions) ([]*types.Container, error) { - return daemon.reduceContainers(config, daemon.refreshImage) -} - -func (daemon *Daemon) filterByNameIDMatches(view container.View, ctx *listContext) ([]container.Snapshot, error) { - idSearch := false - names := ctx.filters.Get("name") - ids := ctx.filters.Get("id") - if len(names)+len(ids) == 0 { - // if name or ID filters are not in use, return to - // standard behavior of walking the entire container - // list from the daemon's in-memory store - all, err := view.All() - sort.Sort(byCreatedDescending(all)) - return all, err - } - - // idSearch will determine if we limit name matching to the IDs - // matched from any IDs which were specified as filters - if len(ids) > 0 { - idSearch = true - } - - matches := make(map[string]bool) - // find ID matches; errors represent "not found" and can be ignored - for _, id := range ids { - if fullID, err := daemon.idIndex.Get(id); err == nil { - matches[fullID] = true - } - } - - // look for name matches; if ID filtering was used, then limit the - // search space to the matches map only; errors represent "not found" - // and can be ignored - if len(names) > 0 { - for id, idNames := range ctx.names { - // if ID filters were used and no matches on that ID were - // found, continue to next ID in the list - if idSearch && !matches[id] { - continue - } - for _, eachName := range idNames { - if ctx.filters.Match("name", eachName) { - matches[id] = true - } - } - } - } - - cntrs := make([]container.Snapshot, 0, len(matches)) - for id := range matches { - c, err := view.Get(id) - switch err.(type) { - case nil: - cntrs = append(cntrs, *c) - case container.NoSuchContainerError: - // ignore error - default: - return nil, err - } - } - - // Restore
sort-order after filtering - // Created gives us nanosec resolution for sorting - sort.Sort(byCreatedDescending(cntrs)) - - return cntrs, nil -} - -// reduceContainers parses the user's filtering options and generates the list of containers to return based on a reducer. -func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reducer containerReducer) ([]*types.Container, error) { - if err := config.Filters.Validate(acceptedPsFilterTags); err != nil { - return nil, err - } - - var ( - view = daemon.containersReplica.Snapshot() - containers = []*types.Container{} - ) - - ctx, err := daemon.foldFilter(view, config) - if err != nil { - return nil, err - } - - // fastpath to only look at a subset of containers if specific name - // or ID matches were provided by the user--otherwise we potentially - // end up querying many more containers than intended - containerList, err := daemon.filterByNameIDMatches(view, ctx) - if err != nil { - return nil, err - } - - for i := range containerList { - t, err := daemon.reducePsContainer(&containerList[i], ctx, reducer) - if err != nil { - if err != errStopIteration { - return nil, err - } - break - } - if t != nil { - containers = append(containers, t) - ctx.idx++ - } - } - - return containers, nil -} - -// reducePsContainer transforms a container snapshot into the representation expected by the ps command, using the given reducer. -func (daemon *Daemon) reducePsContainer(container *container.Snapshot, ctx *listContext, reducer containerReducer) (*types.Container, error) { - // filter containers to return - switch includeContainerInList(container, ctx) { - case excludeContainer: - return nil, nil - case stopIteration: - return nil, errStopIteration - } - - // transform internal container struct into api structs - newC, err := reducer(container, ctx) - if err != nil { - return nil, err - } - - // release lock because size calculation is slow - if ctx.Size { - sizeRw, sizeRootFs := daemon.imageService.GetContainerLayerSize(newC.ID) - newC.SizeRw = sizeRw - newC.SizeRootFs = sizeRootFs - } - return newC, nil -} - -// foldFilter generates the container filter based on the user's filtering options.
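foldFilter (below) converts every raw string filter into a typed form once, before iteration, so includeContainerInList stays cheap. A distilled sketch of that parse-early step for the "exited" codes:

```go
package listdemo

import "strconv"

// parseExitCodes converts raw "exited" filter values into ints up front;
// one malformed value rejects the whole request, just as foldFilter does.
func parseExitCodes(values []string) ([]int, error) {
	codes := make([]int, 0, len(values))
	for _, v := range values {
		code, err := strconv.Atoi(v)
		if err != nil {
			return nil, err
		}
		codes = append(codes, code)
	}
	return codes, nil
}
```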
-func (daemon *Daemon) foldFilter(view container.View, config *types.ContainerListOptions) (*listContext, error) { - psFilters := config.Filters - - var filtExited []int - - err := psFilters.WalkValues("exited", func(value string) error { - code, err := strconv.Atoi(value) - if err != nil { - return err - } - filtExited = append(filtExited, code) - return nil - }) - if err != nil { - return nil, err - } - - err = psFilters.WalkValues("status", func(value string) error { - if !container.IsValidStateString(value) { - return invalidFilter{"status", value} - } - - config.All = true - return nil - }) - if err != nil { - return nil, err - } - - var taskFilter, isTask bool - if psFilters.Contains("is-task") { - if psFilters.ExactMatch("is-task", "true") { - taskFilter = true - isTask = true - } else if psFilters.ExactMatch("is-task", "false") { - taskFilter = true - isTask = false - } else { - return nil, invalidFilter{"is-task", psFilters.Get("is-task")} - } - } - - err = psFilters.WalkValues("health", func(value string) error { - if !container.IsValidHealthString(value) { - return errdefs.InvalidParameter(errors.Errorf("Unrecognised filter value for health: %s", value)) - } - - return nil - }) - if err != nil { - return nil, err - } - - var beforeContFilter, sinceContFilter *container.Snapshot - - err = psFilters.WalkValues("before", func(value string) error { - beforeContFilter, err = idOrNameFilter(view, value) - return err - }) - if err != nil { - return nil, err - } - - err = psFilters.WalkValues("since", func(value string) error { - sinceContFilter, err = idOrNameFilter(view, value) - return err - }) - if err != nil { - return nil, err - } - - imagesFilter := map[image.ID]bool{} - var ancestorFilter bool - if psFilters.Contains("ancestor") { - ancestorFilter = true - psFilters.WalkValues("ancestor", func(ancestor string) error { - img, err := daemon.imageService.GetImage(ancestor) - if err != nil { - logrus.Warnf("Error while looking up image %v", ancestor) - return nil - } - if imagesFilter[img.ID()] { - // Already seen this ancestor, skip it - return nil - } - // Then walk down the graph and put the imageIds in imagesFilter - populateImageFilterByParents(imagesFilter, img.ID(), daemon.imageService.Children) - return nil - }) - } - - publishFilter := map[nat.Port]bool{} - err = psFilters.WalkValues("publish", portOp("publish", publishFilter)) - if err != nil { - return nil, err - } - - exposeFilter := map[nat.Port]bool{} - err = psFilters.WalkValues("expose", portOp("expose", exposeFilter)) - if err != nil { - return nil, err - } - - return &listContext{ - filters: psFilters, - ancestorFilter: ancestorFilter, - images: imagesFilter, - exitAllowed: filtExited, - beforeFilter: beforeContFilter, - sinceFilter: sinceContFilter, - taskFilter: taskFilter, - isTask: isTask, - publish: publishFilter, - expose: exposeFilter, - ContainerListOptions: config, - names: view.GetAllNames(), - }, nil -} - -func idOrNameFilter(view container.View, value string) (*container.Snapshot, error) { - filter, err := view.Get(value) - switch err.(type) { - case container.NoSuchContainerError: - // Try name search instead - found := "" - for id, idNames := range view.GetAllNames() { - for _, eachName := range idNames { - if strings.TrimPrefix(value, "/") == strings.TrimPrefix(eachName, "/") { - if found != "" && found != id { - return nil, err - } - found = id - } - } - } - if found != "" { - filter, err = view.Get(found) - } - } - return filter, err -} - -func portOp(key string, filter map[nat.Port]bool)
func(value string) error { - return func(value string) error { - if strings.Contains(value, ":") { - return fmt.Errorf("filter for '%s' should not contain ':': %s", key, value) - } - // support two formats: <portnum>/[<proto>] or <startport-endport>/[<proto>] - proto, port := nat.SplitProtoPort(value) - start, end, err := nat.ParsePortRange(port) - if err != nil { - return fmt.Errorf("error while looking up %s %s: %s", key, value, err) - } - for i := start; i <= end; i++ { - p, err := nat.NewPort(proto, strconv.FormatUint(i, 10)) - if err != nil { - return fmt.Errorf("error while looking up %s %s: %s", key, value, err) - } - filter[p] = true - } - return nil - } -} - -// includeContainerInList decides whether a container should be included in the output or not based on the filter. -// It also decides if the iteration should be stopped or not. -func includeContainerInList(container *container.Snapshot, ctx *listContext) iterationAction { - // Do not include container if it's in the list before the filter container. - // Set the filter container to nil to include the rest of containers after this one. - if ctx.beforeFilter != nil { - if container.ID == ctx.beforeFilter.ID { - ctx.beforeFilter = nil - } - return excludeContainer - } - - // Stop iteration when the container arrives at the filter container - if ctx.sinceFilter != nil { - if container.ID == ctx.sinceFilter.ID { - return stopIteration - } - } - - // Do not include container if it's stopped, unless all containers were requested or a limit is set - if !container.Running && !ctx.All && ctx.Limit <= 0 { - return excludeContainer - } - - // Do not include container if the name doesn't match - if !ctx.filters.Match("name", container.Name) { - return excludeContainer - } - - // Do not include container if the id doesn't match - if !ctx.filters.Match("id", container.ID) { - return excludeContainer - } - - if ctx.taskFilter { - if ctx.isTask != container.Managed { - return excludeContainer - } - } - - // Do not include container if any of the labels don't match - if !ctx.filters.MatchKVList("label", container.Labels) { - return excludeContainer - } - - // Do not include container if isolation doesn't match - if excludeContainer == excludeByIsolation(container, ctx) { - return excludeContainer - } - - // Stop iteration when the index is over the limit - if ctx.Limit > 0 && ctx.idx == ctx.Limit { - return stopIteration - } - - // Do not include container if its exit code is not in the filter - if len(ctx.exitAllowed) > 0 { - shouldSkip := true - for _, code := range ctx.exitAllowed { - if code == container.ExitCode && !container.Running && !container.StartedAt.IsZero() { - shouldSkip = false - break - } - } - if shouldSkip { - return excludeContainer - } - } - - // Do not include container if its status doesn't match the filter - if !ctx.filters.Match("status", container.State) { - return excludeContainer - } - - // Do not include container if its health doesn't match the filter - if !ctx.filters.ExactMatch("health", container.Health) { - return excludeContainer - } - - if ctx.filters.Contains("volume") { - volumesByName := make(map[string]types.MountPoint) - for _, m := range container.Mounts { - if m.Name != "" { - volumesByName[m.Name] = m - } else { - volumesByName[m.Source] = m - } - } - volumesByDestination := make(map[string]types.MountPoint) - for _, m := range container.Mounts { - if m.Destination != "" { - volumesByDestination[m.Destination] = m - } - } - - volumeExist := fmt.Errorf("volume mounted in container") - err := ctx.filters.WalkValues("volume", func(value string) error { -
if _, exist := volumesByDestination[value]; exist { - return volumeExist - } - if _, exist := volumesByName[value]; exist { - return volumeExist - } - return nil - }) - if err != volumeExist { - return excludeContainer - } - } - - if ctx.ancestorFilter { - if len(ctx.images) == 0 { - return excludeContainer - } - if !ctx.images[image.ID(container.ImageID)] { - return excludeContainer - } - } - - var ( - networkExist = errors.New("container part of network") - noNetworks = errors.New("container is not part of any networks") - ) - if ctx.filters.Contains("network") { - err := ctx.filters.WalkValues("network", func(value string) error { - if container.NetworkSettings == nil { - return noNetworks - } - if _, ok := container.NetworkSettings.Networks[value]; ok { - return networkExist - } - for _, nw := range container.NetworkSettings.Networks { - if nw == nil { - continue - } - if strings.HasPrefix(nw.NetworkID, value) { - return networkExist - } - } - return nil - }) - if err != networkExist { - return excludeContainer - } - } - - if len(ctx.publish) > 0 { - shouldSkip := true - for port := range ctx.publish { - if _, ok := container.PortBindings[port]; ok { - shouldSkip = false - break - } - } - if shouldSkip { - return excludeContainer - } - } - - if len(ctx.expose) > 0 { - shouldSkip := true - for port := range ctx.expose { - if _, ok := container.ExposedPorts[port]; ok { - shouldSkip = false - break - } - } - if shouldSkip { - return excludeContainer - } - } - - return includeContainer -} - -// refreshImage checks if the Image ref still points to the correct ID, and updates the ref to the actual ID when it doesn't -func (daemon *Daemon) refreshImage(s *container.Snapshot, ctx *listContext) (*types.Container, error) { - c := s.Container - image := s.Image // keep the original ref if still valid (hasn't changed) - if image != s.ImageID { - img, err := daemon.imageService.GetImage(image) - if _, isDNE := err.(images.ErrImageDoesNotExist); err != nil && !isDNE { - return nil, err - } - if err != nil || img.ImageID() != s.ImageID { - // ref changed, we need to use original ID - image = s.ImageID - } - } - c.Image = image - return &c, nil -} - -func populateImageFilterByParents(ancestorMap map[image.ID]bool, imageID image.ID, getChildren func(image.ID) []image.ID) { - if !ancestorMap[imageID] { - for _, id := range getChildren(imageID) { - populateImageFilterByParents(ancestorMap, id, getChildren) - } - ancestorMap[imageID] = true - } -} diff --git a/vendor/github.com/docker/docker/daemon/list_unix.go b/vendor/github.com/docker/docker/daemon/list_unix.go deleted file mode 100644 index 4f9e453bc..000000000 --- a/vendor/github.com/docker/docker/daemon/list_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build linux freebsd - -package daemon // import "github.com/docker/docker/daemon" - -import "github.com/docker/docker/container" - -// excludeByIsolation is a platform specific helper function to support PS -// filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix. 
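portOp earlier in this file expands each publish/expose filter value into a set of nat.Port keys. The same expansion in isolation, using the go-connections package already vendored here:

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/docker/go-connections/nat"
)

func main() {
	// "8000-8002/tcp" exercises the range form; a single "8000/tcp" also works.
	proto, port := nat.SplitProtoPort("8000-8002/tcp")
	start, end, err := nat.ParsePortRange(port)
	if err != nil {
		panic(err)
	}
	for i := start; i <= end; i++ {
		p, err := nat.NewPort(proto, strconv.FormatUint(i, 10))
		if err != nil {
			panic(err)
		}
		fmt.Println(p) // 8000/tcp, 8001/tcp, 8002/tcp
	}
}
```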
-func excludeByIsolation(container *container.Snapshot, ctx *listContext) iterationAction { - return includeContainer -} diff --git a/vendor/github.com/docker/docker/daemon/list_windows.go b/vendor/github.com/docker/docker/daemon/list_windows.go deleted file mode 100644 index 7c7b5fa85..000000000 --- a/vendor/github.com/docker/docker/daemon/list_windows.go +++ /dev/null @@ -1,20 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "strings" - - "github.com/docker/docker/container" -) - -// excludeByIsolation is a platform specific helper function to support PS -// filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix. -func excludeByIsolation(container *container.Snapshot, ctx *listContext) iterationAction { - i := strings.ToLower(string(container.HostConfig.Isolation)) - if i == "" { - i = "default" - } - if !ctx.filters.Match("isolation", i) { - return excludeContainer - } - return includeContainer -} diff --git a/vendor/github.com/docker/docker/daemon/listeners/group_unix.go b/vendor/github.com/docker/docker/daemon/listeners/group_unix.go deleted file mode 100644 index 9cc17eba7..000000000 --- a/vendor/github.com/docker/docker/daemon/listeners/group_unix.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build !windows - -package listeners // import "github.com/docker/docker/daemon/listeners" - -import ( - "fmt" - "strconv" - - "github.com/opencontainers/runc/libcontainer/user" - "github.com/pkg/errors" -) - -const defaultSocketGroup = "docker" - -func lookupGID(name string) (int, error) { - groupFile, err := user.GetGroupPath() - if err != nil { - return -1, errors.Wrap(err, "error looking up groups") - } - groups, err := user.ParseGroupFileFilter(groupFile, func(g user.Group) bool { - return g.Name == name || strconv.Itoa(g.Gid) == name - }) - if err != nil { - return -1, errors.Wrapf(err, "error parsing groups for %s", name) - } - if len(groups) > 0 { - return groups[0].Gid, nil - } - gid, err := strconv.Atoi(name) - if err == nil { - return gid, nil - } - return -1, fmt.Errorf("group %s not found", name) -} diff --git a/vendor/github.com/docker/docker/daemon/listeners/listeners_linux.go b/vendor/github.com/docker/docker/daemon/listeners/listeners_linux.go deleted file mode 100644 index c8956db25..000000000 --- a/vendor/github.com/docker/docker/daemon/listeners/listeners_linux.go +++ /dev/null @@ -1,102 +0,0 @@ -package listeners // import "github.com/docker/docker/daemon/listeners" - -import ( - "crypto/tls" - "fmt" - "net" - "os" - "strconv" - - "github.com/coreos/go-systemd/activation" - "github.com/docker/go-connections/sockets" - "github.com/sirupsen/logrus" -) - -// Init creates new listeners for the server. -// TODO: Clean up the fact that socketGroup and tlsConfig aren't always used. -func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) ([]net.Listener, error) { - ls := []net.Listener{} - - switch proto { - case "fd": - fds, err := listenFD(addr, tlsConfig) - if err != nil { - return nil, err - } - ls = append(ls, fds...) 
- case "tcp": - l, err := sockets.NewTCPSocket(addr, tlsConfig) - if err != nil { - return nil, err - } - ls = append(ls, l) - case "unix": - gid, err := lookupGID(socketGroup) - if err != nil { - if socketGroup != "" { - if socketGroup != defaultSocketGroup { - return nil, err - } - logrus.Warnf("could not change group %s to %s: %v", addr, defaultSocketGroup, err) - } - gid = os.Getgid() - } - l, err := sockets.NewUnixSocket(addr, gid) - if err != nil { - return nil, fmt.Errorf("can't create unix socket %s: %v", addr, err) - } - ls = append(ls, l) - default: - return nil, fmt.Errorf("invalid protocol format: %q", proto) - } - - return ls, nil -} - -// listenFD returns the specified socket activated files as a slice of -// net.Listeners or all of the activated files if "*" is given. -func listenFD(addr string, tlsConfig *tls.Config) ([]net.Listener, error) { - var ( - err error - listeners []net.Listener - ) - // socket activation - if tlsConfig != nil { - listeners, err = activation.TLSListeners(tlsConfig) - } else { - listeners, err = activation.Listeners() - } - if err != nil { - return nil, err - } - - if len(listeners) == 0 { - return nil, fmt.Errorf("no sockets found via socket activation: make sure the service was started by systemd") - } - - // default to all fds just like unix:// and tcp:// - if addr == "" || addr == "*" { - return listeners, nil - } - - fdNum, err := strconv.Atoi(addr) - if err != nil { - return nil, fmt.Errorf("failed to parse systemd fd address: should be a number: %v", addr) - } - fdOffset := fdNum - 3 - if len(listeners) < fdOffset+1 { - return nil, fmt.Errorf("too few socket activated files passed in by systemd") - } - if listeners[fdOffset] == nil { - return nil, fmt.Errorf("failed to listen on systemd activated file: fd %d", fdOffset+3) - } - for i, ls := range listeners { - if i == fdOffset || ls == nil { - continue - } - if err := ls.Close(); err != nil { - return nil, fmt.Errorf("failed to close systemd activated file: fd %d: %v", fdOffset+3, err) - } - } - return []net.Listener{listeners[fdOffset]}, nil -} diff --git a/vendor/github.com/docker/docker/daemon/listeners/listeners_windows.go b/vendor/github.com/docker/docker/daemon/listeners/listeners_windows.go deleted file mode 100644 index 73f5f79e4..000000000 --- a/vendor/github.com/docker/docker/daemon/listeners/listeners_windows.go +++ /dev/null @@ -1,54 +0,0 @@ -package listeners // import "github.com/docker/docker/daemon/listeners" - -import ( - "crypto/tls" - "fmt" - "net" - "strings" - - "github.com/Microsoft/go-winio" - "github.com/docker/go-connections/sockets" -) - -// Init creates new listeners for the server. 
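listenFD above maps an fd:// address onto systemd-activated sockets. A trimmed sketch of accepting whatever listeners systemd passed in, using the same go-systemd call shape as the vendored code:

```go
package main

import (
	"fmt"

	"github.com/coreos/go-systemd/activation"
)

func main() {
	listeners, err := activation.Listeners()
	if err != nil {
		panic(err)
	}
	if len(listeners) == 0 {
		// mirrors the daemon's error: activation only works when systemd
		// started this process via a matching .socket unit
		panic("no sockets found via socket activation")
	}
	for _, l := range listeners {
		fmt.Println("activated listener on", l.Addr())
	}
}
```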
-func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) ([]net.Listener, error) { - ls := []net.Listener{} - - switch proto { - case "tcp": - l, err := sockets.NewTCPSocket(addr, tlsConfig) - if err != nil { - return nil, err - } - ls = append(ls, l) - - case "npipe": - // allow Administrators and SYSTEM, plus whatever additional users or groups were specified - sddl := "D:P(A;;GA;;;BA)(A;;GA;;;SY)" - if socketGroup != "" { - for _, g := range strings.Split(socketGroup, ",") { - sid, err := winio.LookupSidByName(g) - if err != nil { - return nil, err - } - sddl += fmt.Sprintf("(A;;GRGW;;;%s)", sid) - } - } - c := winio.PipeConfig{ - SecurityDescriptor: sddl, - MessageMode: true, // Use message mode so that CloseWrite() is supported - InputBufferSize: 65536, // Use 64KB buffers to improve performance - OutputBufferSize: 65536, - } - l, err := winio.ListenPipe(addr, &c) - if err != nil { - return nil, err - } - ls = append(ls, l) - - default: - return nil, fmt.Errorf("invalid protocol format: windows only supports tcp and npipe") - } - - return ls, nil -} diff --git a/vendor/github.com/docker/docker/daemon/logdrivers_linux.go b/vendor/github.com/docker/docker/daemon/logdrivers_linux.go deleted file mode 100644 index 6ddcd2fc8..000000000 --- a/vendor/github.com/docker/docker/daemon/logdrivers_linux.go +++ /dev/null @@ -1,15 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - // Importing packages here only to make sure their init gets called and - // therefore they register themselves to the logdriver factory. - _ "github.com/docker/docker/daemon/logger/awslogs" - _ "github.com/docker/docker/daemon/logger/fluentd" - _ "github.com/docker/docker/daemon/logger/gcplogs" - _ "github.com/docker/docker/daemon/logger/gelf" - _ "github.com/docker/docker/daemon/logger/journald" - _ "github.com/docker/docker/daemon/logger/jsonfilelog" - _ "github.com/docker/docker/daemon/logger/logentries" - _ "github.com/docker/docker/daemon/logger/splunk" - _ "github.com/docker/docker/daemon/logger/syslog" -) diff --git a/vendor/github.com/docker/docker/daemon/logdrivers_windows.go b/vendor/github.com/docker/docker/daemon/logdrivers_windows.go deleted file mode 100644 index 62e7a6f95..000000000 --- a/vendor/github.com/docker/docker/daemon/logdrivers_windows.go +++ /dev/null @@ -1,14 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - // Importing packages here only to make sure their init gets called and - // therefore they register themselves to the logdriver factory. 
- _ "github.com/docker/docker/daemon/logger/awslogs" - _ "github.com/docker/docker/daemon/logger/etwlogs" - _ "github.com/docker/docker/daemon/logger/fluentd" - _ "github.com/docker/docker/daemon/logger/gelf" - _ "github.com/docker/docker/daemon/logger/jsonfilelog" - _ "github.com/docker/docker/daemon/logger/logentries" - _ "github.com/docker/docker/daemon/logger/splunk" - _ "github.com/docker/docker/daemon/logger/syslog" -) diff --git a/vendor/github.com/docker/docker/daemon/logger/adapter.go b/vendor/github.com/docker/docker/daemon/logger/adapter.go deleted file mode 100644 index 95aff9bf3..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/adapter.go +++ /dev/null @@ -1,139 +0,0 @@ -package logger // import "github.com/docker/docker/daemon/logger" - -import ( - "io" - "os" - "path/filepath" - "sync" - "time" - - "github.com/docker/docker/api/types/plugins/logdriver" - "github.com/docker/docker/pkg/plugingetter" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// pluginAdapter takes a plugin and implements the Logger interface for logger -// instances -type pluginAdapter struct { - driverName string - id string - plugin logPlugin - fifoPath string - capabilities Capability - logInfo Info - - // synchronize access to the log stream and shared buffer - mu sync.Mutex - enc logdriver.LogEntryEncoder - stream io.WriteCloser - // buf is shared for each `Log()` call to reduce allocations. - // buf must be protected by mutex - buf logdriver.LogEntry -} - -func (a *pluginAdapter) Log(msg *Message) error { - a.mu.Lock() - - a.buf.Line = msg.Line - a.buf.TimeNano = msg.Timestamp.UnixNano() - a.buf.Partial = msg.PLogMetaData != nil - a.buf.Source = msg.Source - - err := a.enc.Encode(&a.buf) - a.buf.Reset() - - a.mu.Unlock() - - PutMessage(msg) - return err -} - -func (a *pluginAdapter) Name() string { - return a.driverName -} - -func (a *pluginAdapter) Close() error { - a.mu.Lock() - defer a.mu.Unlock() - - if err := a.plugin.StopLogging(filepath.Join("/", "run", "docker", "logging", a.id)); err != nil { - return err - } - - if err := a.stream.Close(); err != nil { - logrus.WithError(err).Error("error closing plugin fifo") - } - if err := os.Remove(a.fifoPath); err != nil && !os.IsNotExist(err) { - logrus.WithError(err).Error("error cleaning up plugin fifo") - } - - // may be nil, especially for unit tests - if pluginGetter != nil { - pluginGetter.Get(a.Name(), extName, plugingetter.Release) - } - return nil -} - -type pluginAdapterWithRead struct { - *pluginAdapter -} - -func (a *pluginAdapterWithRead) ReadLogs(config ReadConfig) *LogWatcher { - watcher := NewLogWatcher() - - go func() { - defer close(watcher.Msg) - stream, err := a.plugin.ReadLogs(a.logInfo, config) - if err != nil { - watcher.Err <- errors.Wrap(err, "error getting log reader") - return - } - defer stream.Close() - - dec := logdriver.NewLogEntryDecoder(stream) - for { - select { - case <-watcher.WatchClose(): - return - default: - } - - var buf logdriver.LogEntry - if err := dec.Decode(&buf); err != nil { - if err == io.EOF { - return - } - select { - case watcher.Err <- errors.Wrap(err, "error decoding log message"): - case <-watcher.WatchClose(): - } - return - } - - msg := &Message{ - Timestamp: time.Unix(0, buf.TimeNano), - Line: buf.Line, - Source: buf.Source, - } - - // plugin should handle this, but check just in case - if !config.Since.IsZero() && msg.Timestamp.Before(config.Since) { - continue - } - if !config.Until.IsZero() && msg.Timestamp.After(config.Until) { - return - } - - select { - case 
watcher.Msg <- msg: - case <-watcher.WatchClose(): - // make sure the message we consumed is sent - watcher.Msg <- msg - return - } - } - }() - - return watcher -} diff --git a/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs.go b/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs.go deleted file mode 100644 index 3d6466f09..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/awslogs/cloudwatchlogs.go +++ /dev/null @@ -1,744 +0,0 @@ -// Package awslogs provides the logdriver for forwarding container logs to Amazon CloudWatch Logs -package awslogs // import "github.com/docker/docker/daemon/logger/awslogs" - -import ( - "fmt" - "os" - "regexp" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds" - "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/cloudwatchlogs" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/daemon/logger/loggerutils" - "github.com/docker/docker/dockerversion" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const ( - name = "awslogs" - regionKey = "awslogs-region" - regionEnvKey = "AWS_REGION" - logGroupKey = "awslogs-group" - logStreamKey = "awslogs-stream" - logCreateGroupKey = "awslogs-create-group" - tagKey = "tag" - datetimeFormatKey = "awslogs-datetime-format" - multilinePatternKey = "awslogs-multiline-pattern" - credentialsEndpointKey = "awslogs-credentials-endpoint" - batchPublishFrequency = 5 * time.Second - - // See: http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html - perEventBytes = 26 - maximumBytesPerPut = 1048576 - maximumLogEventsPerPut = 10000 - - // See: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html - maximumBytesPerEvent = 262144 - perEventBytes - - resourceAlreadyExistsCode = "ResourceAlreadyExistsException" - dataAlreadyAcceptedCode = "DataAlreadyAcceptedException" - invalidSequenceTokenCode = "InvalidSequenceTokenException" - resourceNotFoundCode = "ResourceNotFoundException" - - credentialsEndpoint = "http://169.254.170.2" - - userAgentHeader = "User-Agent" -) - -type logStream struct { - logStreamName string - logGroupName string - logCreateGroup bool - logNonBlocking bool - multilinePattern *regexp.Regexp - client api - messages chan *logger.Message - lock sync.RWMutex - closed bool - sequenceToken *string -} - -var _ logger.SizedLogger = &logStream{} - -type api interface { - CreateLogGroup(*cloudwatchlogs.CreateLogGroupInput) (*cloudwatchlogs.CreateLogGroupOutput, error) - CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) - PutLogEvents(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) -} - -type regionFinder interface { - Region() (string, error) -} - -type wrappedEvent struct { - inputLogEvent *cloudwatchlogs.InputLogEvent - insertOrder int -} -type byTimestamp []wrappedEvent - -// init registers the awslogs driver -func init() { - if err := logger.RegisterLogDriver(name, New); err != nil { - logrus.Fatal(err) - } - if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { - logrus.Fatal(err) - } -} - -// eventBatch holds the events that are batched for submission and the -// associated data about it. 
-// -// Warning: this type is not threadsafe and must not be used -// concurrently. This type is expected to be consumed in a single go -// routine and never concurrently. -type eventBatch struct { - batch []wrappedEvent - bytes int -} - -// New creates an awslogs logger using the configuration passed in on the -// context. Supported context configuration variables are awslogs-region, -// awslogs-group, awslogs-stream, awslogs-create-group, awslogs-multiline-pattern -// and awslogs-datetime-format. When available, configuration is -// also taken from environment variables AWS_REGION, AWS_ACCESS_KEY_ID, -// AWS_SECRET_ACCESS_KEY, the shared credentials file (~/.aws/credentials), and -// the EC2 Instance Metadata Service. -func New(info logger.Info) (logger.Logger, error) { - logGroupName := info.Config[logGroupKey] - logStreamName, err := loggerutils.ParseLogTag(info, "{{.FullID}}") - if err != nil { - return nil, err - } - logCreateGroup := false - if info.Config[logCreateGroupKey] != "" { - logCreateGroup, err = strconv.ParseBool(info.Config[logCreateGroupKey]) - if err != nil { - return nil, err - } - } - - logNonBlocking := info.Config["mode"] == "non-blocking" - - if info.Config[logStreamKey] != "" { - logStreamName = info.Config[logStreamKey] - } - - multilinePattern, err := parseMultilineOptions(info) - if err != nil { - return nil, err - } - - client, err := newAWSLogsClient(info) - if err != nil { - return nil, err - } - - containerStream := &logStream{ - logStreamName: logStreamName, - logGroupName: logGroupName, - logCreateGroup: logCreateGroup, - logNonBlocking: logNonBlocking, - multilinePattern: multilinePattern, - client: client, - messages: make(chan *logger.Message, 4096), - } - - creationDone := make(chan bool) - if logNonBlocking { - go func() { - backoff := 1 - maxBackoff := 32 - for { - // If logger is closed we are done - containerStream.lock.RLock() - if containerStream.closed { - containerStream.lock.RUnlock() - break - } - containerStream.lock.RUnlock() - err := containerStream.create() - if err == nil { - break - } - - time.Sleep(time.Duration(backoff) * time.Second) - if backoff < maxBackoff { - backoff *= 2 - } - logrus. - WithError(err). - WithField("container-id", info.ContainerID). - WithField("container-name", info.ContainerName). - Error("Error while trying to initialize awslogs. Retrying in: ", backoff, " seconds") - } - close(creationDone) - }() - } else { - if err = containerStream.create(); err != nil { - return nil, err - } - close(creationDone) - } - go containerStream.collectBatch(creationDone) - - return containerStream, nil -} - -// Parses awslogs-multiline-pattern and awslogs-datetime-format options -// If awslogs-datetime-format is present, convert the format from strftime -// to regexp and return. -// If awslogs-multiline-pattern is present, compile regexp and return -func parseMultilineOptions(info logger.Info) (*regexp.Regexp, error) { - dateTimeFormat := info.Config[datetimeFormatKey] - multilinePatternKey := info.Config[multilinePatternKey] - // strftime input is parsed into a regular expression - if dateTimeFormat != "" { - // %. 
matches each strftime format sequence and ReplaceAllStringFunc - // looks up each format sequence in the conversion table strftimeToRegex - // to replace with a defined regular expression - r := regexp.MustCompile("%.") - multilinePatternKey = r.ReplaceAllStringFunc(dateTimeFormat, func(s string) string { - return strftimeToRegex[s] - }) - } - if multilinePatternKey != "" { - multilinePattern, err := regexp.Compile(multilinePatternKey) - if err != nil { - return nil, errors.Wrapf(err, "awslogs could not parse multiline pattern key %q", multilinePatternKey) - } - return multilinePattern, nil - } - return nil, nil -} - -// Maps strftime format strings to regex -var strftimeToRegex = map[string]string{ - /*weekdayShort */ `%a`: `(?:Mon|Tue|Wed|Thu|Fri|Sat|Sun)`, - /*weekdayFull */ `%A`: `(?:Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)`, - /*weekdayZeroIndex */ `%w`: `[0-6]`, - /*dayZeroPadded */ `%d`: `(?:0[1-9]|[1,2][0-9]|3[0,1])`, - /*monthShort */ `%b`: `(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)`, - /*monthFull */ `%B`: `(?:January|February|March|April|May|June|July|August|September|October|November|December)`, - /*monthZeroPadded */ `%m`: `(?:0[1-9]|1[0-2])`, - /*yearCentury */ `%Y`: `\d{4}`, - /*yearZeroPadded */ `%y`: `\d{2}`, - /*hour24ZeroPadded */ `%H`: `(?:[0,1][0-9]|2[0-3])`, - /*hour12ZeroPadded */ `%I`: `(?:0[0-9]|1[0-2])`, - /*AM or PM */ `%p`: "[A,P]M", - /*minuteZeroPadded */ `%M`: `[0-5][0-9]`, - /*secondZeroPadded */ `%S`: `[0-5][0-9]`, - /*microsecondZeroPadded */ `%f`: `\d{6}`, - /*utcOffset */ `%z`: `[+-]\d{4}`, - /*tzName */ `%Z`: `[A-Z]{1,4}T`, - /*dayOfYearZeroPadded */ `%j`: `(?:0[0-9][1-9]|[1,2][0-9][0-9]|3[0-5][0-9]|36[0-6])`, - /*milliseconds */ `%L`: `\.\d{3}`, -} - -// newRegionFinder is a variable such that the implementation -// can be swapped out for unit tests. -var newRegionFinder = func() regionFinder { - return ec2metadata.New(session.New()) -} - -// newSDKEndpoint is a variable such that the implementation -// can be swapped out for unit tests. -var newSDKEndpoint = credentialsEndpoint - -// newAWSLogsClient creates the service client for Amazon CloudWatch Logs. -// Customizations to the default client from the SDK include a Docker-specific -// User-Agent string and automatic region detection using the EC2 Instance -// Metadata Service when region is otherwise unspecified. 
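parseMultilineOptions above rewrites a strftime layout into a regular expression by table lookup. The same trick distilled, with the conversion table trimmed to the sequences this example needs:

```go
package main

import (
	"fmt"
	"regexp"
)

// Trimmed copy of the strftimeToRegex idea above; only three sequences here.
var strftimeToRegex = map[string]string{
	`%Y`: `\d{4}`,
	`%m`: `(?:0[1-9]|1[0-2])`,
	`%d`: `(?:0[1-9]|[1,2][0-9]|3[0,1])`,
}

func main() {
	// "%." matches each two-character strftime sequence in the layout.
	seq := regexp.MustCompile("%.")
	pattern := seq.ReplaceAllStringFunc("%Y-%m-%d", func(s string) string {
		return strftimeToRegex[s]
	})
	multiline := regexp.MustCompile(pattern)
	fmt.Println(multiline.MatchString("2021-06-26 starting batch")) // true
}
```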
-func newAWSLogsClient(info logger.Info) (api, error) { - var region *string - if os.Getenv(regionEnvKey) != "" { - region = aws.String(os.Getenv(regionEnvKey)) - } - if info.Config[regionKey] != "" { - region = aws.String(info.Config[regionKey]) - } - if region == nil || *region == "" { - logrus.Info("Trying to get region from EC2 Metadata") - ec2MetadataClient := newRegionFinder() - r, err := ec2MetadataClient.Region() - if err != nil { - logrus.WithFields(logrus.Fields{ - "error": err, - }).Error("Could not get region from EC2 metadata, environment, or log option") - return nil, errors.New("Cannot determine region for awslogs driver") - } - region = &r - } - - sess, err := session.NewSession() - if err != nil { - return nil, errors.New("Failed to create a service client session for awslogs driver") - } - - // attach region to cloudwatchlogs config - sess.Config.Region = region - - if uri, ok := info.Config[credentialsEndpointKey]; ok { - logrus.Debugf("Trying to get credentials from awslogs-credentials-endpoint") - - endpoint := fmt.Sprintf("%s%s", newSDKEndpoint, uri) - creds := endpointcreds.NewCredentialsClient(*sess.Config, sess.Handlers, endpoint, - func(p *endpointcreds.Provider) { - p.ExpiryWindow = 5 * time.Minute - }) - - // attach credentials to cloudwatchlogs config - sess.Config.Credentials = creds - } - - logrus.WithFields(logrus.Fields{ - "region": *region, - }).Debug("Created awslogs client") - - client := cloudwatchlogs.New(sess) - - client.Handlers.Build.PushBackNamed(request.NamedHandler{ - Name: "DockerUserAgentHandler", - Fn: func(r *request.Request) { - currentAgent := r.HTTPRequest.Header.Get(userAgentHeader) - r.HTTPRequest.Header.Set(userAgentHeader, - fmt.Sprintf("Docker %s (%s) %s", - dockerversion.Version, runtime.GOOS, currentAgent)) - }, - }) - return client, nil -} - -// Name returns the name of the awslogs logging driver -func (l *logStream) Name() string { - return name -} - -func (l *logStream) BufSize() int { - return maximumBytesPerEvent -} - -// Log submits messages for logging by an instance of the awslogs logging driver -func (l *logStream) Log(msg *logger.Message) error { - l.lock.RLock() - defer l.lock.RUnlock() - if l.closed { - return errors.New("awslogs is closed") - } - if l.logNonBlocking { - select { - case l.messages <- msg: - return nil - default: - return errors.New("awslogs buffer is full") - } - } - l.messages <- msg - return nil -} - -// Close closes the instance of the awslogs logging driver -func (l *logStream) Close() error { - l.lock.Lock() - defer l.lock.Unlock() - if !l.closed { - close(l.messages) - } - l.closed = true - return nil -} - -// create creates log group and log stream for the instance of the awslogs logging driver -func (l *logStream) create() error { - if err := l.createLogStream(); err != nil { - if l.logCreateGroup { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == resourceNotFoundCode { - if err := l.createLogGroup(); err != nil { - return err - } - return l.createLogStream() - } - } - if err != nil { - return err - } - } - - return nil -} - -// createLogGroup creates a log group for the instance of the awslogs logging driver -func (l *logStream) createLogGroup() error { - if _, err := l.client.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{ - LogGroupName: aws.String(l.logGroupName), - }); err != nil { - if awsErr, ok := err.(awserr.Error); ok { - fields := logrus.Fields{ - "errorCode": awsErr.Code(), - "message": awsErr.Message(), - "origError": awsErr.OrigErr(), - "logGroupName":
l.logGroupName, - "logCreateGroup": l.logCreateGroup, - } - if awsErr.Code() == resourceAlreadyExistsCode { - // Allow creation to succeed - logrus.WithFields(fields).Info("Log group already exists") - return nil - } - logrus.WithFields(fields).Error("Failed to create log group") - } - return err - } - return nil -} - -// createLogStream creates a log stream for the instance of the awslogs logging driver -func (l *logStream) createLogStream() error { - input := &cloudwatchlogs.CreateLogStreamInput{ - LogGroupName: aws.String(l.logGroupName), - LogStreamName: aws.String(l.logStreamName), - } - - _, err := l.client.CreateLogStream(input) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - fields := logrus.Fields{ - "errorCode": awsErr.Code(), - "message": awsErr.Message(), - "origError": awsErr.OrigErr(), - "logGroupName": l.logGroupName, - "logStreamName": l.logStreamName, - } - if awsErr.Code() == resourceAlreadyExistsCode { - // Allow creation to succeed - logrus.WithFields(fields).Info("Log stream already exists") - return nil - } - logrus.WithFields(fields).Error("Failed to create log stream") - } - } - return err -} - -// newTicker is used for time-based batching. newTicker is a variable such -// that the implementation can be swapped out for unit tests. -var newTicker = func(freq time.Duration) *time.Ticker { - return time.NewTicker(freq) -} - -// collectBatch executes as a goroutine to perform batching of log events for -// submission to the log stream. If the awslogs-multiline-pattern or -// awslogs-datetime-format options have been configured, multiline processing -// is enabled, where log messages are stored in an event buffer until a multiline -// pattern match is found, at which point the messages in the event buffer are -// pushed to CloudWatch logs as a single log event. Multiline messages are processed -// according to the maximumBytesPerPut constraint, and the implementation only -// allows for messages to be buffered for a maximum of 2*batchPublishFrequency -// seconds. When events are ready to be processed for submission to CloudWatch -// Logs, the processEvents method is called. If a multiline pattern is not -// configured, log events are submitted to the processEvents method immediately. 
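The "buffered for a maximum of 2*batchPublishFrequency" rule described above reduces to a small amount of arithmetic on millisecond timestamps. A standalone sketch, with batchPublishFrequency assumed to be the driver's 5-second constant:

package main

import (
	"fmt"
	"time"
)

// batchPublishFrequency mirrors the driver's constant (assumed 5s here).
const batchPublishFrequency = 5 * time.Second

// bufferExpired reports whether a partially assembled multiline event,
// first seen at bufferTS (Unix milliseconds), should be flushed at tick t.
func bufferExpired(t time.Time, bufferTS int64) bool {
	age := t.UnixNano()/int64(time.Millisecond) - bufferTS
	// Flush when the buffer is older than one publish interval, or when
	// the clock moved backwards and the age went negative.
	return age >= int64(batchPublishFrequency/time.Millisecond) || age < 0
}

func main() {
	now := time.Now()
	old := now.Add(-6*time.Second).UnixNano() / int64(time.Millisecond)
	fresh := now.UnixNano() / int64(time.Millisecond)
	fmt.Println(bufferExpired(now, old))   // true: older than one interval
	fmt.Println(bufferExpired(now, fresh)) // false
}

Since the ticker itself fires every batchPublishFrequency, an event buffer can sit for at most one full interval before the tick that finds it expired, hence the 2x bound stated above.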
-func (l *logStream) collectBatch(created chan bool) { - // Wait for the logstream/group to be created - <-created - ticker := newTicker(batchPublishFrequency) - var eventBuffer []byte - var eventBufferTimestamp int64 - var batch = newEventBatch() - for { - select { - case t := <-ticker.C: - // If event buffer is older than batch publish frequency flush the event buffer - if eventBufferTimestamp > 0 && len(eventBuffer) > 0 { - eventBufferAge := t.UnixNano()/int64(time.Millisecond) - eventBufferTimestamp - eventBufferExpired := eventBufferAge >= int64(batchPublishFrequency)/int64(time.Millisecond) - eventBufferNegative := eventBufferAge < 0 - if eventBufferExpired || eventBufferNegative { - l.processEvent(batch, eventBuffer, eventBufferTimestamp) - eventBuffer = eventBuffer[:0] - } - } - l.publishBatch(batch) - batch.reset() - case msg, more := <-l.messages: - if !more { - // Flush event buffer and release resources - l.processEvent(batch, eventBuffer, eventBufferTimestamp) - eventBuffer = eventBuffer[:0] - l.publishBatch(batch) - batch.reset() - return - } - if eventBufferTimestamp == 0 { - eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond) - } - line := msg.Line - if l.multilinePattern != nil { - if l.multilinePattern.Match(line) || len(eventBuffer)+len(line) > maximumBytesPerEvent { - // This is a new log event or we will exceed max bytes per event - // so flush the current eventBuffer to events and reset timestamp - l.processEvent(batch, eventBuffer, eventBufferTimestamp) - eventBufferTimestamp = msg.Timestamp.UnixNano() / int64(time.Millisecond) - eventBuffer = eventBuffer[:0] - } - // Append new line if event is less than max event size - if len(line) < maximumBytesPerEvent { - line = append(line, "\n"...) - } - eventBuffer = append(eventBuffer, line...) - logger.PutMessage(msg) - } else { - l.processEvent(batch, line, msg.Timestamp.UnixNano()/int64(time.Millisecond)) - logger.PutMessage(msg) - } - } - } -} - -// processEvent processes log events that are ready for submission to CloudWatch -// logs. Batching is performed on time- and size-bases. Time-based batching -// occurs at a 5 second interval (defined in the batchPublishFrequency const). -// Size-based batching is performed on the maximum number of events per batch -// (defined in maximumLogEventsPerPut) and the maximum number of total bytes in a -// batch (defined in maximumBytesPerPut). Log messages are split by the maximum -// bytes per event (defined in maximumBytesPerEvent). There is a fixed per-event -// byte overhead (defined in perEventBytes) which is accounted for in split- and -// batch-calculations. -func (l *logStream) processEvent(batch *eventBatch, events []byte, timestamp int64) { - for len(events) > 0 { - // Split line length so it does not exceed the maximum - lineBytes := len(events) - if lineBytes > maximumBytesPerEvent { - lineBytes = maximumBytesPerEvent - } - line := events[:lineBytes] - - event := wrappedEvent{ - inputLogEvent: &cloudwatchlogs.InputLogEvent{ - Message: aws.String(string(line)), - Timestamp: aws.Int64(timestamp), - }, - insertOrder: batch.count(), - } - - added := batch.add(event, lineBytes) - if added { - events = events[lineBytes:] - } else { - l.publishBatch(batch) - batch.reset() - } - } -} - -// publishBatch calls PutLogEvents for a given set of InputLogEvents, -// accounting for sequencing requirements (each request must reference the -// sequence token returned by the previous request). 
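The sequencing contract is easiest to see in isolation: the first call may pass a nil token, and every later call must pass the NextSequenceToken returned by the previous one. A hedged sketch against the same aws-sdk-go cloudwatchlogs package imported above; the helper and its names are illustrative:

package awslogsbatch

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

// publishAll pushes pre-built pages of events in order, threading the
// sequence token returned by each PutLogEvents call into the next one,
// which is the contract publishBatch below has to satisfy.
func publishAll(client *cloudwatchlogs.CloudWatchLogs, group, stream string, pages [][]*cloudwatchlogs.InputLogEvent) error {
	var token *string // nil is accepted for the first call on a fresh stream
	for _, page := range pages {
		resp, err := client.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{
			LogEvents:     page,
			LogGroupName:  aws.String(group),
			LogStreamName: aws.String(stream),
			SequenceToken: token,
		})
		if err != nil {
			return err // a real caller also inspects awserr codes, as below
		}
		token = resp.NextSequenceToken
	}
	return nil
}

Note how the two recovery branches in the function below (DataAlreadyAcceptedException and InvalidSequenceTokenException) both extract the expected token from the error message rather than failing the batch.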
-func (l *logStream) publishBatch(batch *eventBatch) { - if batch.isEmpty() { - return - } - cwEvents := unwrapEvents(batch.events()) - - nextSequenceToken, err := l.putLogEvents(cwEvents, l.sequenceToken) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == dataAlreadyAcceptedCode { - // already submitted, just grab the correct sequence token - parts := strings.Split(awsErr.Message(), " ") - nextSequenceToken = &parts[len(parts)-1] - logrus.WithFields(logrus.Fields{ - "errorCode": awsErr.Code(), - "message": awsErr.Message(), - "logGroupName": l.logGroupName, - "logStreamName": l.logStreamName, - }).Info("Data already accepted, ignoring error") - err = nil - } else if awsErr.Code() == invalidSequenceTokenCode { - // sequence code is bad, grab the correct one and retry - parts := strings.Split(awsErr.Message(), " ") - token := parts[len(parts)-1] - nextSequenceToken, err = l.putLogEvents(cwEvents, &token) - } - } - } - if err != nil { - logrus.Error(err) - } else { - l.sequenceToken = nextSequenceToken - } -} - -// putLogEvents wraps the PutLogEvents API -func (l *logStream) putLogEvents(events []*cloudwatchlogs.InputLogEvent, sequenceToken *string) (*string, error) { - input := &cloudwatchlogs.PutLogEventsInput{ - LogEvents: events, - SequenceToken: sequenceToken, - LogGroupName: aws.String(l.logGroupName), - LogStreamName: aws.String(l.logStreamName), - } - resp, err := l.client.PutLogEvents(input) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - logrus.WithFields(logrus.Fields{ - "errorCode": awsErr.Code(), - "message": awsErr.Message(), - "origError": awsErr.OrigErr(), - "logGroupName": l.logGroupName, - "logStreamName": l.logStreamName, - }).Error("Failed to put log events") - } - return nil, err - } - return resp.NextSequenceToken, nil -} - -// ValidateLogOpt looks for awslogs-specific log options awslogs-region, -// awslogs-group, awslogs-stream, awslogs-create-group, awslogs-datetime-format, -// awslogs-multiline-pattern -func ValidateLogOpt(cfg map[string]string) error { - for key := range cfg { - switch key { - case logGroupKey: - case logStreamKey: - case logCreateGroupKey: - case regionKey: - case tagKey: - case datetimeFormatKey: - case multilinePatternKey: - case credentialsEndpointKey: - default: - return fmt.Errorf("unknown log opt '%s' for %s log driver", key, name) - } - } - if cfg[logGroupKey] == "" { - return fmt.Errorf("must specify a value for log opt '%s'", logGroupKey) - } - if cfg[logCreateGroupKey] != "" { - if _, err := strconv.ParseBool(cfg[logCreateGroupKey]); err != nil { - return fmt.Errorf("must specify valid value for log opt '%s': %v", logCreateGroupKey, err) - } - } - _, datetimeFormatKeyExists := cfg[datetimeFormatKey] - _, multilinePatternKeyExists := cfg[multilinePatternKey] - if datetimeFormatKeyExists && multilinePatternKeyExists { - return fmt.Errorf("you cannot configure log opt '%s' and '%s' at the same time", datetimeFormatKey, multilinePatternKey) - } - return nil -} - -// Len returns the length of a byTimestamp slice. Len is required by the -// sort.Interface interface. -func (slice byTimestamp) Len() int { - return len(slice) -} - -// Less compares two values in a byTimestamp slice by Timestamp. Less is -// required by the sort.Interface interface. 
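Further down, eventBatch.add enforces the service limits that processEvent relies on; the arithmetic can be sketched standalone, with limit values assumed to mirror the CloudWatch Logs constants defined earlier in this file:

package main

import "fmt"

// Assumed values mirroring the documented CloudWatch Logs constraints.
const (
	perEventBytes          = 26
	maximumBytesPerPut     = 1048576
	maximumLogEventsPerPut = 10000
)

// fits reports whether one more event of payload size n can join a batch
// that already holds count events totalling used bytes, as add() does below.
func fits(count, used, n int) bool {
	return count+1 <= maximumLogEventsPerPut && used+n+perEventBytes <= maximumBytesPerPut
}

func main() {
	fmt.Println(fits(0, 0, 200))                   // true: charges 226 bytes of the budget
	fmt.Println(fits(9999, 0, 1))                  // true: exactly at the event cap
	fmt.Println(fits(10000, 0, 1))                 // false: event-count limit reached
	fmt.Println(fits(1, maximumBytesPerPut-26, 1)) // false: byte limit reached
}

add below returns false rather than an error; that boolean is the signal processEvent uses to publish the full batch, reset it, and retry the event against an empty one.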
-func (slice byTimestamp) Less(i, j int) bool { - iTimestamp, jTimestamp := int64(0), int64(0) - if slice != nil && slice[i].inputLogEvent.Timestamp != nil { - iTimestamp = *slice[i].inputLogEvent.Timestamp - } - if slice != nil && slice[j].inputLogEvent.Timestamp != nil { - jTimestamp = *slice[j].inputLogEvent.Timestamp - } - if iTimestamp == jTimestamp { - return slice[i].insertOrder < slice[j].insertOrder - } - return iTimestamp < jTimestamp -} - -// Swap swaps two values in a byTimestamp slice with each other. Swap is -// required by the sort.Interface interface. -func (slice byTimestamp) Swap(i, j int) { - slice[i], slice[j] = slice[j], slice[i] -} - -func unwrapEvents(events []wrappedEvent) []*cloudwatchlogs.InputLogEvent { - cwEvents := make([]*cloudwatchlogs.InputLogEvent, len(events)) - for i, input := range events { - cwEvents[i] = input.inputLogEvent - } - return cwEvents -} - -func newEventBatch() *eventBatch { - return &eventBatch{ - batch: make([]wrappedEvent, 0), - bytes: 0, - } -} - -// events returns a slice of wrappedEvents sorted in order of their -// timestamps and then by their insertion order (see `byTimestamp`). -// -// Warning: this method is not threadsafe and must not be used -// concurrently. -func (b *eventBatch) events() []wrappedEvent { - sort.Sort(byTimestamp(b.batch)) - return b.batch -} - -// add adds an event to the batch of events accounting for the -// necessary overhead for an event to be logged. An error will be -// returned if the event cannot be added to the batch due to service -// limits. -// -// Warning: this method is not threadsafe and must not be used -// concurrently. -func (b *eventBatch) add(event wrappedEvent, size int) bool { - addBytes := size + perEventBytes - - // verify we are still within service limits - switch { - case len(b.batch)+1 > maximumLogEventsPerPut: - return false - case b.bytes+addBytes > maximumBytesPerPut: - return false - } - - b.bytes += addBytes - b.batch = append(b.batch, event) - - return true -} - -// count is the number of batched events. Warning: this method -// is not threadsafe and must not be used concurrently. -func (b *eventBatch) count() int { - return len(b.batch) -} - -// size is the total number of bytes that the batch represents. -// -// Warning: this method is not threadsafe and must not be used -// concurrently. -func (b *eventBatch) size() int { - return b.bytes -} - -func (b *eventBatch) isEmpty() bool { - zeroEvents := b.count() == 0 - zeroSize := b.size() == 0 - return zeroEvents && zeroSize -} - -// reset prepares the batch for reuse. -func (b *eventBatch) reset() { - b.bytes = 0 - b.batch = b.batch[:0] -} diff --git a/vendor/github.com/docker/docker/daemon/logger/copier.go b/vendor/github.com/docker/docker/daemon/logger/copier.go deleted file mode 100644 index e24272fa6..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/copier.go +++ /dev/null @@ -1,186 +0,0 @@ -package logger // import "github.com/docker/docker/daemon/logger" - -import ( - "bytes" - "io" - "sync" - "time" - - types "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/pkg/stringid" - "github.com/sirupsen/logrus" -) - -const ( - // readSize is the maximum bytes read during a single read - // operation. - readSize = 2 * 1024 - - // defaultBufSize provides a reasonable default for loggers that do - // not have an external limit to impose on log line size. - defaultBufSize = 16 * 1024 -) - -// Copier can copy logs from specified sources to Logger and attach Timestamp. 
-// Writes are concurrent, so you need implement some sync in your logger. -type Copier struct { - // srcs is map of name -> reader pairs, for example "stdout", "stderr" - srcs map[string]io.Reader - dst Logger - copyJobs sync.WaitGroup - closeOnce sync.Once - closed chan struct{} -} - -// NewCopier creates a new Copier -func NewCopier(srcs map[string]io.Reader, dst Logger) *Copier { - return &Copier{ - srcs: srcs, - dst: dst, - closed: make(chan struct{}), - } -} - -// Run starts logs copying -func (c *Copier) Run() { - for src, w := range c.srcs { - c.copyJobs.Add(1) - go c.copySrc(src, w) - } -} - -func (c *Copier) copySrc(name string, src io.Reader) { - defer c.copyJobs.Done() - - bufSize := defaultBufSize - if sizedLogger, ok := c.dst.(SizedLogger); ok { - bufSize = sizedLogger.BufSize() - } - buf := make([]byte, bufSize) - - n := 0 - eof := false - var partialid string - var partialTS time.Time - var ordinal int - firstPartial := true - hasMorePartial := false - - for { - select { - case <-c.closed: - return - default: - // Work out how much more data we are okay with reading this time. - upto := n + readSize - if upto > cap(buf) { - upto = cap(buf) - } - // Try to read that data. - if upto > n { - read, err := src.Read(buf[n:upto]) - if err != nil { - if err != io.EOF { - logReadsFailedCount.Inc(1) - logrus.Errorf("Error scanning log stream: %s", err) - return - } - eof = true - } - n += read - } - // If we have no data to log, and there's no more coming, we're done. - if n == 0 && eof { - return - } - // Break up the data that we've buffered up into lines, and log each in turn. - p := 0 - - for q := bytes.IndexByte(buf[p:n], '\n'); q >= 0; q = bytes.IndexByte(buf[p:n], '\n') { - select { - case <-c.closed: - return - default: - msg := NewMessage() - msg.Source = name - msg.Line = append(msg.Line, buf[p:p+q]...) - - if hasMorePartial { - msg.PLogMetaData = &types.PartialLogMetaData{ID: partialid, Ordinal: ordinal, Last: true} - - // reset - partialid = "" - ordinal = 0 - firstPartial = true - hasMorePartial = false - } - if msg.PLogMetaData == nil { - msg.Timestamp = time.Now().UTC() - } else { - msg.Timestamp = partialTS - } - - if logErr := c.dst.Log(msg); logErr != nil { - logWritesFailedCount.Inc(1) - logrus.Errorf("Failed to log msg %q for logger %s: %s", msg.Line, c.dst.Name(), logErr) - } - } - p += q + 1 - } - // If there's no more coming, or the buffer is full but - // has no newlines, log whatever we haven't logged yet, - // noting that it's a partial log line. - if eof || (p == 0 && n == len(buf)) { - if p < n { - msg := NewMessage() - msg.Source = name - msg.Line = append(msg.Line, buf[p:n]...) - - // Generate unique partialID for first partial. Use it across partials. - // Record timestamp for first partial. Use it across partials. - // Initialize Ordinal for first partial. Increment it across partials. - if firstPartial { - msg.Timestamp = time.Now().UTC() - partialTS = msg.Timestamp - partialid = stringid.GenerateRandomID() - ordinal = 1 - firstPartial = false - totalPartialLogs.Inc(1) - } else { - msg.Timestamp = partialTS - } - msg.PLogMetaData = &types.PartialLogMetaData{ID: partialid, Ordinal: ordinal, Last: false} - ordinal++ - hasMorePartial = true - - if logErr := c.dst.Log(msg); logErr != nil { - logWritesFailedCount.Inc(1) - logrus.Errorf("Failed to log msg %q for logger %s: %s", msg.Line, c.dst.Name(), logErr) - } - p = 0 - n = 0 - } - if eof { - return - } - } - // Move any unlogged data to the front of the buffer in preparation for another read. 
-			if p > 0 {
-				copy(buf[0:], buf[p:n])
-				n -= p
-			}
-		}
-	}
-}
-
-// Wait waits until all copying is done
-func (c *Copier) Wait() {
-	c.copyJobs.Wait()
-}
-
-// Close closes the copier
-func (c *Copier) Close() {
-	c.closeOnce.Do(func() {
-		close(c.closed)
-	})
-}
diff --git a/vendor/github.com/docker/docker/daemon/logger/etwlogs/etwlogs_windows.go b/vendor/github.com/docker/docker/daemon/logger/etwlogs/etwlogs_windows.go
deleted file mode 100644
index 78d3477b6..000000000
--- a/vendor/github.com/docker/docker/daemon/logger/etwlogs/etwlogs_windows.go
+++ /dev/null
@@ -1,168 +0,0 @@
-// Package etwlogs provides a log driver for forwarding container logs
-// as ETW events (ETW stands for Event Tracing for Windows).
-// A client can then create an ETW listener to listen for events that are sent
-// by the ETW provider that we register, using the provider's GUID "a3693192-9ed6-46d2-a981-f8226c8363bd".
-// Here is an example of how to do this using the logman utility:
-// 1. logman start -ets DockerContainerLogs -p {a3693192-9ed6-46d2-a981-f8226c8363bd} 0 0 -o trace.etl
-// 2. Run container(s) and generate log messages
-// 3. logman stop -ets DockerContainerLogs
-// 4. You can then convert the etl log file to XML using: tracerpt -y trace.etl
-//
-// Each container log message generates an ETW event that also contains:
-// the container name and ID, the timestamp, and the stream type.
-package etwlogs // import "github.com/docker/docker/daemon/logger/etwlogs"
-
-import (
-	"errors"
-	"fmt"
-	"sync"
-	"unsafe"
-
-	"github.com/docker/docker/daemon/logger"
-	"github.com/sirupsen/logrus"
-	"golang.org/x/sys/windows"
-)
-
-type etwLogs struct {
-	containerName string
-	imageName     string
-	containerID   string
-	imageID       string
-}
-
-const (
-	name             = "etwlogs"
-	win32CallSuccess = 0
-)
-
-var (
-	modAdvapi32          = windows.NewLazySystemDLL("Advapi32.dll")
-	procEventRegister    = modAdvapi32.NewProc("EventRegister")
-	procEventWriteString = modAdvapi32.NewProc("EventWriteString")
-	procEventUnregister  = modAdvapi32.NewProc("EventUnregister")
-)
-var providerHandle windows.Handle
-var refCount int
-var mu sync.Mutex
-
-func init() {
-	providerHandle = windows.InvalidHandle
-	if err := logger.RegisterLogDriver(name, New); err != nil {
-		logrus.Fatal(err)
-	}
-}
-
-// New creates a new etwLogs logger for the given container and registers the ETW provider.
-func New(info logger.Info) (logger.Logger, error) {
-	if err := registerETWProvider(); err != nil {
-		return nil, err
-	}
-	logrus.Debugf("logging driver etwLogs configured for container: %s.", info.ContainerID)
-
-	return &etwLogs{
-		containerName: info.Name(),
-		imageName:     info.ContainerImageName,
-		containerID:   info.ContainerID,
-		imageID:       info.ContainerImageID,
-	}, nil
-}
-
-// Log logs the message to the ETW stream.
-func (etwLogger *etwLogs) Log(msg *logger.Message) error {
-	if providerHandle == windows.InvalidHandle {
-		// This should never be hit; if it is, it indicates a programming error.
-		errorMessage := "ETWLogs cannot log the message, because the event provider has not been registered."
-		logrus.Error(errorMessage)
-		return errors.New(errorMessage)
-	}
-	m := createLogMessage(etwLogger, msg)
-	logger.PutMessage(msg)
-	return callEventWriteString(m)
-}
-
-// Close closes the logger by unregistering the ETW provider.
-func (etwLogger *etwLogs) Close() error { - unregisterETWProvider() - return nil -} - -func (etwLogger *etwLogs) Name() string { - return name -} - -func createLogMessage(etwLogger *etwLogs, msg *logger.Message) string { - return fmt.Sprintf("container_name: %s, image_name: %s, container_id: %s, image_id: %s, source: %s, log: %s", - etwLogger.containerName, - etwLogger.imageName, - etwLogger.containerID, - etwLogger.imageID, - msg.Source, - msg.Line) -} - -func registerETWProvider() error { - mu.Lock() - defer mu.Unlock() - if refCount == 0 { - var err error - if err = callEventRegister(); err != nil { - return err - } - } - - refCount++ - return nil -} - -func unregisterETWProvider() { - mu.Lock() - defer mu.Unlock() - if refCount == 1 { - if callEventUnregister() { - refCount-- - providerHandle = windows.InvalidHandle - } - // Not returning an error if EventUnregister fails, because etwLogs will continue to work - } else { - refCount-- - } -} - -func callEventRegister() error { - // The provider's GUID is {a3693192-9ed6-46d2-a981-f8226c8363bd} - guid := windows.GUID{ - Data1: 0xa3693192, - Data2: 0x9ed6, - Data3: 0x46d2, - Data4: [8]byte{0xa9, 0x81, 0xf8, 0x22, 0x6c, 0x83, 0x63, 0xbd}, - } - - ret, _, _ := procEventRegister.Call(uintptr(unsafe.Pointer(&guid)), 0, 0, uintptr(unsafe.Pointer(&providerHandle))) - if ret != win32CallSuccess { - errorMessage := fmt.Sprintf("Failed to register ETW provider. Error: %d", ret) - logrus.Error(errorMessage) - return errors.New(errorMessage) - } - return nil -} - -func callEventWriteString(message string) error { - utf16message, err := windows.UTF16FromString(message) - - if err != nil { - return err - } - - ret, _, _ := procEventWriteString.Call(uintptr(providerHandle), 0, 0, uintptr(unsafe.Pointer(&utf16message[0]))) - if ret != win32CallSuccess { - errorMessage := fmt.Sprintf("ETWLogs provider failed to log message. Error: %d", ret) - logrus.Error(errorMessage) - return errors.New(errorMessage) - } - return nil -} - -func callEventUnregister() bool { - ret, _, _ := procEventUnregister.Call(uintptr(providerHandle)) - return ret == win32CallSuccess -} diff --git a/vendor/github.com/docker/docker/daemon/logger/factory.go b/vendor/github.com/docker/docker/daemon/logger/factory.go deleted file mode 100644 index 84b54b279..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/factory.go +++ /dev/null @@ -1,162 +0,0 @@ -package logger // import "github.com/docker/docker/daemon/logger" - -import ( - "fmt" - "sort" - "sync" - - containertypes "github.com/docker/docker/api/types/container" - "github.com/docker/docker/pkg/plugingetter" - "github.com/docker/go-units" - "github.com/pkg/errors" -) - -// Creator builds a logging driver instance with given context. -type Creator func(Info) (Logger, error) - -// LogOptValidator checks the options specific to the underlying -// logging implementation. 
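Each driver removed in this patch registers itself against this factory from an init function. A minimal sketch of that handshake for a hypothetical out-of-tree driver; the package name, constructor, and validator are illustrative stand-ins:

// Package mydriver sketches the registration handshake used by the
// built-in drivers above (awslogs, etwlogs, fluentd, ...).
package mydriver

import (
	"errors"
	"fmt"

	"github.com/docker/docker/daemon/logger"
	"github.com/sirupsen/logrus"
)

const name = "mydriver"

func init() {
	// Both registrations are fatal on collision, matching the drivers above.
	if err := logger.RegisterLogDriver(name, New); err != nil {
		logrus.Fatal(err)
	}
	if err := logger.RegisterLogOptValidator(name, validateLogOpt); err != nil {
		logrus.Fatal(err)
	}
}

// New would build and return the concrete logger.Logger implementation.
func New(info logger.Info) (logger.Logger, error) {
	return nil, errors.New("mydriver: not implemented")
}

// validateLogOpt receives the config with the built-in opts already stripped.
func validateLogOpt(cfg map[string]string) error {
	for key := range cfg {
		if key != "tag" {
			return fmt.Errorf("unknown log opt '%s' for %s log driver", key, name)
		}
	}
	return nil
}

Note that driverRegistered below also consults the plugin getter, so the same lookup path covers both built-in drivers and logging plugins.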
-type LogOptValidator func(cfg map[string]string) error - -type logdriverFactory struct { - registry map[string]Creator - optValidator map[string]LogOptValidator - m sync.Mutex -} - -func (lf *logdriverFactory) list() []string { - ls := make([]string, 0, len(lf.registry)) - lf.m.Lock() - for name := range lf.registry { - ls = append(ls, name) - } - lf.m.Unlock() - sort.Strings(ls) - return ls -} - -// ListDrivers gets the list of registered log driver names -func ListDrivers() []string { - return factory.list() -} - -func (lf *logdriverFactory) register(name string, c Creator) error { - if lf.driverRegistered(name) { - return fmt.Errorf("logger: log driver named '%s' is already registered", name) - } - - lf.m.Lock() - lf.registry[name] = c - lf.m.Unlock() - return nil -} - -func (lf *logdriverFactory) driverRegistered(name string) bool { - lf.m.Lock() - _, ok := lf.registry[name] - lf.m.Unlock() - if !ok { - if pluginGetter != nil { // this can be nil when the init functions are running - if l, _ := getPlugin(name, plugingetter.Lookup); l != nil { - return true - } - } - } - return ok -} - -func (lf *logdriverFactory) registerLogOptValidator(name string, l LogOptValidator) error { - lf.m.Lock() - defer lf.m.Unlock() - - if _, ok := lf.optValidator[name]; ok { - return fmt.Errorf("logger: log validator named '%s' is already registered", name) - } - lf.optValidator[name] = l - return nil -} - -func (lf *logdriverFactory) get(name string) (Creator, error) { - lf.m.Lock() - defer lf.m.Unlock() - - c, ok := lf.registry[name] - if ok { - return c, nil - } - - c, err := getPlugin(name, plugingetter.Acquire) - return c, errors.Wrapf(err, "logger: no log driver named '%s' is registered", name) -} - -func (lf *logdriverFactory) getLogOptValidator(name string) LogOptValidator { - lf.m.Lock() - defer lf.m.Unlock() - - c := lf.optValidator[name] - return c -} - -var factory = &logdriverFactory{registry: make(map[string]Creator), optValidator: make(map[string]LogOptValidator)} // global factory instance - -// RegisterLogDriver registers the given logging driver builder with given logging -// driver name. -func RegisterLogDriver(name string, c Creator) error { - return factory.register(name, c) -} - -// RegisterLogOptValidator registers the logging option validator with -// the given logging driver name. -func RegisterLogOptValidator(name string, l LogOptValidator) error { - return factory.registerLogOptValidator(name, l) -} - -// GetLogDriver provides the logging driver builder for a logging driver name. -func GetLogDriver(name string) (Creator, error) { - return factory.get(name) -} - -var builtInLogOpts = map[string]bool{ - "mode": true, - "max-buffer-size": true, -} - -// ValidateLogOpts checks the options for the given log driver. The -// options supported are specific to the LogDriver implementation. 
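The split of responsibilities in the function below is worth noting: mode and max-buffer-size are validated centrally and stripped before the driver-specific validator runs, so individual drivers never whitelist them. A hypothetical call, assuming a Linux build where the journald driver has registered itself:

package main

import (
	"log"

	"github.com/docker/docker/daemon/logger"
	// Importing a driver package for side effects runs its init() and
	// registers it with the factory (journald is Linux-only).
	_ "github.com/docker/docker/daemon/logger/journald"
)

func main() {
	cfg := map[string]string{
		"mode":            "non-blocking", // validated and consumed centrally
		"max-buffer-size": "4m",           // only legal with non-blocking mode
		"tag":             "{{.Name}}",    // passed through to the driver's validator
	}
	if err := logger.ValidateLogOpts("journald", cfg); err != nil {
		log.Fatal(err) // an unknown key would be rejected by the driver's validator
	}
}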
-func ValidateLogOpts(name string, cfg map[string]string) error { - if name == "none" { - return nil - } - - switch containertypes.LogMode(cfg["mode"]) { - case containertypes.LogModeBlocking, containertypes.LogModeNonBlock, containertypes.LogModeUnset: - default: - return fmt.Errorf("logger: logging mode not supported: %s", cfg["mode"]) - } - - if s, ok := cfg["max-buffer-size"]; ok { - if containertypes.LogMode(cfg["mode"]) != containertypes.LogModeNonBlock { - return fmt.Errorf("logger: max-buffer-size option is only supported with 'mode=%s'", containertypes.LogModeNonBlock) - } - if _, err := units.RAMInBytes(s); err != nil { - return errors.Wrap(err, "error parsing option max-buffer-size") - } - } - - if !factory.driverRegistered(name) { - return fmt.Errorf("logger: no log driver named '%s' is registered", name) - } - - filteredOpts := make(map[string]string, len(builtInLogOpts)) - for k, v := range cfg { - if !builtInLogOpts[k] { - filteredOpts[k] = v - } - } - - validator := factory.getLogOptValidator(name) - if validator != nil { - return validator(filteredOpts) - } - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/logger/fluentd/fluentd.go b/vendor/github.com/docker/docker/daemon/logger/fluentd/fluentd.go deleted file mode 100644 index 907261f41..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/fluentd/fluentd.go +++ /dev/null @@ -1,263 +0,0 @@ -// Package fluentd provides the log driver for forwarding server logs -// to fluentd endpoints. -package fluentd // import "github.com/docker/docker/daemon/logger/fluentd" - -import ( - "fmt" - "math" - "net" - "net/url" - "strconv" - "strings" - "time" - - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/daemon/logger/loggerutils" - "github.com/docker/docker/pkg/urlutil" - "github.com/docker/go-units" - "github.com/fluent/fluent-logger-golang/fluent" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -type fluentd struct { - tag string - containerID string - containerName string - writer *fluent.Fluent - extra map[string]string -} - -type location struct { - protocol string - host string - port int - path string -} - -const ( - name = "fluentd" - - defaultProtocol = "tcp" - defaultHost = "127.0.0.1" - defaultPort = 24224 - defaultBufferLimit = 1024 * 1024 - - // logger tries to reconnect 2**32 - 1 times - // failed (and panic) after 204 years [ 1.5 ** (2**32 - 1) - 1 seconds] - defaultRetryWait = 1000 - defaultMaxRetries = math.MaxInt32 - - addressKey = "fluentd-address" - bufferLimitKey = "fluentd-buffer-limit" - retryWaitKey = "fluentd-retry-wait" - maxRetriesKey = "fluentd-max-retries" - asyncConnectKey = "fluentd-async-connect" - subSecondPrecisionKey = "fluentd-sub-second-precision" -) - -func init() { - if err := logger.RegisterLogDriver(name, New); err != nil { - logrus.Fatal(err) - } - if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { - logrus.Fatal(err) - } -} - -// New creates a fluentd logger using the configuration passed in on -// the context. The supported context configuration variable is -// fluentd-address. 
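The shapes accepted for fluentd-address (resolved by parseAddress at the bottom of this file) are easiest to see by example. A test-style fragment that would have to sit in the same package, since parseAddress is unexported; it assumes an fmt import, and the addresses are illustrative:

func ExampleParseAddress() {
	for _, addr := range []string{
		"",                        // all defaults: tcp://127.0.0.1:24224
		"tcp://example.com:24225", // explicit transport URL, host and port kept
		"unix:///run/fluent.sock", // unix/unixgram carry a socket path, no port
	} {
		loc, err := parseAddress(addr)
		fmt.Printf("%q -> %+v (err: %v)\n", addr, loc, err)
	}
}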
-func New(info logger.Info) (logger.Logger, error) { - loc, err := parseAddress(info.Config[addressKey]) - if err != nil { - return nil, err - } - - tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) - if err != nil { - return nil, err - } - - extra, err := info.ExtraAttributes(nil) - if err != nil { - return nil, err - } - - bufferLimit := defaultBufferLimit - if info.Config[bufferLimitKey] != "" { - bl64, err := units.RAMInBytes(info.Config[bufferLimitKey]) - if err != nil { - return nil, err - } - bufferLimit = int(bl64) - } - - retryWait := defaultRetryWait - if info.Config[retryWaitKey] != "" { - rwd, err := time.ParseDuration(info.Config[retryWaitKey]) - if err != nil { - return nil, err - } - retryWait = int(rwd.Seconds() * 1000) - } - - maxRetries := defaultMaxRetries - if info.Config[maxRetriesKey] != "" { - mr64, err := strconv.ParseUint(info.Config[maxRetriesKey], 10, strconv.IntSize) - if err != nil { - return nil, err - } - maxRetries = int(mr64) - } - - asyncConnect := false - if info.Config[asyncConnectKey] != "" { - if asyncConnect, err = strconv.ParseBool(info.Config[asyncConnectKey]); err != nil { - return nil, err - } - } - - subSecondPrecision := false - if info.Config[subSecondPrecisionKey] != "" { - if subSecondPrecision, err = strconv.ParseBool(info.Config[subSecondPrecisionKey]); err != nil { - return nil, err - } - } - - fluentConfig := fluent.Config{ - FluentPort: loc.port, - FluentHost: loc.host, - FluentNetwork: loc.protocol, - FluentSocketPath: loc.path, - BufferLimit: bufferLimit, - RetryWait: retryWait, - MaxRetry: maxRetries, - AsyncConnect: asyncConnect, - SubSecondPrecision: subSecondPrecision, - } - - logrus.WithField("container", info.ContainerID).WithField("config", fluentConfig). - Debug("logging driver fluentd configured") - - log, err := fluent.New(fluentConfig) - if err != nil { - return nil, err - } - return &fluentd{ - tag: tag, - containerID: info.ContainerID, - containerName: info.ContainerName, - writer: log, - extra: extra, - }, nil -} - -func (f *fluentd) Log(msg *logger.Message) error { - data := map[string]string{ - "container_id": f.containerID, - "container_name": f.containerName, - "source": msg.Source, - "log": string(msg.Line), - } - for k, v := range f.extra { - data[k] = v - } - if msg.PLogMetaData != nil { - data["partial_message"] = "true" - } - - ts := msg.Timestamp - logger.PutMessage(msg) - // fluent-logger-golang buffers logs from failures and disconnections, - // and these are transferred again automatically. - return f.writer.PostWithTime(f.tag, ts, data) -} - -func (f *fluentd) Close() error { - return f.writer.Close() -} - -func (f *fluentd) Name() string { - return name -} - -// ValidateLogOpt looks for fluentd specific log option fluentd-address. 
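For reference, the map built by Log above reaches the fluentd endpoint as a single record under the configured tag. A small sketch of one such record; every value is made up:

package main

import "fmt"

func main() {
	// One stdout line as the fluentd endpoint would receive it from
	// PostWithTime(tag, ts, data); values are illustrative.
	data := map[string]string{
		"container_id":   "2f4e6a81c0d9",
		"container_name": "/web",
		"source":         "stdout",
		"log":            "GET /healthz 200",
		// "partial_message": "true" is added when the copier split the line;
		// extra attributes (labels, env) are merged in as additional keys.
	}
	fmt.Println(data)
}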
-func ValidateLogOpt(cfg map[string]string) error { - for key := range cfg { - switch key { - case "env": - case "env-regex": - case "labels": - case "tag": - case addressKey: - case bufferLimitKey: - case retryWaitKey: - case maxRetriesKey: - case asyncConnectKey: - case subSecondPrecisionKey: - // Accepted - default: - return fmt.Errorf("unknown log opt '%s' for fluentd log driver", key) - } - } - - _, err := parseAddress(cfg[addressKey]) - return err -} - -func parseAddress(address string) (*location, error) { - if address == "" { - return &location{ - protocol: defaultProtocol, - host: defaultHost, - port: defaultPort, - path: "", - }, nil - } - - protocol := defaultProtocol - givenAddress := address - if urlutil.IsTransportURL(address) { - url, err := url.Parse(address) - if err != nil { - return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) - } - // unix and unixgram socket - if url.Scheme == "unix" || url.Scheme == "unixgram" { - return &location{ - protocol: url.Scheme, - host: "", - port: 0, - path: url.Path, - }, nil - } - // tcp|udp - protocol = url.Scheme - address = url.Host - } - - host, port, err := net.SplitHostPort(address) - if err != nil { - if !strings.Contains(err.Error(), "missing port in address") { - return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) - } - return &location{ - protocol: protocol, - host: host, - port: defaultPort, - path: "", - }, nil - } - - portnum, err := strconv.Atoi(port) - if err != nil { - return nil, errors.Wrapf(err, "invalid fluentd-address %s", givenAddress) - } - return &location{ - protocol: protocol, - host: host, - port: portnum, - path: "", - }, nil -} diff --git a/vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging.go b/vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging.go deleted file mode 100644 index 1699f67a2..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging.go +++ /dev/null @@ -1,244 +0,0 @@ -package gcplogs // import "github.com/docker/docker/daemon/logger/gcplogs" - -import ( - "context" - "fmt" - "sync" - "sync/atomic" - "time" - - "github.com/docker/docker/daemon/logger" - - "cloud.google.com/go/compute/metadata" - "cloud.google.com/go/logging" - "github.com/sirupsen/logrus" - mrpb "google.golang.org/genproto/googleapis/api/monitoredres" -) - -const ( - name = "gcplogs" - - projectOptKey = "gcp-project" - logLabelsKey = "labels" - logEnvKey = "env" - logEnvRegexKey = "env-regex" - logCmdKey = "gcp-log-cmd" - logZoneKey = "gcp-meta-zone" - logNameKey = "gcp-meta-name" - logIDKey = "gcp-meta-id" -) - -var ( - // The number of logs the gcplogs driver has dropped. 
- droppedLogs uint64 - - onGCE bool - - // instance metadata populated from the metadata server if available - projectID string - zone string - instanceName string - instanceID string -) - -func init() { - - if err := logger.RegisterLogDriver(name, New); err != nil { - logrus.Fatal(err) - } - - if err := logger.RegisterLogOptValidator(name, ValidateLogOpts); err != nil { - logrus.Fatal(err) - } -} - -type gcplogs struct { - logger *logging.Logger - instance *instanceInfo - container *containerInfo -} - -type dockerLogEntry struct { - Instance *instanceInfo `json:"instance,omitempty"` - Container *containerInfo `json:"container,omitempty"` - Message string `json:"message,omitempty"` -} - -type instanceInfo struct { - Zone string `json:"zone,omitempty"` - Name string `json:"name,omitempty"` - ID string `json:"id,omitempty"` -} - -type containerInfo struct { - Name string `json:"name,omitempty"` - ID string `json:"id,omitempty"` - ImageName string `json:"imageName,omitempty"` - ImageID string `json:"imageId,omitempty"` - Created time.Time `json:"created,omitempty"` - Command string `json:"command,omitempty"` - Metadata map[string]string `json:"metadata,omitempty"` -} - -var initGCPOnce sync.Once - -func initGCP() { - initGCPOnce.Do(func() { - onGCE = metadata.OnGCE() - if onGCE { - // These will fail on instances if the metadata service is - // down or the client is compiled with an API version that - // has been removed. Since these are not vital, let's ignore - // them and make their fields in the dockerLogEntry ,omitempty - projectID, _ = metadata.ProjectID() - zone, _ = metadata.Zone() - instanceName, _ = metadata.InstanceName() - instanceID, _ = metadata.InstanceID() - } - }) -} - -// New creates a new logger that logs to Google Cloud Logging using the application -// default credentials. -// -// See https://developers.google.com/identity/protocols/application-default-credentials -func New(info logger.Info) (logger.Logger, error) { - initGCP() - - var project string - if projectID != "" { - project = projectID - } - if projectID, found := info.Config[projectOptKey]; found { - project = projectID - } - if project == "" { - return nil, fmt.Errorf("No project was specified and couldn't read project from the metadata server. Please specify a project") - } - - // Issue #29344: gcplogs segfaults (static binary) - // If HOME is not set, logging.NewClient() will call os/user.Current() via oauth2/google. - // However, in static binary, os/user.Current() leads to segfault due to a glibc issue that won't be fixed - // in a short term. 
(golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341) - // So we forcibly set HOME so as to avoid call to os/user/Current() - if err := ensureHomeIfIAmStatic(); err != nil { - return nil, err - } - - c, err := logging.NewClient(context.Background(), project) - if err != nil { - return nil, err - } - var instanceResource *instanceInfo - if onGCE { - instanceResource = &instanceInfo{ - Zone: zone, - Name: instanceName, - ID: instanceID, - } - } else if info.Config[logZoneKey] != "" || info.Config[logNameKey] != "" || info.Config[logIDKey] != "" { - instanceResource = &instanceInfo{ - Zone: info.Config[logZoneKey], - Name: info.Config[logNameKey], - ID: info.Config[logIDKey], - } - } - - options := []logging.LoggerOption{} - if instanceResource != nil { - vmMrpb := logging.CommonResource( - &mrpb.MonitoredResource{ - Type: "gce_instance", - Labels: map[string]string{ - "instance_id": instanceResource.ID, - "zone": instanceResource.Zone, - }, - }, - ) - options = []logging.LoggerOption{vmMrpb} - } - lg := c.Logger("gcplogs-docker-driver", options...) - - if err := c.Ping(context.Background()); err != nil { - return nil, fmt.Errorf("unable to connect or authenticate with Google Cloud Logging: %v", err) - } - - extraAttributes, err := info.ExtraAttributes(nil) - if err != nil { - return nil, err - } - - l := &gcplogs{ - logger: lg, - container: &containerInfo{ - Name: info.ContainerName, - ID: info.ContainerID, - ImageName: info.ContainerImageName, - ImageID: info.ContainerImageID, - Created: info.ContainerCreated, - Metadata: extraAttributes, - }, - } - - if info.Config[logCmdKey] == "true" { - l.container.Command = info.Command() - } - - if instanceResource != nil { - l.instance = instanceResource - } - - // The logger "overflows" at a rate of 10,000 logs per second and this - // overflow func is called. We want to surface the error to the user - // without overly spamming /var/log/docker.log so we log the first time - // we overflow and every 1000th time after. - c.OnError = func(err error) { - if err == logging.ErrOverflow { - if i := atomic.AddUint64(&droppedLogs, 1); i%1000 == 1 { - logrus.Errorf("gcplogs driver has dropped %v logs", i) - } - } else { - logrus.Error(err) - } - } - - return l, nil -} - -// ValidateLogOpts validates the opts passed to the gcplogs driver. Currently, the gcplogs -// driver doesn't take any arguments. 
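Each log line is shipped as the structured dockerLogEntry payload defined above. A runnable reconstruction using local, trimmed copies of those structs; the field values are assumed:

package main

import (
	"encoding/json"
	"fmt"
)

// Local stand-ins for the payload structs above, trimmed to two fields.
type containerInfo struct {
	Name string `json:"name,omitempty"`
	ID   string `json:"id,omitempty"`
}

type dockerLogEntry struct {
	Container *containerInfo `json:"container,omitempty"`
	Message   string         `json:"message,omitempty"`
}

func main() {
	b, _ := json.Marshal(dockerLogEntry{
		Container: &containerInfo{Name: "/web", ID: "2f4e6a81c0d9"},
		Message:   "GET /healthz 200",
	})
	fmt.Println(string(b))
	// {"container":{"name":"/web","id":"2f4e6a81c0d9"},"message":"GET /healthz 200"}
}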
-func ValidateLogOpts(cfg map[string]string) error { - for k := range cfg { - switch k { - case projectOptKey, logLabelsKey, logEnvKey, logEnvRegexKey, logCmdKey, logZoneKey, logNameKey, logIDKey: - default: - return fmt.Errorf("%q is not a valid option for the gcplogs driver", k) - } - } - return nil -} - -func (l *gcplogs) Log(m *logger.Message) error { - message := string(m.Line) - ts := m.Timestamp - logger.PutMessage(m) - - l.logger.Log(logging.Entry{ - Timestamp: ts, - Payload: &dockerLogEntry{ - Instance: l.instance, - Container: l.container, - Message: message, - }, - }) - return nil -} - -func (l *gcplogs) Close() error { - l.logger.Flush() - return nil -} - -func (l *gcplogs) Name() string { - return name -} diff --git a/vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging_linux.go b/vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging_linux.go deleted file mode 100644 index 27f8ef32f..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging_linux.go +++ /dev/null @@ -1,29 +0,0 @@ -package gcplogs // import "github.com/docker/docker/daemon/logger/gcplogs" - -import ( - "os" - - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/pkg/homedir" - "github.com/sirupsen/logrus" -) - -// ensureHomeIfIAmStatic ensure $HOME to be set if dockerversion.IAmStatic is "true". -// See issue #29344: gcplogs segfaults (static binary) -// If HOME is not set, logging.NewClient() will call os/user.Current() via oauth2/google. -// However, in static binary, os/user.Current() leads to segfault due to a glibc issue that won't be fixed -// in a short term. (golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341) -// So we forcibly set HOME so as to avoid call to os/user/Current() -func ensureHomeIfIAmStatic() error { - // Note: dockerversion.IAmStatic and homedir.GetStatic() is only available for linux. - // So we need to use them in this gcplogging_linux.go rather than in gcplogging.go - if dockerversion.IAmStatic == "true" && os.Getenv("HOME") == "" { - home, err := homedir.GetStatic() - if err != nil { - return err - } - logrus.Warnf("gcplogs requires HOME to be set for static daemon binary. Forcibly setting HOME to %s.", home) - os.Setenv("HOME", home) - } - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging_others.go b/vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging_others.go deleted file mode 100644 index 10a2cdc8c..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/gcplogs/gcplogging_others.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !linux - -package gcplogs // import "github.com/docker/docker/daemon/logger/gcplogs" - -func ensureHomeIfIAmStatic() error { - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/logger/gelf/gelf.go b/vendor/github.com/docker/docker/daemon/logger/gelf/gelf.go deleted file mode 100644 index e9c860406..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/gelf/gelf.go +++ /dev/null @@ -1,268 +0,0 @@ -// Package gelf provides the log driver for forwarding server logs to -// endpoints that support the Graylog Extended Log Format. 
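For orientation, this is roughly the JSON document a GELF endpoint receives for one container log line, as assembled by the Log method further down; the host, values, and extra fields are illustrative, and level 6 is the syslog "info" priority used for stdout:

package main

import "fmt"

func main() {
	// An illustrative GELF 1.1 document; the underscore-prefixed extras
	// come from the ExtraAttributes prefixer applied in New below.
	const msg = `{
  "version": "1.1",
  "host": "docker-host-01",
  "short_message": "GET /healthz 200",
  "timestamp": 1624746811.000,
  "level": 6,
  "_container_id": "2f4e6a81c0d9",
  "_container_name": "web",
  "_tag": "web/2f4e6a81c0d9"
}`
	fmt.Println(msg)
}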
-package gelf // import "github.com/docker/docker/daemon/logger/gelf" - -import ( - "compress/flate" - "encoding/json" - "fmt" - "net" - "net/url" - "strconv" - "time" - - "github.com/Graylog2/go-gelf/gelf" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/daemon/logger/loggerutils" - "github.com/docker/docker/pkg/urlutil" - "github.com/sirupsen/logrus" -) - -const name = "gelf" - -type gelfLogger struct { - writer gelf.Writer - info logger.Info - hostname string - rawExtra json.RawMessage -} - -func init() { - if err := logger.RegisterLogDriver(name, New); err != nil { - logrus.Fatal(err) - } - if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { - logrus.Fatal(err) - } -} - -// New creates a gelf logger using the configuration passed in on the -// context. The supported context configuration variable is gelf-address. -func New(info logger.Info) (logger.Logger, error) { - // parse gelf address - address, err := parseAddress(info.Config["gelf-address"]) - if err != nil { - return nil, err - } - - // collect extra data for GELF message - hostname, err := info.Hostname() - if err != nil { - return nil, fmt.Errorf("gelf: cannot access hostname to set source field") - } - - // parse log tag - tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) - if err != nil { - return nil, err - } - - extra := map[string]interface{}{ - "_container_id": info.ContainerID, - "_container_name": info.Name(), - "_image_id": info.ContainerImageID, - "_image_name": info.ContainerImageName, - "_command": info.Command(), - "_tag": tag, - "_created": info.ContainerCreated, - } - - extraAttrs, err := info.ExtraAttributes(func(key string) string { - if key[0] == '_' { - return key - } - return "_" + key - }) - - if err != nil { - return nil, err - } - - for k, v := range extraAttrs { - extra[k] = v - } - - rawExtra, err := json.Marshal(extra) - if err != nil { - return nil, err - } - - var gelfWriter gelf.Writer - if address.Scheme == "udp" { - gelfWriter, err = newGELFUDPWriter(address.Host, info) - if err != nil { - return nil, err - } - } else if address.Scheme == "tcp" { - gelfWriter, err = newGELFTCPWriter(address.Host, info) - if err != nil { - return nil, err - } - } - - return &gelfLogger{ - writer: gelfWriter, - info: info, - hostname: hostname, - rawExtra: rawExtra, - }, nil -} - -// create new TCP gelfWriter -func newGELFTCPWriter(address string, info logger.Info) (gelf.Writer, error) { - gelfWriter, err := gelf.NewTCPWriter(address) - if err != nil { - return nil, fmt.Errorf("gelf: cannot connect to GELF endpoint: %s %v", address, err) - } - - if v, ok := info.Config["gelf-tcp-max-reconnect"]; ok { - i, err := strconv.Atoi(v) - if err != nil || i < 0 { - return nil, fmt.Errorf("gelf-tcp-max-reconnect must be a positive integer") - } - gelfWriter.MaxReconnect = i - } - - if v, ok := info.Config["gelf-tcp-reconnect-delay"]; ok { - i, err := strconv.Atoi(v) - if err != nil || i < 0 { - return nil, fmt.Errorf("gelf-tcp-reconnect-delay must be a positive integer") - } - gelfWriter.ReconnectDelay = time.Duration(i) - } - - return gelfWriter, nil -} - -// create new UDP gelfWriter -func newGELFUDPWriter(address string, info logger.Info) (gelf.Writer, error) { - gelfWriter, err := gelf.NewUDPWriter(address) - if err != nil { - return nil, fmt.Errorf("gelf: cannot connect to GELF endpoint: %s %v", address, err) - } - - if v, ok := info.Config["gelf-compression-type"]; ok { - switch v { - case "gzip": - gelfWriter.CompressionType = gelf.CompressGzip - case 
"zlib": - gelfWriter.CompressionType = gelf.CompressZlib - case "none": - gelfWriter.CompressionType = gelf.CompressNone - default: - return nil, fmt.Errorf("gelf: invalid compression type %q", v) - } - } - - if v, ok := info.Config["gelf-compression-level"]; ok { - val, err := strconv.Atoi(v) - if err != nil { - return nil, fmt.Errorf("gelf: invalid compression level %s, err %v", v, err) - } - gelfWriter.CompressionLevel = val - } - - return gelfWriter, nil -} - -func (s *gelfLogger) Log(msg *logger.Message) error { - level := gelf.LOG_INFO - if msg.Source == "stderr" { - level = gelf.LOG_ERR - } - - m := gelf.Message{ - Version: "1.1", - Host: s.hostname, - Short: string(msg.Line), - TimeUnix: float64(msg.Timestamp.UnixNano()/int64(time.Millisecond)) / 1000.0, - Level: int32(level), - RawExtra: s.rawExtra, - } - logger.PutMessage(msg) - - if err := s.writer.WriteMessage(&m); err != nil { - return fmt.Errorf("gelf: cannot send GELF message: %v", err) - } - return nil -} - -func (s *gelfLogger) Close() error { - return s.writer.Close() -} - -func (s *gelfLogger) Name() string { - return name -} - -// ValidateLogOpt looks for gelf specific log option gelf-address. -func ValidateLogOpt(cfg map[string]string) error { - address, err := parseAddress(cfg["gelf-address"]) - if err != nil { - return err - } - - for key, val := range cfg { - switch key { - case "gelf-address": - case "tag": - case "labels": - case "env": - case "env-regex": - case "gelf-compression-level": - if address.Scheme != "udp" { - return fmt.Errorf("compression is only supported on UDP") - } - i, err := strconv.Atoi(val) - if err != nil || i < flate.DefaultCompression || i > flate.BestCompression { - return fmt.Errorf("unknown value %q for log opt %q for gelf log driver", val, key) - } - case "gelf-compression-type": - if address.Scheme != "udp" { - return fmt.Errorf("compression is only supported on UDP") - } - switch val { - case "gzip", "zlib", "none": - default: - return fmt.Errorf("unknown value %q for log opt %q for gelf log driver", val, key) - } - case "gelf-tcp-max-reconnect", "gelf-tcp-reconnect-delay": - if address.Scheme != "tcp" { - return fmt.Errorf("%q is only valid for TCP", key) - } - i, err := strconv.Atoi(val) - if err != nil || i < 0 { - return fmt.Errorf("%q must be a positive integer", key) - } - default: - return fmt.Errorf("unknown log opt %q for gelf log driver", key) - } - } - - return nil -} - -func parseAddress(address string) (*url.URL, error) { - if address == "" { - return nil, fmt.Errorf("gelf-address is a required parameter") - } - if !urlutil.IsTransportURL(address) { - return nil, fmt.Errorf("gelf-address should be in form proto://address, got %v", address) - } - url, err := url.Parse(address) - if err != nil { - return nil, err - } - - // we support only udp - if url.Scheme != "udp" && url.Scheme != "tcp" { - return nil, fmt.Errorf("gelf: endpoint needs to be TCP or UDP") - } - - // get host and port - if _, _, err = net.SplitHostPort(url.Host); err != nil { - return nil, fmt.Errorf("gelf: please provide gelf-address as proto://host:port") - } - - return url, nil -} diff --git a/vendor/github.com/docker/docker/daemon/logger/journald/journald.go b/vendor/github.com/docker/docker/daemon/logger/journald/journald.go deleted file mode 100644 index 342e18f57..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/journald/journald.go +++ /dev/null @@ -1,127 +0,0 @@ -// +build linux - -// Package journald provides the log driver for forwarding server logs -// to endpoints that receive the 
systemd format.
-package journald // import "github.com/docker/docker/daemon/logger/journald"
-
-import (
-	"fmt"
-	"sync"
-	"unicode"
-
-	"github.com/coreos/go-systemd/journal"
-	"github.com/docker/docker/daemon/logger"
-	"github.com/docker/docker/daemon/logger/loggerutils"
-	"github.com/sirupsen/logrus"
-)
-
-const name = "journald"
-
-type journald struct {
-	mu      sync.Mutex
-	vars    map[string]string // additional variables and values to send to the journal along with the log message
-	readers readerList
-	closed  bool
-}
-
-type readerList struct {
-	readers map[*logger.LogWatcher]*logger.LogWatcher
-}
-
-func init() {
-	if err := logger.RegisterLogDriver(name, New); err != nil {
-		logrus.Fatal(err)
-	}
-	if err := logger.RegisterLogOptValidator(name, validateLogOpt); err != nil {
-		logrus.Fatal(err)
-	}
-}
-
-// sanitizeKeyMod returns the sanitized string so that it can be used in journald.
-// In journald logs, there are special requirements for fields.
-// Fields must be composed of uppercase letters, numbers, and underscores, but must
-// not start with an underscore.
-func sanitizeKeyMod(s string) string {
-	n := ""
-	for _, v := range s {
-		if 'a' <= v && v <= 'z' {
-			v = unicode.ToUpper(v)
-		} else if ('Z' < v || v < 'A') && ('9' < v || v < '0') {
-			v = '_'
-		}
-		// If (n == "" && v == '_'), skip it: the result must not begin with '_'
-		if !(n == "" && v == '_') {
-			n += string(v)
-		}
-	}
-	return n
-}
-
-// New creates a journald logger using the configuration passed in on
-// the context.
-func New(info logger.Info) (logger.Logger, error) {
-	if !journal.Enabled() {
-		return nil, fmt.Errorf("journald is not enabled on this host")
-	}
-
-	// parse log tag
-	tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate)
-	if err != nil {
-		return nil, err
-	}
-
-	vars := map[string]string{
-		"CONTAINER_ID":      info.ContainerID[:12],
-		"CONTAINER_ID_FULL": info.ContainerID,
-		"CONTAINER_NAME":    info.Name(),
-		"CONTAINER_TAG":     tag,
-		"SYSLOG_IDENTIFIER": tag,
-	}
-	extraAttrs, err := info.ExtraAttributes(sanitizeKeyMod)
-	if err != nil {
-		return nil, err
-	}
-	for k, v := range extraAttrs {
-		vars[k] = v
-	}
-	return &journald{vars: vars, readers: readerList{readers: make(map[*logger.LogWatcher]*logger.LogWatcher)}}, nil
-}
-
-// We don't actually accept any options, but we have to supply a callback for
-// the factory to pass the (probably empty) configuration map to.
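The field sanitizer above is easiest to verify by example. A self-contained mirror of the helper, copied here so the snippet runs outside the package; the inputs are illustrative label names:

package main

import (
	"fmt"
	"unicode"
)

// sanitizeKeyMod mirrors the journald helper above: lowercase letters are
// uppercased, anything that is not A-Z or 0-9 becomes '_', and leading
// underscores are dropped, since journald fields must not start with one.
func sanitizeKeyMod(s string) string {
	n := ""
	for _, v := range s {
		if 'a' <= v && v <= 'z' {
			v = unicode.ToUpper(v)
		} else if ('Z' < v || v < 'A') && ('9' < v || v < '0') {
			v = '_'
		}
		if !(n == "" && v == '_') {
			n += string(v)
		}
	}
	return n
}

func main() {
	fmt.Println(sanitizeKeyMod("com.example/team")) // COM_EXAMPLE_TEAM
	fmt.Println(sanitizeKeyMod("_private-label"))   // PRIVATE_LABEL
	fmt.Println(sanitizeKeyMod("env_REGION2"))      // ENV_REGION2
}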
-func validateLogOpt(cfg map[string]string) error { - for key := range cfg { - switch key { - case "labels": - case "env": - case "env-regex": - case "tag": - default: - return fmt.Errorf("unknown log opt '%s' for journald log driver", key) - } - } - return nil -} - -func (s *journald) Log(msg *logger.Message) error { - vars := map[string]string{} - for k, v := range s.vars { - vars[k] = v - } - if msg.PLogMetaData != nil { - vars["CONTAINER_PARTIAL_MESSAGE"] = "true" - } - - line := string(msg.Line) - source := msg.Source - logger.PutMessage(msg) - - if source == "stderr" { - return journal.Send(line, journal.PriErr, vars) - } - return journal.Send(line, journal.PriInfo, vars) -} - -func (s *journald) Name() string { - return name -} diff --git a/vendor/github.com/docker/docker/daemon/logger/journald/journald_unsupported.go b/vendor/github.com/docker/docker/daemon/logger/journald/journald_unsupported.go deleted file mode 100644 index 7899fc121..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/journald/journald_unsupported.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build !linux - -package journald // import "github.com/docker/docker/daemon/logger/journald" - -type journald struct { -} diff --git a/vendor/github.com/docker/docker/daemon/logger/journald/read.go b/vendor/github.com/docker/docker/daemon/logger/journald/read.go deleted file mode 100644 index d4bcc62d9..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/journald/read.go +++ /dev/null @@ -1,441 +0,0 @@ -// +build linux,cgo,!static_build,journald - -package journald // import "github.com/docker/docker/daemon/logger/journald" - -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// -//static int get_message(sd_journal *j, const char **msg, size_t *length, int *partial) -//{ -// int rc; -// size_t plength; -// *msg = NULL; -// *length = 0; -// plength = strlen("CONTAINER_PARTIAL_MESSAGE=true"); -// rc = sd_journal_get_data(j, "CONTAINER_PARTIAL_MESSAGE", (const void **) msg, length); -// *partial = ((rc == 0) && (*length == plength) && (memcmp(*msg, "CONTAINER_PARTIAL_MESSAGE=true", plength) == 0)); -// rc = sd_journal_get_data(j, "MESSAGE", (const void **) msg, length); -// if (rc == 0) { -// if (*length > 8) { -// (*msg) += 8; -// *length -= 8; -// } else { -// *msg = NULL; -// *length = 0; -// rc = -ENOENT; -// } -// } -// return rc; -//} -//static int get_priority(sd_journal *j, int *priority) -//{ -// const void *data; -// size_t i, length; -// int rc; -// *priority = -1; -// rc = sd_journal_get_data(j, "PRIORITY", &data, &length); -// if (rc == 0) { -// if ((length > 9) && (strncmp(data, "PRIORITY=", 9) == 0)) { -// *priority = 0; -// for (i = 9; i < length; i++) { -// *priority = *priority * 10 + ((const char *)data)[i] - '0'; -// } -// if (length > 9) { -// rc = 0; -// } -// } -// } -// return rc; -//} -//static int is_attribute_field(const char *msg, size_t length) -//{ -// static const struct known_field { -// const char *name; -// size_t length; -// } fields[] = { -// {"MESSAGE", sizeof("MESSAGE") - 1}, -// {"MESSAGE_ID", sizeof("MESSAGE_ID") - 1}, -// {"PRIORITY", sizeof("PRIORITY") - 1}, -// {"CODE_FILE", sizeof("CODE_FILE") - 1}, -// {"CODE_LINE", sizeof("CODE_LINE") - 1}, -// {"CODE_FUNC", sizeof("CODE_FUNC") - 1}, -// {"ERRNO", sizeof("ERRNO") - 1}, -// {"SYSLOG_FACILITY", sizeof("SYSLOG_FACILITY") - 1}, -// {"SYSLOG_IDENTIFIER", sizeof("SYSLOG_IDENTIFIER") - 1}, -// {"SYSLOG_PID", sizeof("SYSLOG_PID") - 1}, -// {"CONTAINER_NAME", 
sizeof("CONTAINER_NAME") - 1}, -// {"CONTAINER_ID", sizeof("CONTAINER_ID") - 1}, -// {"CONTAINER_ID_FULL", sizeof("CONTAINER_ID_FULL") - 1}, -// {"CONTAINER_TAG", sizeof("CONTAINER_TAG") - 1}, -// }; -// unsigned int i; -// void *p; -// if ((length < 1) || (msg[0] == '_') || ((p = memchr(msg, '=', length)) == NULL)) { -// return -1; -// } -// length = ((const char *) p) - msg; -// for (i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) { -// if ((fields[i].length == length) && (memcmp(fields[i].name, msg, length) == 0)) { -// return -1; -// } -// } -// return 0; -//} -//static int get_attribute_field(sd_journal *j, const char **msg, size_t *length) -//{ -// int rc; -// *msg = NULL; -// *length = 0; -// while ((rc = sd_journal_enumerate_data(j, (const void **) msg, length)) > 0) { -// if (is_attribute_field(*msg, *length) == 0) { -// break; -// } -// rc = -ENOENT; -// } -// return rc; -//} -//static int wait_for_data_cancelable(sd_journal *j, int pipefd) -//{ -// struct pollfd fds[2]; -// uint64_t when = 0; -// int timeout, jevents, i; -// struct timespec ts; -// uint64_t now; -// -// memset(&fds, 0, sizeof(fds)); -// fds[0].fd = pipefd; -// fds[0].events = POLLHUP; -// fds[1].fd = sd_journal_get_fd(j); -// if (fds[1].fd < 0) { -// return fds[1].fd; -// } -// -// do { -// jevents = sd_journal_get_events(j); -// if (jevents < 0) { -// return jevents; -// } -// fds[1].events = jevents; -// sd_journal_get_timeout(j, &when); -// if (when == -1) { -// timeout = -1; -// } else { -// clock_gettime(CLOCK_MONOTONIC, &ts); -// now = (uint64_t) ts.tv_sec * 1000000 + ts.tv_nsec / 1000; -// timeout = when > now ? (int) ((when - now + 999) / 1000) : 0; -// } -// i = poll(fds, 2, timeout); -// if ((i == -1) && (errno != EINTR)) { -// /* An unexpected error. */ -// return (errno != 0) ? -errno : -EINTR; -// } -// if (fds[0].revents & POLLHUP) { -// /* The close notification pipe was closed. */ -// return 0; -// } -// if (sd_journal_process(j) == SD_JOURNAL_APPEND) { -// /* Data, which we might care about, was appended. */ -// return 1; -// } -// } while ((fds[0].revents & POLLHUP) == 0); -// return 0; -//} -import "C" - -import ( - "fmt" - "strings" - "time" - "unsafe" - - "github.com/coreos/go-systemd/journal" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/daemon/logger" - "github.com/sirupsen/logrus" -) - -func (s *journald) Close() error { - s.mu.Lock() - s.closed = true - for reader := range s.readers.readers { - reader.Close() - } - s.mu.Unlock() - return nil -} - -func (s *journald) drainJournal(logWatcher *logger.LogWatcher, j *C.sd_journal, oldCursor *C.char, untilUnixMicro uint64) (*C.char, bool) { - var msg, data, cursor *C.char - var length C.size_t - var stamp C.uint64_t - var priority, partial C.int - var done bool - - // Walk the journal from here forward until we run out of new entries - // or we reach the until value (if provided). -drain: - for { - // Try not to send a given entry twice. - if oldCursor != nil { - for C.sd_journal_test_cursor(j, oldCursor) > 0 { - if C.sd_journal_next(j) <= 0 { - break drain - } - } - } - // Read and send the logged message, if there is one to read. - i := C.get_message(j, &msg, &length, &partial) - if i != -C.ENOENT && i != -C.EADDRNOTAVAIL { - // Read the entry's timestamp. - if C.sd_journal_get_realtime_usec(j, &stamp) != 0 { - break - } - // Break if the timestamp exceeds any provided until flag. 
- if untilUnixMicro != 0 && untilUnixMicro < uint64(stamp) { - done = true - break - } - - // Set up the time and text of the entry. - timestamp := time.Unix(int64(stamp)/1000000, (int64(stamp)%1000000)*1000) - line := C.GoBytes(unsafe.Pointer(msg), C.int(length)) - if partial == 0 { - line = append(line, "\n"...) - } - // Recover the stream name by mapping - // from the journal priority back to - // the stream that we would have - // assigned that value. - source := "" - if C.get_priority(j, &priority) != 0 { - source = "" - } else if priority == C.int(journal.PriErr) { - source = "stderr" - } else if priority == C.int(journal.PriInfo) { - source = "stdout" - } - // Retrieve the values of any variables we're adding to the journal. - var attrs []backend.LogAttr - C.sd_journal_restart_data(j) - for C.get_attribute_field(j, &data, &length) > C.int(0) { - kv := strings.SplitN(C.GoStringN(data, C.int(length)), "=", 2) - attrs = append(attrs, backend.LogAttr{Key: kv[0], Value: kv[1]}) - } - // Send the log message. - logWatcher.Msg <- &logger.Message{ - Line: line, - Source: source, - Timestamp: timestamp.In(time.UTC), - Attrs: attrs, - } - } - // If we're at the end of the journal, we're done (for now). - if C.sd_journal_next(j) <= 0 { - break - } - } - - // free(NULL) is safe - C.free(unsafe.Pointer(oldCursor)) - if C.sd_journal_get_cursor(j, &cursor) != 0 { - // ensure that we won't be freeing an address that's invalid - cursor = nil - } - return cursor, done -} - -func (s *journald) followJournal(logWatcher *logger.LogWatcher, j *C.sd_journal, pfd [2]C.int, cursor *C.char, untilUnixMicro uint64) *C.char { - s.mu.Lock() - s.readers.readers[logWatcher] = logWatcher - if s.closed { - // the journald Logger is closed, presumably because the container has been - // reset. So we shouldn't follow, because we'll never be woken up. But we - // should make one more drainJournal call to be sure we've got all the logs. - // Close pfd[1] so that one drainJournal happens, then cleanup, then return. - C.close(pfd[1]) - } - s.mu.Unlock() - - newCursor := make(chan *C.char) - - go func() { - for { - // Keep copying journal data out until we're notified to stop - // or we hit an error. - status := C.wait_for_data_cancelable(j, pfd[0]) - if status < 0 { - cerrstr := C.strerror(C.int(-status)) - errstr := C.GoString(cerrstr) - fmtstr := "error %q while attempting to follow journal for container %q" - logrus.Errorf(fmtstr, errstr, s.vars["CONTAINER_ID_FULL"]) - break - } - - var done bool - cursor, done = s.drainJournal(logWatcher, j, cursor, untilUnixMicro) - - if status != 1 || done { - // We were notified to stop - break - } - } - - // Clean up. - C.close(pfd[0]) - s.mu.Lock() - delete(s.readers.readers, logWatcher) - s.mu.Unlock() - close(logWatcher.Msg) - newCursor <- cursor - }() - - // Wait until we're told to stop. - select { - case cursor = <-newCursor: - case <-logWatcher.WatchClose(): - // Notify the other goroutine that its work is done. - C.close(pfd[1]) - cursor = <-newCursor - } - - return cursor -} - -func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) { - var j *C.sd_journal - var cmatch, cursor *C.char - var stamp C.uint64_t - var sinceUnixMicro uint64 - var untilUnixMicro uint64 - var pipes [2]C.int - - // Get a handle to the journal. 
- rc := C.sd_journal_open(&j, C.int(0)) - if rc != 0 { - logWatcher.Err <- fmt.Errorf("error opening journal") - close(logWatcher.Msg) - return - } - // If we end up following the log, we can set the journal context - // pointer and the channel pointer to nil so that we won't close them - // here, potentially while the goroutine that uses them is still - // running. Otherwise, close them when we return from this function. - following := false - defer func(pfollowing *bool) { - if !*pfollowing { - close(logWatcher.Msg) - } - C.sd_journal_close(j) - }(&following) - // Remove limits on the size of data items that we'll retrieve. - rc = C.sd_journal_set_data_threshold(j, C.size_t(0)) - if rc != 0 { - logWatcher.Err <- fmt.Errorf("error setting journal data threshold") - return - } - // Add a match to have the library do the searching for us. - cmatch = C.CString("CONTAINER_ID_FULL=" + s.vars["CONTAINER_ID_FULL"]) - defer C.free(unsafe.Pointer(cmatch)) - rc = C.sd_journal_add_match(j, unsafe.Pointer(cmatch), C.strlen(cmatch)) - if rc != 0 { - logWatcher.Err <- fmt.Errorf("error setting journal match") - return - } - // If we have a cutoff time, convert it to Unix time once. - if !config.Since.IsZero() { - nano := config.Since.UnixNano() - sinceUnixMicro = uint64(nano / 1000) - } - // If we have an until value, convert it too - if !config.Until.IsZero() { - nano := config.Until.UnixNano() - untilUnixMicro = uint64(nano / 1000) - } - if config.Tail > 0 { - lines := config.Tail - // If until time provided, start from there. - // Otherwise start at the end of the journal. - if untilUnixMicro != 0 && C.sd_journal_seek_realtime_usec(j, C.uint64_t(untilUnixMicro)) < 0 { - logWatcher.Err <- fmt.Errorf("error seeking provided until value") - return - } else if C.sd_journal_seek_tail(j) < 0 { - logWatcher.Err <- fmt.Errorf("error seeking to end of journal") - return - } - if C.sd_journal_previous(j) < 0 { - logWatcher.Err <- fmt.Errorf("error backtracking to previous journal entry") - return - } - // Walk backward. - for lines > 0 { - // Stop if the entry time is before our cutoff. - // We'll need the entry time if it isn't, so go - // ahead and parse it now. - if C.sd_journal_get_realtime_usec(j, &stamp) != 0 { - break - } else { - // Compare the timestamp on the entry to our threshold value. - if sinceUnixMicro != 0 && sinceUnixMicro > uint64(stamp) { - break - } - } - lines-- - // If we're at the start of the journal, or - // don't need to back up past any more entries, - // stop. - if lines == 0 || C.sd_journal_previous(j) <= 0 { - break - } - } - } else { - // Start at the beginning of the journal. - if C.sd_journal_seek_head(j) < 0 { - logWatcher.Err <- fmt.Errorf("error seeking to start of journal") - return - } - // If we have a cutoff date, fast-forward to it. - if sinceUnixMicro != 0 && C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro)) != 0 { - logWatcher.Err <- fmt.Errorf("error seeking to start time in journal") - return - } - if C.sd_journal_next(j) < 0 { - logWatcher.Err <- fmt.Errorf("error skipping to next journal entry") - return - } - } - cursor, _ = s.drainJournal(logWatcher, j, nil, untilUnixMicro) - if config.Follow { - // Allocate a descriptor for following the journal, if we'll - // need one. Do it here so that we can report if it fails. 
- if fd := C.sd_journal_get_fd(j); fd < C.int(0) { - logWatcher.Err <- fmt.Errorf("error opening journald follow descriptor: %q", C.GoString(C.strerror(-fd))) - } else { - // Create a pipe that we can poll at the same time as - // the journald descriptor. - if C.pipe(&pipes[0]) == C.int(-1) { - logWatcher.Err <- fmt.Errorf("error opening journald close notification pipe") - } else { - cursor = s.followJournal(logWatcher, j, pipes, cursor, untilUnixMicro) - // Let followJournal handle freeing the journal context - // object and closing the channel. - following = true - } - } - } - - C.free(unsafe.Pointer(cursor)) - return -} - -func (s *journald) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { - logWatcher := logger.NewLogWatcher() - go s.readLogs(logWatcher, config) - return logWatcher -} diff --git a/vendor/github.com/docker/docker/daemon/logger/journald/read_native.go b/vendor/github.com/docker/docker/daemon/logger/journald/read_native.go deleted file mode 100644 index ab68cf4ba..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/journald/read_native.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build linux,cgo,!static_build,journald,!journald_compat - -package journald // import "github.com/docker/docker/daemon/logger/journald" - -// #cgo pkg-config: libsystemd -import "C" diff --git a/vendor/github.com/docker/docker/daemon/logger/journald/read_native_compat.go b/vendor/github.com/docker/docker/daemon/logger/journald/read_native_compat.go deleted file mode 100644 index 4806e130e..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/journald/read_native_compat.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build linux,cgo,!static_build,journald,journald_compat - -package journald // import "github.com/docker/docker/daemon/logger/journald" - -// #cgo pkg-config: libsystemd-journal -import "C" diff --git a/vendor/github.com/docker/docker/daemon/logger/journald/read_unsupported.go b/vendor/github.com/docker/docker/daemon/logger/journald/read_unsupported.go deleted file mode 100644 index a66b66665..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/journald/read_unsupported.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !linux !cgo static_build !journald - -package journald // import "github.com/docker/docker/daemon/logger/journald" - -func (s *journald) Close() error { - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go deleted file mode 100644 index b806a5ad1..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go +++ /dev/null @@ -1,185 +0,0 @@ -// Package jsonfilelog provides the default Logger implementation for -// Docker logging. This logger logs to files on the host server in the -// JSON format. -package jsonfilelog // import "github.com/docker/docker/daemon/logger/jsonfilelog" - -import ( - "bytes" - "encoding/json" - "fmt" - "strconv" - "sync" - - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" - "github.com/docker/docker/daemon/logger/loggerutils" - "github.com/docker/go-units" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// Name is the name of the file that the jsonlogger logs to. -const Name = "json-file" - -// JSONFileLogger is Logger implementation for default Docker logging. 
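// Editor's aside (an illustrative sketch, not part of the vendored sources): every
// driver removed in this patch follows the same pattern the json-file driver below
// uses: implement the three-method Logger interface and register a logger.Creator
// from init(). A minimal no-op driver against the daemon/logger API shown later in
// this patch; the "noop" name is hypothetical:
package noop

import "github.com/docker/docker/daemon/logger"

type noopLogger struct{}

func init() {
	// Real drivers treat a registration failure as fatal (logrus.Fatal).
	if err := logger.RegisterLogDriver("noop", New); err != nil {
		panic(err)
	}
}

// New satisfies logger.Creator.
func New(info logger.Info) (logger.Logger, error) { return noopLogger{}, nil }

// Log consumes one message and hands it back to the message pool.
func (noopLogger) Log(msg *logger.Message) error { logger.PutMessage(msg); return nil }

func (noopLogger) Name() string { return "noop" }
func (noopLogger) Close() error { return nil }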
-type JSONFileLogger struct { - mu sync.Mutex - closed bool - writer *loggerutils.LogFile - readers map[*logger.LogWatcher]struct{} // stores the active log followers - tag string // tag values requested by the user to log -} - -func init() { - if err := logger.RegisterLogDriver(Name, New); err != nil { - logrus.Fatal(err) - } - if err := logger.RegisterLogOptValidator(Name, ValidateLogOpt); err != nil { - logrus.Fatal(err) - } -} - -// New creates a new JSONFileLogger which writes to the filename passed in -// on the given context. -func New(info logger.Info) (logger.Logger, error) { - var capval int64 = -1 - if capacity, ok := info.Config["max-size"]; ok { - var err error - capval, err = units.FromHumanSize(capacity) - if err != nil { - return nil, err - } - if capval <= 0 { - return nil, fmt.Errorf("max-size should be a positive number") - } - } - var maxFiles = 1 - if maxFileString, ok := info.Config["max-file"]; ok { - var err error - maxFiles, err = strconv.Atoi(maxFileString) - if err != nil { - return nil, err - } - if maxFiles < 1 { - return nil, fmt.Errorf("max-file cannot be less than 1") - } - } - - var compress bool - if compressString, ok := info.Config["compress"]; ok { - var err error - compress, err = strconv.ParseBool(compressString) - if err != nil { - return nil, err - } - if compress && (maxFiles == 1 || capval == -1) { - return nil, fmt.Errorf("compress cannot be true when max-file is less than 2 or max-size is not set") - } - } - - attrs, err := info.ExtraAttributes(nil) - if err != nil { - return nil, err - } - - // no default template. only use a tag if the user asked for it - tag, err := loggerutils.ParseLogTag(info, "") - if err != nil { - return nil, err - } - if tag != "" { - attrs["tag"] = tag - } - - var extra []byte - if len(attrs) > 0 { - var err error - extra, err = json.Marshal(attrs) - if err != nil { - return nil, err - } - } - - buf := bytes.NewBuffer(nil) - marshalFunc := func(msg *logger.Message) ([]byte, error) { - if err := marshalMessage(msg, extra, buf); err != nil { - return nil, err - } - b := buf.Bytes() - buf.Reset() - return b, nil - } - - writer, err := loggerutils.NewLogFile(info.LogPath, capval, maxFiles, compress, marshalFunc, decodeFunc, 0640) - if err != nil { - return nil, err - } - - return &JSONFileLogger{ - writer: writer, - readers: make(map[*logger.LogWatcher]struct{}), - tag: tag, - }, nil -} - -// Log converts logger.Message to jsonlog.JSONLog and serializes it to file. -func (l *JSONFileLogger) Log(msg *logger.Message) error { - l.mu.Lock() - err := l.writer.WriteLogEntry(msg) - l.mu.Unlock() - return err -} - -func marshalMessage(msg *logger.Message, extra json.RawMessage, buf *bytes.Buffer) error { - logLine := msg.Line - if msg.PLogMetaData == nil || (msg.PLogMetaData != nil && msg.PLogMetaData.Last) { - logLine = append(msg.Line, '\n') - } - err := (&jsonlog.JSONLogs{ - Log: logLine, - Stream: msg.Source, - Created: msg.Timestamp, - RawAttrs: extra, - }).MarshalJSONBuf(buf) - if err != nil { - return errors.Wrap(err, "error writing log message to buffer") - } - err = buf.WriteByte('\n') - return errors.Wrap(err, "error finalizing log buffer") -} - -// ValidateLogOpt looks for json-file specific log options such as max-file, max-size, and compress.
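// Editor's aside (an illustrative sketch, not part of the vendored sources): New
// above converts the human-readable max-size option into a byte count with
// go-units.FromHumanSize, which uses decimal multiples, so "10m" is 10,000,000
// bytes. A standalone demonstration:
package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	for _, opt := range []string{"10m", "1g", "2048", "bogus"} {
		n, err := units.FromHumanSize(opt)
		if err != nil {
			fmt.Printf("max-size %q rejected: %v\n", opt, err)
			continue
		}
		fmt.Printf("max-size %q -> %d bytes\n", opt, n)
	}
}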
-func ValidateLogOpt(cfg map[string]string) error { - for key := range cfg { - switch key { - case "max-file": - case "max-size": - case "compress": - case "labels": - case "env": - case "env-regex": - case "tag": - default: - return fmt.Errorf("unknown log opt '%s' for json-file log driver", key) - } - } - return nil -} - -// Close closes underlying file and signals all readers to stop. -func (l *JSONFileLogger) Close() error { - l.mu.Lock() - l.closed = true - err := l.writer.Close() - for r := range l.readers { - r.Close() - delete(l.readers, r) - } - l.mu.Unlock() - return err -} - -// Name returns name of this logger. -func (l *JSONFileLogger) Name() string { - return Name -} diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlog.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlog.go deleted file mode 100644 index 74be8e7da..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlog.go +++ /dev/null @@ -1,25 +0,0 @@ -package jsonlog // import "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" - -import ( - "time" -) - -// JSONLog is a log message, typically a single entry from a given log stream. -type JSONLog struct { - // Log is the log message - Log string `json:"log,omitempty"` - // Stream is the log source - Stream string `json:"stream,omitempty"` - // Created is the created timestamp of log - Created time.Time `json:"time"` - // Attrs is the list of extra attributes provided by the user - Attrs map[string]string `json:"attrs,omitempty"` -} - -// Reset all fields to their zero value. -func (jl *JSONLog) Reset() { - jl.Log = "" - jl.Stream = "" - jl.Created = time.Time{} - jl.Attrs = make(map[string]string) -} diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes.go deleted file mode 100644 index 577c718f6..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes.go +++ /dev/null @@ -1,125 +0,0 @@ -package jsonlog // import "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" - -import ( - "bytes" - "encoding/json" - "time" - "unicode/utf8" -) - -// JSONLogs marshals encoded JSONLog objects -type JSONLogs struct { - Log []byte `json:"log,omitempty"` - Stream string `json:"stream,omitempty"` - Created time.Time `json:"time"` - - // json-encoded bytes - RawAttrs json.RawMessage `json:"attrs,omitempty"` -} - -// MarshalJSONBuf is an optimized JSON marshaller that avoids reflection -// and unnecessary allocation. 
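// Editor's aside (an illustrative sketch, not part of the vendored sources):
// MarshalJSONBuf below hand-rolls, without reflection, the same single-line
// object that encoding/json would produce for one entry: "log", "stream",
// "time", and optional raw "attrs". The equivalent via the standard library:
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

func main() {
	entry := struct {
		Log    string            `json:"log,omitempty"`
		Stream string            `json:"stream,omitempty"`
		Time   time.Time         `json:"time"`
		Attrs  map[string]string `json:"attrs,omitempty"`
	}{
		Log:    "hello\n",
		Stream: "stdout",
		Time:   time.Date(2021, 6, 26, 22, 33, 31, 0, time.UTC),
		Attrs:  map[string]string{"tag": "web"},
	}
	b, err := json.Marshal(entry)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// {"log":"hello\n","stream":"stdout","time":"2021-06-26T22:33:31Z","attrs":{"tag":"web"}}
}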
-func (mj *JSONLogs) MarshalJSONBuf(buf *bytes.Buffer) error { - var first = true - - buf.WriteString(`{`) - if len(mj.Log) != 0 { - first = false - buf.WriteString(`"log":`) - ffjsonWriteJSONBytesAsString(buf, mj.Log) - } - if len(mj.Stream) != 0 { - if first { - first = false - } else { - buf.WriteString(`,`) - } - buf.WriteString(`"stream":`) - ffjsonWriteJSONBytesAsString(buf, []byte(mj.Stream)) - } - if len(mj.RawAttrs) > 0 { - if first { - first = false - } else { - buf.WriteString(`,`) - } - buf.WriteString(`"attrs":`) - buf.Write(mj.RawAttrs) - } - if !first { - buf.WriteString(`,`) - } - - created, err := fastTimeMarshalJSON(mj.Created) - if err != nil { - return err - } - - buf.WriteString(`"time":`) - buf.WriteString(created) - buf.WriteString(`}`) - return nil -} - -func ffjsonWriteJSONBytesAsString(buf *bytes.Buffer, s []byte) { - const hex = "0123456789abcdef" - - buf.WriteByte('"') - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { - i++ - continue - } - if start < i { - buf.Write(s[start:i]) - } - switch b { - case '\\', '"': - buf.WriteByte('\\') - buf.WriteByte(b) - case '\n': - buf.WriteByte('\\') - buf.WriteByte('n') - case '\r': - buf.WriteByte('\\') - buf.WriteByte('r') - default: - - buf.WriteString(`\u00`) - buf.WriteByte(hex[b>>4]) - buf.WriteByte(hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRune(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - buf.Write(s[start:i]) - } - buf.WriteString(`\ufffd`) - i += size - start = i - continue - } - - if c == '\u2028' || c == '\u2029' { - if start < i { - buf.Write(s[start:i]) - } - buf.WriteString(`\u202`) - buf.WriteByte(hex[c&0xF]) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - buf.Write(s[start:]) - } - buf.WriteByte('"') -} diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/time_marshalling.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/time_marshalling.go deleted file mode 100644 index 1822ea5db..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/time_marshalling.go +++ /dev/null @@ -1,20 +0,0 @@ -package jsonlog // import "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" - -import ( - "time" - - "github.com/pkg/errors" -) - -const jsonFormat = `"` + time.RFC3339Nano + `"` - -// fastTimeMarshalJSON avoids one of the extra allocations that -// time.MarshalJSON is making. -func fastTimeMarshalJSON(t time.Time) (string, error) { - if y := t.Year(); y < 0 || y >= 10000 { - // RFC 3339 is clear that years are 4 digits exactly. - // See golang.org/issue/4556#c15 for more discussion. 
- return "", errors.New("time.MarshalJSON: year outside of range [0,9999]") - } - return t.Format(jsonFormat), nil -} diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go deleted file mode 100644 index ab1793bb7..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go +++ /dev/null @@ -1,89 +0,0 @@ -package jsonfilelog // import "github.com/docker/docker/daemon/logger/jsonfilelog" - -import ( - "encoding/json" - "io" - - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" -) - -const maxJSONDecodeRetry = 20000 - -// ReadLogs implements the logger's LogReader interface for the logs -// created by this driver. -func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { - logWatcher := logger.NewLogWatcher() - - go l.readLogs(logWatcher, config) - return logWatcher -} - -func (l *JSONFileLogger) readLogs(watcher *logger.LogWatcher, config logger.ReadConfig) { - defer close(watcher.Msg) - - l.mu.Lock() - l.readers[watcher] = struct{}{} - l.mu.Unlock() - - l.writer.ReadLogs(config, watcher) - - l.mu.Lock() - delete(l.readers, watcher) - l.mu.Unlock() -} - -func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) { - l.Reset() - if err := dec.Decode(l); err != nil { - return nil, err - } - - var attrs []backend.LogAttr - if len(l.Attrs) != 0 { - attrs = make([]backend.LogAttr, 0, len(l.Attrs)) - for k, v := range l.Attrs { - attrs = append(attrs, backend.LogAttr{Key: k, Value: v}) - } - } - msg := &logger.Message{ - Source: l.Stream, - Timestamp: l.Created, - Line: []byte(l.Log), - Attrs: attrs, - } - return msg, nil -} - -// decodeFunc is used to create a decoder for the log file reader -func decodeFunc(rdr io.Reader) func() (*logger.Message, error) { - l := &jsonlog.JSONLog{} - dec := json.NewDecoder(rdr) - return func() (msg *logger.Message, err error) { - for retries := 0; retries < maxJSONDecodeRetry; retries++ { - msg, err = decodeLogLine(dec, l) - if err == nil { - break - } - - // try again, could be due to an incomplete JSON object as we read - if _, ok := err.(*json.SyntaxError); ok { - dec = json.NewDecoder(rdr) - retries++ - continue - } - - // io.ErrUnexpectedEOF is returned from json.Decoder when there is - // remaining data in the parser's buffer while an io.EOF occurs. - // If the json logger writes a partial json log entry to the disk - // while at the same time the decoder tries to decode it, this race condition occurs. - if err == io.ErrUnexpectedEOF { - reader := io.MultiReader(dec.Buffered(), rdr) - dec = json.NewDecoder(reader) - retries++ - } - } - return msg, err - } -} diff --git a/vendor/github.com/docker/docker/daemon/logger/logentries/logentries.go b/vendor/github.com/docker/docker/daemon/logger/logentries/logentries.go deleted file mode 100644 index 70a8baf66..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/logentries/logentries.go +++ /dev/null @@ -1,115 +0,0 @@ -// Package logentries provides the log driver for forwarding server logs -// to logentries endpoints.
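// Editor's aside (an illustrative sketch, not part of the vendored sources): the
// io.ErrUnexpectedEOF branch in decodeFunc above, at the end of jsonfilelog/read.go,
// rebuilds the decoder from dec.Buffered() plus the rest of the stream, so a
// half-written trailing entry is retried instead of lost. The recovery step in
// isolation, with a reader that hits EOF mid-entry and then grows:
package main

import (
	"encoding/json"
	"fmt"
	"io"
)

// growingReader simulates a log file that is appended to between reads; an
// empty chunk stands for "end of file right now".
type growingReader struct{ chunks []string }

func (r *growingReader) Read(p []byte) (int, error) {
	if len(r.chunks) == 0 {
		return 0, io.EOF
	}
	chunk := r.chunks[0]
	if chunk == "" {
		r.chunks = r.chunks[1:]
		return 0, io.EOF
	}
	n := copy(p, chunk)
	if n == len(chunk) {
		r.chunks = r.chunks[1:]
	} else {
		r.chunks[0] = chunk[n:]
	}
	return n, nil
}

func main() {
	r := &growingReader{chunks: []string{`{"log":"a"}` + "\n" + `{"log":"b`, "", `"}` + "\n"}}
	dec := json.NewDecoder(r)
	for {
		var v map[string]string
		err := dec.Decode(&v)
		if err == io.ErrUnexpectedEOF {
			// Same trick as decodeFunc: keep the buffered bytes and retry.
			dec = json.NewDecoder(io.MultiReader(dec.Buffered(), r))
			continue
		}
		if err != nil {
			break // io.EOF once the stream is exhausted
		}
		fmt.Println(v["log"]) // prints "a", then "b" after the retry
	}
}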
-package logentries // import "github.com/docker/docker/daemon/logger/logentries" - -import ( - "fmt" - "strconv" - - "github.com/bsphere/le_go" - "github.com/docker/docker/daemon/logger" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -type logentries struct { - tag string - containerID string - containerName string - writer *le_go.Logger - extra map[string]string - lineOnly bool -} - -const ( - name = "logentries" - token = "logentries-token" - lineonly = "line-only" -) - -func init() { - if err := logger.RegisterLogDriver(name, New); err != nil { - logrus.Fatal(err) - } - if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { - logrus.Fatal(err) - } -} - -// New creates a logentries logger using the configuration passed in on -// the context. The supported context configuration variable is -// logentries-token. -func New(info logger.Info) (logger.Logger, error) { - logrus.WithField("container", info.ContainerID). - WithField("token", info.Config[token]). - WithField("line-only", info.Config[lineonly]). - Debug("logging driver logentries configured") - - log, err := le_go.Connect(info.Config[token]) - if err != nil { - return nil, errors.Wrap(err, "error connecting to logentries") - } - var lineOnly bool - if info.Config[lineonly] != "" { - if lineOnly, err = strconv.ParseBool(info.Config[lineonly]); err != nil { - return nil, errors.Wrap(err, "error parsing lineonly option") - } - } - return &logentries{ - containerID: info.ContainerID, - containerName: info.ContainerName, - writer: log, - lineOnly: lineOnly, - }, nil -} - -func (f *logentries) Log(msg *logger.Message) error { - if !f.lineOnly { - data := map[string]string{ - "container_id": f.containerID, - "container_name": f.containerName, - "source": msg.Source, - "log": string(msg.Line), - } - for k, v := range f.extra { - data[k] = v - } - ts := msg.Timestamp - logger.PutMessage(msg) - f.writer.Println(f.tag, ts, data) - } else { - line := string(msg.Line) - logger.PutMessage(msg) - f.writer.Println(line) - } - return nil -} - -func (f *logentries) Close() error { - return f.writer.Close() -} - -func (f *logentries) Name() string { - return name -} - -// ValidateLogOpt looks for the logentries specific log options logentries-token and line-only. -func ValidateLogOpt(cfg map[string]string) error { - for key := range cfg { - switch key { - case "env": - case "env-regex": - case "labels": - case "tag": - case token: - case lineonly: - default: - return fmt.Errorf("unknown log opt '%s' for logentries log driver", key) - } - } - - if cfg[token] == "" { - return fmt.Errorf("Missing logentries token") - } - - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/logger/logger.go b/vendor/github.com/docker/docker/daemon/logger/logger.go deleted file mode 100644 index 912e855c7..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/logger.go +++ /dev/null @@ -1,145 +0,0 @@ -// Package logger defines interfaces that logger drivers implement to -// log messages. -// -// The other half of a logger driver is the implementation of the -// factory, which holds the contextual instance information that -// allows multiple loggers of the same type to perform different -// actions, such as logging to different locations.
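// Editor's aside (an illustrative sketch, not part of the vendored sources):
// logger.go below pools Message values with sync.Pool so the hot logging path
// avoids allocating per line; PutMessage resets a message before returning it.
// The same pattern in miniature:
package main

import (
	"fmt"
	"sync"
)

type message struct{ line []byte }

var pool = sync.Pool{New: func() interface{} { return &message{line: make([]byte, 0, 256)} }}

func getMessage() *message { return pool.Get().(*message) }

func putMessage(m *message) {
	m.line = m.line[:0] // reset before pooling, as Message.reset does below
	pool.Put(m)
}

func main() {
	m := getMessage()
	m.line = append(m.line, "hello"...)
	fmt.Println(string(m.line))
	putMessage(m) // m must not be used after this point
}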
-package logger // import "github.com/docker/docker/daemon/logger" - -import ( - "sync" - "time" - - "github.com/docker/docker/api/types/backend" -) - -// ErrReadLogsNotSupported is returned when the underlying log driver does not support reading -type ErrReadLogsNotSupported struct{} - -func (ErrReadLogsNotSupported) Error() string { - return "configured logging driver does not support reading" -} - -// NotImplemented makes this error implement the `NotImplemented` interface from api/errdefs -func (ErrReadLogsNotSupported) NotImplemented() {} - -const ( - logWatcherBufferSize = 4096 -) - -var messagePool = &sync.Pool{New: func() interface{} { return &Message{Line: make([]byte, 0, 256)} }} - -// NewMessage returns a new message from the message sync.Pool -func NewMessage() *Message { - return messagePool.Get().(*Message) -} - -// PutMessage puts the specified message back into the message pool. -// The message fields are reset before putting into the pool. -func PutMessage(msg *Message) { - msg.reset() - messagePool.Put(msg) -} - -// Message is a data structure that represents a piece of output produced by some -// container. The Line member is a slice of an array whose contents can be -// changed after a log driver's Log() method returns. -// -// Message is subtyped from backend.LogMessage because there is a lot of -// internal complexity around the Message type that should not be exposed -// to any package not explicitly importing the logger type. -// -// Any changes made to this struct must also be updated in the `reset` function -type Message backend.LogMessage - -// reset sets the message back to default values -// This is used when putting a message back into the message pool. -// Any changes to the `Message` struct should be reflected here. -func (m *Message) reset() { - m.Line = m.Line[:0] - m.Source = "" - m.Attrs = nil - m.PLogMetaData = nil - - m.Err = nil -} - -// AsLogMessage returns a pointer to the message as a pointer to -// backend.LogMessage, which is an identical type with a different purpose -func (m *Message) AsLogMessage() *backend.LogMessage { - return (*backend.LogMessage)(m) -} - -// Logger is the interface for docker logging drivers. -type Logger interface { - Log(*Message) error - Name() string - Close() error -} - -// SizedLogger is the interface for logging drivers that can control -// the size of buffer used for their messages. -type SizedLogger interface { - Logger - BufSize() int -} - -// ReadConfig is the configuration passed into ReadLogs. -type ReadConfig struct { - Since time.Time - Until time.Time - Tail int - Follow bool -} - -// LogReader is the interface for reading log messages for loggers that support reading. -type LogReader interface { - // Read logs from underlying logging backend - ReadLogs(ReadConfig) *LogWatcher -} - -// LogWatcher is used when consuming logs read from the LogReader interface. -type LogWatcher struct { - // For sending log messages to a reader. - Msg chan *Message - // For sending error messages that occur while reading logs. - Err chan error - closeOnce sync.Once - closeNotifier chan struct{} -} - -// NewLogWatcher returns a new LogWatcher. -func NewLogWatcher() *LogWatcher { - return &LogWatcher{ - Msg: make(chan *Message, logWatcherBufferSize), - Err: make(chan error, 1), - closeNotifier: make(chan struct{}), - } -} - -// Close notifies the underlying log reader to stop.
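// Editor's aside (an illustrative sketch, not part of the vendored sources): Close
// below pairs sync.Once with close(chan) so Close is safe to call from multiple
// goroutines, while WatchClose consumers simply select on the channel. Stripped to
// its essentials:
package main

import (
	"fmt"
	"sync"
)

type watcher struct {
	closeOnce     sync.Once
	closeNotifier chan struct{}
}

func (w *watcher) Close() {
	w.closeOnce.Do(func() { close(w.closeNotifier) })
}

func main() {
	w := &watcher{closeNotifier: make(chan struct{})}
	w.Close()
	w.Close() // a second Close would panic without the sync.Once guard
	<-w.closeNotifier
	fmt.Println("watcher closed")
}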
-func (w *LogWatcher) Close() { - // only close if not already closed - w.closeOnce.Do(func() { - close(w.closeNotifier) - }) -} - -// WatchClose returns a channel receiver that receives notification -// when the watcher has been closed. This should only be called from -// one goroutine. -func (w *LogWatcher) WatchClose() <-chan struct{} { - return w.closeNotifier -} - -// Capability defines the list of capabilities that a driver can implement. -// These capabilities are not required of a logging driver, but they do -// determine how a logging driver can be used. -type Capability struct { - // Determines if a log driver can read back logs - ReadLogs bool -} - -// MarshalFunc is a func that marshals a message into an arbitrary format -type MarshalFunc func(*Message) ([]byte, error) diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go deleted file mode 100644 index 719512dbd..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go +++ /dev/null @@ -1,31 +0,0 @@ -package loggerutils // import "github.com/docker/docker/daemon/logger/loggerutils" - -import ( - "bytes" - - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/daemon/logger/templates" -) - -// DefaultTemplate defines the default template a logger should use. -const DefaultTemplate = "{{.ID}}" - -// ParseLogTag generates a context aware tag for consistency across different -// log drivers based on the context of the running container. -func ParseLogTag(info logger.Info, defaultTemplate string) (string, error) { - tagTemplate := info.Config["tag"] - if tagTemplate == "" { - tagTemplate = defaultTemplate - } - - tmpl, err := templates.NewParse("log-tag", tagTemplate) - if err != nil { - return "", err - } - buf := new(bytes.Buffer) - if err := tmpl.Execute(buf, &info); err != nil { - return "", err - } - - return buf.String(), nil -} diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go deleted file mode 100644 index 6e3cda864..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go +++ /dev/null @@ -1,666 +0,0 @@ -package loggerutils // import "github.com/docker/docker/daemon/logger/loggerutils" - -import ( - "bytes" - "compress/gzip" - "context" - "encoding/json" - "fmt" - "io" - "os" - "strconv" - "strings" - "sync" - "time" - - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/daemon/logger/loggerutils/multireader" - "github.com/docker/docker/pkg/filenotify" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/pubsub" - "github.com/docker/docker/pkg/tailfile" - "github.com/fsnotify/fsnotify" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const tmpLogfileSuffix = ".tmp" - -// rotateFileMetadata is metadata stored in the gzip header of a compressed log file -type rotateFileMetadata struct { - LastTime time.Time `json:"lastTime,omitempty"` -} - -// refCounter tracks how many readers reference a given logfile -type refCounter struct { - mu sync.Mutex - counter map[string]int -} - -// GetReference increases the reference counter for the specified logfile and returns the opened file -func (rc *refCounter) GetReference(fileName string, openRefFile func(fileName string, exists bool) (*os.File, error)) (*os.File, error) { - rc.mu.Lock() - defer rc.mu.Unlock() - - var ( - file *os.File - err error - ) - _, ok := rc.counter[fileName] - file, err = openRefFile(fileName,
ok) - if err != nil { - return nil, err - } - - if ok { - rc.counter[fileName]++ - } else if file != nil { - rc.counter[file.Name()] = 1 - } - - return file, nil -} - -// Dereference reduces the reference counter for the specified logfile -func (rc *refCounter) Dereference(fileName string) error { - rc.mu.Lock() - defer rc.mu.Unlock() - - rc.counter[fileName]-- - if rc.counter[fileName] <= 0 { - delete(rc.counter, fileName) - err := os.Remove(fileName) - if err != nil { - return err - } - } - return nil -} - -// LogFile is a Logger implementation for default Docker logging. -type LogFile struct { - mu sync.RWMutex // protects the logfile access - f *os.File // store for closing - closed bool - rotateMu sync.Mutex // blocks the next rotation until the current rotation is completed - capacity int64 // maximum size of each file - currentSize int64 // current size of the latest file - maxFiles int // maximum number of files - compress bool // whether old versions of log files are compressed - lastTimestamp time.Time // timestamp of the last log - filesRefCounter refCounter // keeps reference counts for decompressed files - notifyRotate *pubsub.Publisher - marshal logger.MarshalFunc - createDecoder makeDecoderFunc - perms os.FileMode -} - -type makeDecoderFunc func(rdr io.Reader) func() (*logger.Message, error) - -// NewLogFile creates a new LogFile -func NewLogFile(logPath string, capacity int64, maxFiles int, compress bool, marshaller logger.MarshalFunc, decodeFunc makeDecoderFunc, perms os.FileMode) (*LogFile, error) { - log, err := os.OpenFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, perms) - if err != nil { - return nil, err - } - - size, err := log.Seek(0, os.SEEK_END) - if err != nil { - return nil, err - } - - return &LogFile{ - f: log, - capacity: capacity, - currentSize: size, - maxFiles: maxFiles, - compress: compress, - filesRefCounter: refCounter{counter: make(map[string]int)}, - notifyRotate: pubsub.NewPublisher(0, 1), - marshal: marshaller, - createDecoder: decodeFunc, - perms: perms, - }, nil -} - -// WriteLogEntry writes the provided log message to the current log file. -// This may trigger a rotation event if the max file/capacity limits are hit.
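// Editor's aside (an illustrative sketch, not part of the vendored sources): when
// WriteLogEntry below hits the capacity limit, rotate shifts the archive chain
// oldest-first (name.N-1 -> name.N, with a .gz suffix when compression is on) and
// finally moves the live file to name.1. Tracing the renames for maxFiles=3:
package main

import "fmt"

func main() {
	name, maxFiles, ext := "container.log", 3, ".gz"
	fmt.Printf("remove  %s.%d%s\n", name, maxFiles-1, ext)
	for i := maxFiles - 1; i > 1; i-- {
		fmt.Printf("rename  %s.%d%s -> %s.%d%s\n", name, i-1, ext, name, i, ext)
	}
	fmt.Printf("rename  %s -> %s.1 (compressed to %s.1%s in the background)\n", name, name, name, ext)
}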
-func (w *LogFile) WriteLogEntry(msg *logger.Message) error { - b, err := w.marshal(msg) - if err != nil { - return errors.Wrap(err, "error marshalling log message") - } - - logger.PutMessage(msg) - - w.mu.Lock() - if w.closed { - w.mu.Unlock() - return errors.New("cannot write because the output file was closed") - } - - if err := w.checkCapacityAndRotate(); err != nil { - w.mu.Unlock() - return err - } - - n, err := w.f.Write(b) - if err == nil { - w.currentSize += int64(n) - w.lastTimestamp = msg.Timestamp - } - w.mu.Unlock() - return err -} - -func (w *LogFile) checkCapacityAndRotate() error { - if w.capacity == -1 { - return nil - } - - if w.currentSize >= w.capacity { - w.rotateMu.Lock() - fname := w.f.Name() - if err := w.f.Close(); err != nil { - w.rotateMu.Unlock() - return errors.Wrap(err, "error closing file") - } - if err := rotate(fname, w.maxFiles, w.compress); err != nil { - w.rotateMu.Unlock() - return err - } - file, err := os.OpenFile(fname, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, w.perms) - if err != nil { - w.rotateMu.Unlock() - return err - } - w.f = file - w.currentSize = 0 - w.notifyRotate.Publish(struct{}{}) - - if w.maxFiles <= 1 || !w.compress { - w.rotateMu.Unlock() - return nil - } - - go func() { - compressFile(fname+".1", w.lastTimestamp) - w.rotateMu.Unlock() - }() - } - - return nil -} - -func rotate(name string, maxFiles int, compress bool) error { - if maxFiles < 2 { - return nil - } - - var extension string - if compress { - extension = ".gz" - } - - lastFile := fmt.Sprintf("%s.%d%s", name, maxFiles-1, extension) - err := os.Remove(lastFile) - if err != nil && !os.IsNotExist(err) { - return errors.Wrap(err, "error removing oldest log file") - } - - for i := maxFiles - 1; i > 1; i-- { - toPath := name + "." + strconv.Itoa(i) + extension - fromPath := name + "." + strconv.Itoa(i-1) + extension - if err := os.Rename(fromPath, toPath); err != nil && !os.IsNotExist(err) { - return err - } - } - - if err := os.Rename(name, name+".1"); err != nil && !os.IsNotExist(err) { - return err - } - - return nil -} - -func compressFile(fileName string, lastTimestamp time.Time) { - file, err := os.Open(fileName) - if err != nil { - logrus.Errorf("Failed to open log file: %v", err) - return - } - defer func() { - file.Close() - err := os.Remove(fileName) - if err != nil { - logrus.Errorf("Failed to remove source log file: %v", err) - } - }() - - outFile, err := os.OpenFile(fileName+".gz", os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0640) - if err != nil { - logrus.Errorf("Failed to open or create gzip log file: %v", err) - return - } - defer func() { - outFile.Close() - if err != nil { - os.Remove(fileName + ".gz") - } - }() - - compressWriter := gzip.NewWriter(outFile) - defer compressWriter.Close() - - // Add the last log entry timestamp to the gzip header - extra := rotateFileMetadata{} - extra.LastTime = lastTimestamp - compressWriter.Header.Extra, err = json.Marshal(&extra) - if err != nil { - // Here log the error only and don't return since this is just an optimization. - logrus.Warningf("Failed to marshal gzip header as JSON: %v", err) - } - - _, err = pools.Copy(compressWriter, file) - if err != nil { - logrus.WithError(err).WithField("module", "container.logs").WithField("file", fileName).Error("Error compressing log file") - return - } -} - -// MaxFiles returns the maximum number of files -func (w *LogFile) MaxFiles() int { - return w.maxFiles -} - -// Close closes underlying file and signals all readers to stop.
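// Editor's aside (an illustrative sketch, not part of the vendored sources):
// compressFile above stashes the timestamp of the last entry as JSON in the gzip
// header's Extra field; decompressfile later in this file reads it back so archives
// wholly older than the since cutoff can be skipped without decompressing.
// Round-tripping that header with the standard library:
package main

import (
	"bytes"
	"compress/gzip"
	"encoding/json"
	"fmt"
	"time"
)

type meta struct {
	LastTime time.Time `json:"lastTime,omitempty"`
}

func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	// Extra must be set before the first Write, which emits the header.
	zw.Header.Extra, _ = json.Marshal(meta{LastTime: time.Now()})
	if _, err := zw.Write([]byte("log data")); err != nil {
		panic(err)
	}
	zw.Close()

	zr, err := gzip.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	var m meta
	if err := json.Unmarshal(zr.Header.Extra, &m); err != nil {
		panic(err)
	}
	fmt.Println("last entry written at:", m.LastTime)
}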
-func (w *LogFile) Close() error { - w.mu.Lock() - defer w.mu.Unlock() - if w.closed { - return nil - } - if err := w.f.Close(); err != nil { - return err - } - w.closed = true - return nil -} - -// ReadLogs decodes entries from log files and sends them to the passed in watcher -// -// Note: Using the follow option can become inconsistent in cases with very frequent rotations when max log files is 1. -// TODO: Consider a different implementation which can effectively follow logs under frequent rotations. -func (w *LogFile) ReadLogs(config logger.ReadConfig, watcher *logger.LogWatcher) { - w.mu.RLock() - currentFile, err := os.Open(w.f.Name()) - if err != nil { - w.mu.RUnlock() - watcher.Err <- err - return - } - defer currentFile.Close() - - currentChunk, err := newSectionReader(currentFile) - if err != nil { - w.mu.RUnlock() - watcher.Err <- err - return - } - - if config.Tail != 0 { - files, err := w.openRotatedFiles(config) - if err != nil { - w.mu.RUnlock() - watcher.Err <- err - return - } - w.mu.RUnlock() - seekers := make([]io.ReadSeeker, 0, len(files)+1) - for _, f := range files { - seekers = append(seekers, f) - } - if currentChunk.Size() > 0 { - seekers = append(seekers, currentChunk) - } - if len(seekers) > 0 { - tailFile(multireader.MultiReadSeeker(seekers...), watcher, w.createDecoder, config) - } - for _, f := range files { - f.Close() - fileName := f.Name() - if strings.HasSuffix(fileName, tmpLogfileSuffix) { - err := w.filesRefCounter.Dereference(fileName) - if err != nil { - logrus.Errorf("Failed to dereference the log file %q: %v", fileName, err) - } - } - } - - w.mu.RLock() - } - - if !config.Follow || w.closed { - w.mu.RUnlock() - return - } - w.mu.RUnlock() - - notifyRotate := w.notifyRotate.Subscribe() - defer w.notifyRotate.Evict(notifyRotate) - followLogs(currentFile, watcher, notifyRotate, w.createDecoder, config.Since, config.Until) -} - -func (w *LogFile) openRotatedFiles(config logger.ReadConfig) (files []*os.File, err error) { - w.rotateMu.Lock() - defer w.rotateMu.Unlock() - - defer func() { - if err == nil { - return - } - for _, f := range files { - f.Close() - if strings.HasSuffix(f.Name(), tmpLogfileSuffix) { - err := os.Remove(f.Name()) - if err != nil && !os.IsNotExist(err) { - logrus.Warningf("Failed to remove the logfile %q: %v", f.Name(), err) - } - } - } - }() - - for i := w.maxFiles; i > 1; i-- { - f, err := os.Open(fmt.Sprintf("%s.%d", w.f.Name(), i-1)) - if err != nil { - if !os.IsNotExist(err) { - return nil, errors.Wrap(err, "error opening rotated log file") - } - - fileName := fmt.Sprintf("%s.%d.gz", w.f.Name(), i-1) - decompressedFileName := fileName + tmpLogfileSuffix - tmpFile, err := w.filesRefCounter.GetReference(decompressedFileName, func(refFileName string, exists bool) (*os.File, error) { - if exists { - return os.Open(refFileName) - } - return decompressfile(fileName, refFileName, config.Since) - }) - - if err != nil { - if !os.IsNotExist(errors.Cause(err)) { - return nil, errors.Wrap(err, "error getting reference to decompressed log file") - } - continue - } - if tmpFile == nil { - // Logs from before `config.Since` do not need to be read - break - } - - files = append(files, tmpFile) - continue - } - files = append(files, f) - } - - return files, nil -} - -func decompressfile(fileName, destFileName string, since time.Time) (*os.File, error) { - cf, err := os.Open(fileName) - if err != nil { - return nil, errors.Wrap(err, "error opening file for decompression") - } - defer cf.Close() - - rc, err := gzip.NewReader(cf) - if err != nil { -
return nil, errors.Wrap(err, "error making gzip reader for compressed log file") - } - defer rc.Close() - - // Extract the last log entry timestamp from the gzip header - extra := &rotateFileMetadata{} - err = json.Unmarshal(rc.Header.Extra, extra) - if err == nil && extra.LastTime.Before(since) { - return nil, nil - } - - rs, err := os.OpenFile(destFileName, os.O_CREATE|os.O_RDWR, 0640) - if err != nil { - return nil, errors.Wrap(err, "error creating file for copying decompressed log stream") - } - - _, err = pools.Copy(rs, rc) - if err != nil { - rs.Close() - rErr := os.Remove(rs.Name()) - if rErr != nil && !os.IsNotExist(rErr) { - logrus.Errorf("Failed to remove the logfile %q: %v", rs.Name(), rErr) - } - return nil, errors.Wrap(err, "error while copying decompressed log stream to file") - } - - return rs, nil -} - -func newSectionReader(f *os.File) (*io.SectionReader, error) { - // seek to the end to get the size - // we'll leave this at the end of the file since section reader does not advance the reader - size, err := f.Seek(0, os.SEEK_END) - if err != nil { - return nil, errors.Wrap(err, "error getting current file size") - } - return io.NewSectionReader(f, 0, size), nil -} - -type decodeFunc func() (*logger.Message, error) - -func tailFile(f io.ReadSeeker, watcher *logger.LogWatcher, createDecoder makeDecoderFunc, config logger.ReadConfig) { - var rdr io.Reader = f - if config.Tail > 0 { - ls, err := tailfile.TailFile(f, config.Tail) - if err != nil { - watcher.Err <- err - return - } - rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n"))) - } - - decodeLogLine := createDecoder(rdr) - for { - msg, err := decodeLogLine() - if err != nil { - if errors.Cause(err) != io.EOF { - watcher.Err <- err - } - return - } - if !config.Since.IsZero() && msg.Timestamp.Before(config.Since) { - continue - } - if !config.Until.IsZero() && msg.Timestamp.After(config.Until) { - return - } - select { - case <-watcher.WatchClose(): - return - case watcher.Msg <- msg: - } - } -} - -func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, createDecoder makeDecoderFunc, since, until time.Time) { - decodeLogLine := createDecoder(f) - - name := f.Name() - fileWatcher, err := watchFile(name) - if err != nil { - logWatcher.Err <- err - return - } - defer func() { - f.Close() - fileWatcher.Remove(name) - fileWatcher.Close() - }() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - go func() { - select { - case <-logWatcher.WatchClose(): - fileWatcher.Remove(name) - cancel() - case <-ctx.Done(): - return - } - }() - - var retries int - handleRotate := func() error { - f.Close() - fileWatcher.Remove(name) - - // retry when the file doesn't exist - for retries := 0; retries <= 5; retries++ { - f, err = os.Open(name) - if err == nil || !os.IsNotExist(err) { - break - } - } - if err != nil { - return err - } - if err := fileWatcher.Add(name); err != nil { - return err - } - decodeLogLine = createDecoder(f) - return nil - } - - errRetry := errors.New("retry") - errDone := errors.New("done") - waitRead := func() error { - select { - case e := <-fileWatcher.Events(): - switch e.Op { - case fsnotify.Write: - decodeLogLine = createDecoder(f) - return nil - case fsnotify.Rename, fsnotify.Remove: - select { - case <-notifyRotate: - case <-ctx.Done(): - return errDone - } - if err := handleRotate(); err != nil { - return err - } - return nil - } - return errRetry - case err := <-fileWatcher.Errors(): - logrus.Debugf("logger got error watching file: %v", err) - //
Something happened, let's try and stay alive and create a new watcher - if retries <= 5 { - fileWatcher.Close() - fileWatcher, err = watchFile(name) - if err != nil { - return err - } - retries++ - return errRetry - } - return err - case <-ctx.Done(): - return errDone - } - } - - handleDecodeErr := func(err error) error { - if errors.Cause(err) != io.EOF { - return err - } - - for { - err := waitRead() - if err == nil { - break - } - if err == errRetry { - continue - } - return err - } - return nil - } - - // main loop - for { - msg, err := decodeLogLine() - if err != nil { - if err := handleDecodeErr(err); err != nil { - if err == errDone { - return - } - // we got an unrecoverable error, so return - logWatcher.Err <- err - return - } - // ready to try again - continue - } - - retries = 0 // reset retries since we've succeeded - if !since.IsZero() && msg.Timestamp.Before(since) { - continue - } - if !until.IsZero() && msg.Timestamp.After(until) { - return - } - select { - case logWatcher.Msg <- msg: - case <-ctx.Done(): - logWatcher.Msg <- msg - for { - msg, err := decodeLogLine() - if err != nil { - return - } - if !since.IsZero() && msg.Timestamp.Before(since) { - continue - } - if !until.IsZero() && msg.Timestamp.After(until) { - return - } - logWatcher.Msg <- msg - } - } - } -} - -func watchFile(name string) (filenotify.FileWatcher, error) { - fileWatcher, err := filenotify.New() - if err != nil { - return nil, err - } - - logger := logrus.WithFields(logrus.Fields{ - "module": "logger", - "file": name, - }) - - if err := fileWatcher.Add(name); err != nil { - logger.WithError(err).Warnf("falling back to file poller") - fileWatcher.Close() - fileWatcher = filenotify.NewPollingWatcher() - - if err := fileWatcher.Add(name); err != nil { - fileWatcher.Close() - logger.WithError(err).Debugf("error watching log file for modifications") - return nil, err - } - } - return fileWatcher, nil -} diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/multireader/multireader.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/multireader/multireader.go deleted file mode 100644 index 77980a2a0..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/loggerutils/multireader/multireader.go +++ /dev/null @@ -1,212 +0,0 @@ -package multireader // import "github.com/docker/docker/daemon/logger/loggerutils/multireader" - -import ( - "bytes" - "fmt" - "io" - "os" -) - -type pos struct { - idx int - offset int64 -} - -type multiReadSeeker struct { - readers []io.ReadSeeker - pos *pos - posIdx map[io.ReadSeeker]int -} - -func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) { - var tmpOffset int64 - switch whence { - case os.SEEK_SET: - for i, rdr := range r.readers { - // get size of the current reader - s, err := rdr.Seek(0, os.SEEK_END) - if err != nil { - return -1, err - } - - if offset > tmpOffset+s { - if i == len(r.readers)-1 { - rdrOffset := s + (offset - tmpOffset) - if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil { - return -1, err - } - r.pos = &pos{i, rdrOffset} - return offset, nil - } - - tmpOffset += s - continue - } - - rdrOffset := offset - tmpOffset - idx := i - - if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil { - return -1, err - } - // make sure all following readers are at 0 - for _, rdr := range r.readers[i+1:] { - rdr.Seek(0, os.SEEK_SET) - } - - if rdrOffset == s && i != len(r.readers)-1 { - idx++ - rdrOffset = 0 - } - r.pos = &pos{idx, rdrOffset} - return offset, nil - } - case os.SEEK_END: - for _, rdr := range
r.readers { - s, err := rdr.Seek(0, os.SEEK_END) - if err != nil { - return -1, err - } - tmpOffset += s - } - if _, err := r.Seek(tmpOffset+offset, os.SEEK_SET); err != nil { - return -1, err - } - return tmpOffset + offset, nil - case os.SEEK_CUR: - if r.pos == nil { - return r.Seek(offset, os.SEEK_SET) - } - // Just return the current offset - if offset == 0 { - return r.getCurOffset() - } - - curOffset, err := r.getCurOffset() - if err != nil { - return -1, err - } - rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset) - if err != nil { - return -1, err - } - - r.pos = &pos{r.posIdx[rdr], rdrOffset} - return curOffset + offset, nil - default: - return -1, fmt.Errorf("Invalid whence: %d", whence) - } - - return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset) -} - -func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) { - - var offsetTo int64 - - for _, rdr := range r.readers { - size, err := getReadSeekerSize(rdr) - if err != nil { - return nil, -1, err - } - if offsetTo+size > offset { - return rdr, offset - offsetTo, nil - } - if rdr == r.readers[len(r.readers)-1] { - return rdr, offsetTo + offset, nil - } - offsetTo += size - } - - return nil, 0, nil -} - -func (r *multiReadSeeker) getCurOffset() (int64, error) { - var totalSize int64 - for _, rdr := range r.readers[:r.pos.idx+1] { - if r.posIdx[rdr] == r.pos.idx { - totalSize += r.pos.offset - break - } - - size, err := getReadSeekerSize(rdr) - if err != nil { - return -1, fmt.Errorf("error getting seeker size: %v", err) - } - totalSize += size - } - return totalSize, nil -} - -func (r *multiReadSeeker) Read(b []byte) (int, error) { - if r.pos == nil { - // make sure all readers are at 0 - r.Seek(0, os.SEEK_SET) - } - - bLen := int64(len(b)) - buf := bytes.NewBuffer(nil) - var rdr io.ReadSeeker - - for _, rdr = range r.readers[r.pos.idx:] { - readBytes, err := io.CopyN(buf, rdr, bLen) - if err != nil && err != io.EOF { - return -1, err - } - bLen -= readBytes - - if bLen == 0 { - break - } - } - - rdrPos, err := rdr.Seek(0, os.SEEK_CUR) - if err != nil { - return -1, err - } - r.pos = &pos{r.posIdx[rdr], rdrPos} - return buf.Read(b) -} - -func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) { - // save the current position - pos, err := rdr.Seek(0, os.SEEK_CUR) - if err != nil { - return -1, err - } - - // get the size - size, err := rdr.Seek(0, os.SEEK_END) - if err != nil { - return -1, err - } - - // reset the position - if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil { - return -1, err - } - return size, nil -} - -// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided -// input readseekers. After calling this method the initial position is set to the -// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances -// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker. -// Seek can be used over the sum of lengths of all readseekers. -// -// When a MultiReadSeeker is used, no Read and Seek operations should be made on -// its ReadSeeker components. Also, users should make no assumption on the state -// of individual readseekers while the MultiReadSeeker is used. 
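// Editor's aside (an illustrative sketch, not part of the vendored sources):
// MultiReadSeeker below is what lets ReadLogs present several rotated files plus
// the live file as one seekable stream. Usage with in-memory readers
// (strings.Reader implements io.ReadSeeker), assuming the package is still
// vendored at this import path:
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"strings"

	"github.com/docker/docker/daemon/logger/loggerutils/multireader"
)

func main() {
	rs := multireader.MultiReadSeeker(
		strings.NewReader("first file\n"),
		strings.NewReader("second file\n"),
	)
	// Seek into the middle of the logical concatenation...
	if _, err := rs.Seek(6, os.SEEK_SET); err != nil {
		panic(err)
	}
	// ...and read across the boundary between the underlying readers.
	b, err := ioutil.ReadAll(rs)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", string(b)) // "file\nsecond file\n"
}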
-func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker { - if len(readers) == 1 { - return readers[0] - } - idx := make(map[io.ReadSeeker]int) - for i, rdr := range readers { - idx[rdr] = i - } - return &multiReadSeeker{ - readers: readers, - posIdx: idx, - } -} diff --git a/vendor/github.com/docker/docker/daemon/logger/loginfo.go b/vendor/github.com/docker/docker/daemon/logger/loginfo.go deleted file mode 100644 index 4c48235f5..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/loginfo.go +++ /dev/null @@ -1,129 +0,0 @@ -package logger // import "github.com/docker/docker/daemon/logger" - -import ( - "fmt" - "os" - "regexp" - "strings" - "time" -) - -// Info provides enough information for a logging driver to do its function. -type Info struct { - Config map[string]string - ContainerID string - ContainerName string - ContainerEntrypoint string - ContainerArgs []string - ContainerImageID string - ContainerImageName string - ContainerCreated time.Time - ContainerEnv []string - ContainerLabels map[string]string - LogPath string - DaemonName string -} - -// ExtraAttributes returns the user-defined extra attributes (labels, -// environment variables) in key-value format. This can be used by log drivers -// that support metadata to add more context to a log. -func (info *Info) ExtraAttributes(keyMod func(string) string) (map[string]string, error) { - extra := make(map[string]string) - labels, ok := info.Config["labels"] - if ok && len(labels) > 0 { - for _, l := range strings.Split(labels, ",") { - if v, ok := info.ContainerLabels[l]; ok { - if keyMod != nil { - l = keyMod(l) - } - extra[l] = v - } - } - } - - envMapping := make(map[string]string) - for _, e := range info.ContainerEnv { - if kv := strings.SplitN(e, "=", 2); len(kv) == 2 { - envMapping[kv[0]] = kv[1] - } - } - - env, ok := info.Config["env"] - if ok && len(env) > 0 { - for _, l := range strings.Split(env, ",") { - if v, ok := envMapping[l]; ok { - if keyMod != nil { - l = keyMod(l) - } - extra[l] = v - } - } - } - - envRegex, ok := info.Config["env-regex"] - if ok && len(envRegex) > 0 { - re, err := regexp.Compile(envRegex) - if err != nil { - return nil, err - } - for k, v := range envMapping { - if re.MatchString(k) { - if keyMod != nil { - k = keyMod(k) - } - extra[k] = v - } - } - } - - return extra, nil -} - -// Hostname returns the hostname from the underlying OS. -func (info *Info) Hostname() (string, error) { - hostname, err := os.Hostname() - if err != nil { - return "", fmt.Errorf("logger: can not resolve hostname: %v", err) - } - return hostname, nil -} - -// Command returns the command that the container being logged was -// started with. The Entrypoint is prepended to the container -// arguments. -func (info *Info) Command() string { - terms := []string{info.ContainerEntrypoint} - terms = append(terms, info.ContainerArgs...) - command := strings.Join(terms, " ") - return command -} - -// ID Returns the Container ID shortened to 12 characters. -func (info *Info) ID() string { - return info.ContainerID[:12] -} - -// FullID is an alias of ContainerID. -func (info *Info) FullID() string { - return info.ContainerID -} - -// Name returns the ContainerName without a preceding '/'. -func (info *Info) Name() string { - return strings.TrimPrefix(info.ContainerName, "/") -} - -// ImageID returns the ContainerImageID shortened to 12 characters. -func (info *Info) ImageID() string { - return info.ContainerImageID[:12] -} - -// ImageFullID is an alias of ContainerImageID. 
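// Editor's aside (an illustrative sketch, not part of the vendored sources): the
// ID/Name/ImageID helpers in this file exist largely so users can reference them
// from log tag templates handled by ParseLogTag earlier in this patch, e.g.
// --log-opt tag="{{.Name}}/{{.ID}}". Roughly what that expansion does:
package main

import (
	"bytes"
	"fmt"
	"strings"
	"text/template"
)

type info struct {
	ContainerID   string
	ContainerName string
}

func (i info) ID() string   { return i.ContainerID[:12] }
func (i info) Name() string { return strings.TrimPrefix(i.ContainerName, "/") }

func main() {
	tmpl := template.Must(template.New("log-tag").Parse("{{.Name}}/{{.ID}}"))
	var buf bytes.Buffer
	i := info{ContainerID: "9a8f5c1d2e3b4a5f6c7d8e9f0a1b2c3d", ContainerName: "/web"}
	if err := tmpl.Execute(&buf, i); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // web/9a8f5c1d2e3b
}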
-func (info *Info) ImageFullID() string { - return info.ContainerImageID -} - -// ImageName is an alias of ContainerImageName. -func (info *Info) ImageName() string { - return info.ContainerImageName -} diff --git a/vendor/github.com/docker/docker/daemon/logger/metrics.go b/vendor/github.com/docker/docker/daemon/logger/metrics.go deleted file mode 100644 index b7dfd38ec..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/metrics.go +++ /dev/null @@ -1,21 +0,0 @@ -package logger // import "github.com/docker/docker/daemon/logger" - -import ( - "github.com/docker/go-metrics" -) - -var ( - logWritesFailedCount metrics.Counter - logReadsFailedCount metrics.Counter - totalPartialLogs metrics.Counter -) - -func init() { - loggerMetrics := metrics.NewNamespace("logger", "", nil) - - logWritesFailedCount = loggerMetrics.NewCounter("log_write_operations_failed", "Number of log write operations that failed") - logReadsFailedCount = loggerMetrics.NewCounter("log_read_operations_failed", "Number of log reads from container stdio that failed") - totalPartialLogs = loggerMetrics.NewCounter("log_entries_size_greater_than_buffer", "Number of log entries which are larger than the log buffer") - - metrics.Register(loggerMetrics) -} diff --git a/vendor/github.com/docker/docker/daemon/logger/plugin.go b/vendor/github.com/docker/docker/daemon/logger/plugin.go deleted file mode 100644 index c66540ce5..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/plugin.go +++ /dev/null @@ -1,116 +0,0 @@ -package logger // import "github.com/docker/docker/daemon/logger" - -import ( - "fmt" - "io" - "os" - "path/filepath" - - "github.com/docker/docker/api/types/plugins/logdriver" - "github.com/docker/docker/errdefs" - getter "github.com/docker/docker/pkg/plugingetter" - "github.com/docker/docker/pkg/plugins" - "github.com/docker/docker/pkg/stringid" - "github.com/pkg/errors" -) - -var pluginGetter getter.PluginGetter - -const extName = "LogDriver" - -// logPlugin defines the available functions that logging plugins must implement. -type logPlugin interface { - StartLogging(streamPath string, info Info) (err error) - StopLogging(streamPath string) (err error) - Capabilities() (cap Capability, err error) - ReadLogs(info Info, config ReadConfig) (stream io.ReadCloser, err error) -} - -// RegisterPluginGetter sets the plugingetter. -func RegisterPluginGetter(plugingetter getter.PluginGetter) { - pluginGetter = plugingetter -} - -// getPlugin returns a logging plugin by its name. -// If the name is empty, it looks for the local driver.
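// Editor's aside (an illustrative sketch, not part of the vendored sources): a
// logging plugin is anything that can answer the calls in the logPlugin interface
// above; the daemon resolves it by name, opens a fifo, and streams encoded entries
// to it. A toy in-process stand-in showing the call sequence, with the fifo
// plumbing and HTTP transport omitted; all names here are hypothetical:
package main

import "fmt"

type capability struct{ ReadLogs bool }

type fakePlugin struct{}

func (fakePlugin) StartLogging(streamPath, containerID string) error {
	fmt.Printf("plugin: start consuming %s for container %s\n", streamPath, containerID)
	return nil
}

func (fakePlugin) StopLogging(streamPath string) error {
	fmt.Println("plugin: stop consuming", streamPath)
	return nil
}

func (fakePlugin) Capabilities() (capability, error) {
	return capability{ReadLogs: false}, nil
}

func main() {
	p := fakePlugin{}
	caps, _ := p.Capabilities()
	fmt.Println("plugin can read logs back:", caps.ReadLogs)
	p.StartLogging("/run/docker/logging/abc123", "abc123")
	// ... the daemon writes encoded log entries to the fifo at that path ...
	p.StopLogging("/run/docker/logging/abc123")
}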
-func getPlugin(name string, mode int) (Creator, error) { - p, err := pluginGetter.Get(name, extName, mode) - if err != nil { - return nil, fmt.Errorf("error looking up logging plugin %s: %v", name, err) - } - - client, err := makePluginClient(p) - if err != nil { - return nil, err - } - return makePluginCreator(name, client, p.ScopedPath), nil -} - -func makePluginClient(p getter.CompatPlugin) (logPlugin, error) { - if pc, ok := p.(getter.PluginWithV1Client); ok { - return &logPluginProxy{pc.Client()}, nil - } - pa, ok := p.(getter.PluginAddr) - if !ok { - return nil, errdefs.System(errors.Errorf("got unknown plugin type %T", p)) - } - - if pa.Protocol() != plugins.ProtocolSchemeHTTPV1 { - return nil, errors.Errorf("plugin protocol not supported: %s", p) - } - - addr := pa.Addr() - c, err := plugins.NewClientWithTimeout(addr.Network()+"://"+addr.String(), nil, pa.Timeout()) - if err != nil { - return nil, errors.Wrap(err, "error making plugin client") - } - return &logPluginProxy{c}, nil -} - -func makePluginCreator(name string, l logPlugin, scopePath func(s string) string) Creator { - return func(logCtx Info) (logger Logger, err error) { - defer func() { - if err != nil { - pluginGetter.Get(name, extName, getter.Release) - } - }() - - unscopedPath := filepath.Join("/", "run", "docker", "logging") - logRoot := scopePath(unscopedPath) - if err := os.MkdirAll(logRoot, 0700); err != nil { - return nil, err - } - - id := stringid.GenerateNonCryptoID() - a := &pluginAdapter{ - driverName: name, - id: id, - plugin: l, - fifoPath: filepath.Join(logRoot, id), - logInfo: logCtx, - } - - cap, err := a.plugin.Capabilities() - if err == nil { - a.capabilities = cap - } - - stream, err := openPluginStream(a) - if err != nil { - return nil, err - } - - a.stream = stream - a.enc = logdriver.NewLogEntryEncoder(a.stream) - - if err := l.StartLogging(filepath.Join(unscopedPath, id), logCtx); err != nil { - return nil, errors.Wrapf(err, "error creating logger") - } - - if cap.ReadLogs { - return &pluginAdapterWithRead{a}, nil - } - - return a, nil - } -} diff --git a/vendor/github.com/docker/docker/daemon/logger/plugin_unix.go b/vendor/github.com/docker/docker/daemon/logger/plugin_unix.go deleted file mode 100644 index e9a16af9b..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/plugin_unix.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build linux freebsd - -package logger // import "github.com/docker/docker/daemon/logger" - -import ( - "context" - "io" - - "github.com/containerd/fifo" - "github.com/pkg/errors" - "golang.org/x/sys/unix" -) - -func openPluginStream(a *pluginAdapter) (io.WriteCloser, error) { - // Make sure to also open with read (in addition to write) to avoid borken pipe errors on plugin failure. - // It is up to the plugin to keep track of pipes that it should re-attach to, however. - // If the plugin doesn't open for reads, then the container will block once the pipe is full. 
- f, err := fifo.OpenFifo(context.Background(), a.fifoPath, unix.O_RDWR|unix.O_CREAT|unix.O_NONBLOCK, 0700) - if err != nil { - return nil, errors.Wrapf(err, "error creating i/o pipe for log plugin: %s", a.Name()) - } - return f, nil -} diff --git a/vendor/github.com/docker/docker/daemon/logger/plugin_unsupported.go b/vendor/github.com/docker/docker/daemon/logger/plugin_unsupported.go deleted file mode 100644 index 2ad47cc07..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/plugin_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !linux,!freebsd - -package logger // import "github.com/docker/docker/daemon/logger" - -import ( - "errors" - "io" -) - -func openPluginStream(a *pluginAdapter) (io.WriteCloser, error) { - return nil, errors.New("log plugin not supported") -} diff --git a/vendor/github.com/docker/docker/daemon/logger/proxy.go b/vendor/github.com/docker/docker/daemon/logger/proxy.go deleted file mode 100644 index 4a1c77810..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/proxy.go +++ /dev/null @@ -1,107 +0,0 @@ -package logger // import "github.com/docker/docker/daemon/logger" - -import ( - "errors" - "io" -) - -type client interface { - Call(string, interface{}, interface{}) error - Stream(string, interface{}) (io.ReadCloser, error) -} - -type logPluginProxy struct { - client -} - -type logPluginProxyStartLoggingRequest struct { - File string - Info Info -} - -type logPluginProxyStartLoggingResponse struct { - Err string -} - -func (pp *logPluginProxy) StartLogging(file string, info Info) (err error) { - var ( - req logPluginProxyStartLoggingRequest - ret logPluginProxyStartLoggingResponse - ) - - req.File = file - req.Info = info - if err = pp.Call("LogDriver.StartLogging", req, &ret); err != nil { - return - } - - if ret.Err != "" { - err = errors.New(ret.Err) - } - - return -} - -type logPluginProxyStopLoggingRequest struct { - File string -} - -type logPluginProxyStopLoggingResponse struct { - Err string -} - -func (pp *logPluginProxy) StopLogging(file string) (err error) { - var ( - req logPluginProxyStopLoggingRequest - ret logPluginProxyStopLoggingResponse - ) - - req.File = file - if err = pp.Call("LogDriver.StopLogging", req, &ret); err != nil { - return - } - - if ret.Err != "" { - err = errors.New(ret.Err) - } - - return -} - -type logPluginProxyCapabilitiesResponse struct { - Cap Capability - Err string -} - -func (pp *logPluginProxy) Capabilities() (cap Capability, err error) { - var ( - ret logPluginProxyCapabilitiesResponse - ) - - if err = pp.Call("LogDriver.Capabilities", nil, &ret); err != nil { - return - } - - cap = ret.Cap - - if ret.Err != "" { - err = errors.New(ret.Err) - } - - return -} - -type logPluginProxyReadLogsRequest struct { - Info Info - Config ReadConfig -} - -func (pp *logPluginProxy) ReadLogs(info Info, config ReadConfig) (stream io.ReadCloser, err error) { - var ( - req logPluginProxyReadLogsRequest - ) - - req.Info = info - req.Config = config - return pp.Stream("LogDriver.ReadLogs", req) -} diff --git a/vendor/github.com/docker/docker/daemon/logger/ring.go b/vendor/github.com/docker/docker/daemon/logger/ring.go deleted file mode 100644 index c675c1e83..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/ring.go +++ /dev/null @@ -1,223 +0,0 @@ -package logger // import "github.com/docker/docker/daemon/logger" - -import ( - "errors" - "sync" - "sync/atomic" - - "github.com/sirupsen/logrus" -) - -const ( - defaultRingMaxSize = 1e6 // 1MB -) - -// RingLogger is a ring buffer that implements the Logger 
interface. -// This is used when lossy logging is OK. -type RingLogger struct { - buffer *messageRing - l Logger - logInfo Info - closeFlag int32 -} - -type ringWithReader struct { - *RingLogger -} - -func (r *ringWithReader) ReadLogs(cfg ReadConfig) *LogWatcher { - reader, ok := r.l.(LogReader) - if !ok { - // something is wrong if we get here - panic("expected log reader") - } - return reader.ReadLogs(cfg) -} - -func newRingLogger(driver Logger, logInfo Info, maxSize int64) *RingLogger { - l := &RingLogger{ - buffer: newRing(maxSize), - l: driver, - logInfo: logInfo, - } - go l.run() - return l -} - -// NewRingLogger creates a new Logger that is implemented as a RingBuffer wrapping -// the passed in logger. -func NewRingLogger(driver Logger, logInfo Info, maxSize int64) Logger { - if maxSize < 0 { - maxSize = defaultRingMaxSize - } - l := newRingLogger(driver, logInfo, maxSize) - if _, ok := driver.(LogReader); ok { - return &ringWithReader{l} - } - return l -} - -// Log queues messages into the ring buffer -func (r *RingLogger) Log(msg *Message) error { - if r.closed() { - return errClosed - } - return r.buffer.Enqueue(msg) -} - -// Name returns the name of the underlying logger -func (r *RingLogger) Name() string { - return r.l.Name() -} - -func (r *RingLogger) closed() bool { - return atomic.LoadInt32(&r.closeFlag) == 1 -} - -func (r *RingLogger) setClosed() { - atomic.StoreInt32(&r.closeFlag, 1) -} - -// Close closes the logger -func (r *RingLogger) Close() error { - r.setClosed() - r.buffer.Close() - // empty out the queue - var logErr bool - for _, msg := range r.buffer.Drain() { - if logErr { - // some error logging a previous message, so re-insert to message pool - // and assume log driver is hosed - PutMessage(msg) - continue - } - - if err := r.l.Log(msg); err != nil { - logrus.WithField("driver", r.l.Name()). - WithField("container", r.logInfo.ContainerID). - WithError(err). - Errorf("Error writing log message") - logErr = true - } - } - return r.l.Close() -} - -// run consumes messages from the ring buffer and forwards them to the underling -// logger. -// This is run in a goroutine when the RingLogger is created -func (r *RingLogger) run() { - for { - if r.closed() { - return - } - msg, err := r.buffer.Dequeue() - if err != nil { - // buffer is closed - return - } - if err := r.l.Log(msg); err != nil { - logrus.WithField("driver", r.l.Name()). - WithField("container", r.logInfo.ContainerID). - WithError(err). - Errorf("Error writing log message") - } - } -} - -type messageRing struct { - mu sync.Mutex - // signals callers of `Dequeue` to wake up either on `Close` or when a new `Message` is added - wait *sync.Cond - - sizeBytes int64 // current buffer size - maxBytes int64 // max buffer size size - queue []*Message - closed bool -} - -func newRing(maxBytes int64) *messageRing { - queueSize := 1000 - if maxBytes == 0 || maxBytes == 1 { - // With 0 or 1 max byte size, the maximum size of the queue would only ever be 1 - // message long. - queueSize = 1 - } - - r := &messageRing{queue: make([]*Message, 0, queueSize), maxBytes: maxBytes} - r.wait = sync.NewCond(&r.mu) - return r -} - -// Enqueue adds a message to the buffer queue -// If the message is too big for the buffer it drops the new message. -// If there are no messages in the queue and the message is still too big, it adds the message anyway. 
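RingLogger above is what backs the mode=non-blocking log-opt: the real driver is wrapped so that a stalled backend drops messages instead of blocking the container's stdio. A sketch of that wrapping, with an assumed maxSize; the helper name is illustrative:

    package logexample

    import "github.com/docker/docker/daemon/logger"

    // wrapNonBlocking buffers up to maxSize bytes in memory and then drops
    // new messages rather than letting a slow driver block the container.
    func wrapNonBlocking(driver logger.Logger, info logger.Info) logger.Logger {
    	const maxSize = 1 << 20 // same order of magnitude as defaultRingMaxSize
    	return logger.NewRingLogger(driver, info, maxSize)
    }

If the wrapped driver also implements LogReader, NewRingLogger returns the ringWithReader variant, so reading logs back keeps working through the buffer.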
-func (r *messageRing) Enqueue(m *Message) error { - mSize := int64(len(m.Line)) - - r.mu.Lock() - if r.closed { - r.mu.Unlock() - return errClosed - } - if mSize+r.sizeBytes > r.maxBytes && len(r.queue) > 0 { - r.wait.Signal() - r.mu.Unlock() - return nil - } - - r.queue = append(r.queue, m) - r.sizeBytes += mSize - r.wait.Signal() - r.mu.Unlock() - return nil -} - -// Dequeue pulls a message off the queue -// If there are no messages, it waits for one. -// If the buffer is closed, it will return immediately. -func (r *messageRing) Dequeue() (*Message, error) { - r.mu.Lock() - for len(r.queue) == 0 && !r.closed { - r.wait.Wait() - } - - if r.closed { - r.mu.Unlock() - return nil, errClosed - } - - msg := r.queue[0] - r.queue = r.queue[1:] - r.sizeBytes -= int64(len(msg.Line)) - r.mu.Unlock() - return msg, nil -} - -var errClosed = errors.New("closed") - -// Close closes the buffer ensuring no new messages can be added. -// Any callers waiting to dequeue a message will be woken up. -func (r *messageRing) Close() { - r.mu.Lock() - if r.closed { - r.mu.Unlock() - return - } - - r.closed = true - r.wait.Broadcast() - r.mu.Unlock() -} - -// Drain drains all messages from the queue. -// This can be used after `Close()` to get any remaining messages that were in queue. -func (r *messageRing) Drain() []*Message { - r.mu.Lock() - ls := make([]*Message, 0, len(r.queue)) - ls = append(ls, r.queue...) - r.sizeBytes = 0 - r.queue = r.queue[:0] - r.mu.Unlock() - return ls -} diff --git a/vendor/github.com/docker/docker/daemon/logger/splunk/splunk.go b/vendor/github.com/docker/docker/daemon/logger/splunk/splunk.go deleted file mode 100644 index 8756ffa3b..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/splunk/splunk.go +++ /dev/null @@ -1,649 +0,0 @@ -// Package splunk provides the log driver for forwarding server logs to -// Splunk HTTP Event Collector endpoint. 
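Each message this driver emits becomes one JSON object posted to the HEC /services/collector/event/1.0 endpoint. A sketch of the wire shape, using trimmed local copies of the splunkMessage/splunkMessageEvent structs defined below; the values are illustrative:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    type event struct {
    	Line   string `json:"line"`
    	Source string `json:"source"`
    	Tag    string `json:"tag,omitempty"`
    }

    type hecMessage struct {
    	Event interface{} `json:"event"`
    	Time  string      `json:"time"`
    	Host  string      `json:"host"`
    }

    func main() {
    	m := hecMessage{
    		Event: event{Line: "hello", Source: "stdout", Tag: "web1/0123456789ab"},
    		// seconds.microseconds, matching createSplunkMessage's %f formatting
    		Time: "1624752811.000000",
    		Host: "myhost",
    	}
    	b, _ := json.Marshal(m)
    	fmt.Println(string(b))
    	// {"event":{"line":"hello","source":"stdout","tag":"web1/0123456789ab"},"time":"1624752811.000000","host":"myhost"}
    }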
-package splunk // import "github.com/docker/docker/daemon/logger/splunk" - -import ( - "bytes" - "compress/gzip" - "context" - "crypto/tls" - "crypto/x509" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "strconv" - "strings" - "sync" - "time" - - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/daemon/logger/loggerutils" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/urlutil" - "github.com/sirupsen/logrus" -) - -const ( - driverName = "splunk" - splunkURLKey = "splunk-url" - splunkTokenKey = "splunk-token" - splunkSourceKey = "splunk-source" - splunkSourceTypeKey = "splunk-sourcetype" - splunkIndexKey = "splunk-index" - splunkCAPathKey = "splunk-capath" - splunkCANameKey = "splunk-caname" - splunkInsecureSkipVerifyKey = "splunk-insecureskipverify" - splunkFormatKey = "splunk-format" - splunkVerifyConnectionKey = "splunk-verify-connection" - splunkGzipCompressionKey = "splunk-gzip" - splunkGzipCompressionLevelKey = "splunk-gzip-level" - envKey = "env" - envRegexKey = "env-regex" - labelsKey = "labels" - tagKey = "tag" -) - -const ( - // How often do we send messages (if we are not reaching batch size) - defaultPostMessagesFrequency = 5 * time.Second - // How big can be batch of messages - defaultPostMessagesBatchSize = 1000 - // Maximum number of messages we can store in buffer - defaultBufferMaximum = 10 * defaultPostMessagesBatchSize - // Number of messages allowed to be queued in the channel - defaultStreamChannelSize = 4 * defaultPostMessagesBatchSize - // maxResponseSize is the max amount that will be read from an http response - maxResponseSize = 1024 -) - -const ( - envVarPostMessagesFrequency = "SPLUNK_LOGGING_DRIVER_POST_MESSAGES_FREQUENCY" - envVarPostMessagesBatchSize = "SPLUNK_LOGGING_DRIVER_POST_MESSAGES_BATCH_SIZE" - envVarBufferMaximum = "SPLUNK_LOGGING_DRIVER_BUFFER_MAX" - envVarStreamChannelSize = "SPLUNK_LOGGING_DRIVER_CHANNEL_SIZE" -) - -var batchSendTimeout = 30 * time.Second - -type splunkLoggerInterface interface { - logger.Logger - worker() -} - -type splunkLogger struct { - client *http.Client - transport *http.Transport - - url string - auth string - nullMessage *splunkMessage - - // http compression - gzipCompression bool - gzipCompressionLevel int - - // Advanced options - postMessagesFrequency time.Duration - postMessagesBatchSize int - bufferMaximum int - - // For synchronization between background worker and logger. - // We use channel to send messages to worker go routine. 
- // All other variables for blocking Close call before we flush all messages to HEC - stream chan *splunkMessage - lock sync.RWMutex - closed bool - closedCond *sync.Cond -} - -type splunkLoggerInline struct { - *splunkLogger - - nullEvent *splunkMessageEvent -} - -type splunkLoggerJSON struct { - *splunkLoggerInline -} - -type splunkLoggerRaw struct { - *splunkLogger - - prefix []byte -} - -type splunkMessage struct { - Event interface{} `json:"event"` - Time string `json:"time"` - Host string `json:"host"` - Source string `json:"source,omitempty"` - SourceType string `json:"sourcetype,omitempty"` - Index string `json:"index,omitempty"` -} - -type splunkMessageEvent struct { - Line interface{} `json:"line"` - Source string `json:"source"` - Tag string `json:"tag,omitempty"` - Attrs map[string]string `json:"attrs,omitempty"` -} - -const ( - splunkFormatRaw = "raw" - splunkFormatJSON = "json" - splunkFormatInline = "inline" -) - -func init() { - if err := logger.RegisterLogDriver(driverName, New); err != nil { - logrus.Fatal(err) - } - if err := logger.RegisterLogOptValidator(driverName, ValidateLogOpt); err != nil { - logrus.Fatal(err) - } -} - -// New creates splunk logger driver using configuration passed in context -func New(info logger.Info) (logger.Logger, error) { - hostname, err := info.Hostname() - if err != nil { - return nil, fmt.Errorf("%s: cannot access hostname to set source field", driverName) - } - - // Parse and validate Splunk URL - splunkURL, err := parseURL(info) - if err != nil { - return nil, err - } - - // Splunk Token is required parameter - splunkToken, ok := info.Config[splunkTokenKey] - if !ok { - return nil, fmt.Errorf("%s: %s is expected", driverName, splunkTokenKey) - } - - tlsConfig := &tls.Config{} - - // Splunk is using autogenerated certificates by default, - // allow users to trust them with skipping verification - if insecureSkipVerifyStr, ok := info.Config[splunkInsecureSkipVerifyKey]; ok { - insecureSkipVerify, err := strconv.ParseBool(insecureSkipVerifyStr) - if err != nil { - return nil, err - } - tlsConfig.InsecureSkipVerify = insecureSkipVerify - } - - // If path to the root certificate is provided - load it - if caPath, ok := info.Config[splunkCAPathKey]; ok { - caCert, err := ioutil.ReadFile(caPath) - if err != nil { - return nil, err - } - caPool := x509.NewCertPool() - caPool.AppendCertsFromPEM(caCert) - tlsConfig.RootCAs = caPool - } - - if caName, ok := info.Config[splunkCANameKey]; ok { - tlsConfig.ServerName = caName - } - - gzipCompression := false - if gzipCompressionStr, ok := info.Config[splunkGzipCompressionKey]; ok { - gzipCompression, err = strconv.ParseBool(gzipCompressionStr) - if err != nil { - return nil, err - } - } - - gzipCompressionLevel := gzip.DefaultCompression - if gzipCompressionLevelStr, ok := info.Config[splunkGzipCompressionLevelKey]; ok { - var err error - gzipCompressionLevel64, err := strconv.ParseInt(gzipCompressionLevelStr, 10, 32) - if err != nil { - return nil, err - } - gzipCompressionLevel = int(gzipCompressionLevel64) - if gzipCompressionLevel < gzip.DefaultCompression || gzipCompressionLevel > gzip.BestCompression { - err := fmt.Errorf("not supported level '%s' for %s (supported values between %d and %d)", - gzipCompressionLevelStr, splunkGzipCompressionLevelKey, gzip.DefaultCompression, gzip.BestCompression) - return nil, err - } - } - - transport := &http.Transport{ - TLSClientConfig: tlsConfig, - Proxy: http.ProxyFromEnvironment, - } - client := &http.Client{ - Transport: transport, - } - - source := 
info.Config[splunkSourceKey] - sourceType := info.Config[splunkSourceTypeKey] - index := info.Config[splunkIndexKey] - - var nullMessage = &splunkMessage{ - Host: hostname, - Source: source, - SourceType: sourceType, - Index: index, - } - - // Allow user to remove tag from the messages by setting tag to empty string - tag := "" - if tagTemplate, ok := info.Config[tagKey]; !ok || tagTemplate != "" { - tag, err = loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) - if err != nil { - return nil, err - } - } - - attrs, err := info.ExtraAttributes(nil) - if err != nil { - return nil, err - } - - var ( - postMessagesFrequency = getAdvancedOptionDuration(envVarPostMessagesFrequency, defaultPostMessagesFrequency) - postMessagesBatchSize = getAdvancedOptionInt(envVarPostMessagesBatchSize, defaultPostMessagesBatchSize) - bufferMaximum = getAdvancedOptionInt(envVarBufferMaximum, defaultBufferMaximum) - streamChannelSize = getAdvancedOptionInt(envVarStreamChannelSize, defaultStreamChannelSize) - ) - - logger := &splunkLogger{ - client: client, - transport: transport, - url: splunkURL.String(), - auth: "Splunk " + splunkToken, - nullMessage: nullMessage, - gzipCompression: gzipCompression, - gzipCompressionLevel: gzipCompressionLevel, - stream: make(chan *splunkMessage, streamChannelSize), - postMessagesFrequency: postMessagesFrequency, - postMessagesBatchSize: postMessagesBatchSize, - bufferMaximum: bufferMaximum, - } - - // By default we verify connection, but we allow use to skip that - verifyConnection := true - if verifyConnectionStr, ok := info.Config[splunkVerifyConnectionKey]; ok { - var err error - verifyConnection, err = strconv.ParseBool(verifyConnectionStr) - if err != nil { - return nil, err - } - } - if verifyConnection { - err = verifySplunkConnection(logger) - if err != nil { - return nil, err - } - } - - var splunkFormat string - if splunkFormatParsed, ok := info.Config[splunkFormatKey]; ok { - switch splunkFormatParsed { - case splunkFormatInline: - case splunkFormatJSON: - case splunkFormatRaw: - default: - return nil, fmt.Errorf("Unknown format specified %s, supported formats are inline, json and raw", splunkFormat) - } - splunkFormat = splunkFormatParsed - } else { - splunkFormat = splunkFormatInline - } - - var loggerWrapper splunkLoggerInterface - - switch splunkFormat { - case splunkFormatInline: - nullEvent := &splunkMessageEvent{ - Tag: tag, - Attrs: attrs, - } - - loggerWrapper = &splunkLoggerInline{logger, nullEvent} - case splunkFormatJSON: - nullEvent := &splunkMessageEvent{ - Tag: tag, - Attrs: attrs, - } - - loggerWrapper = &splunkLoggerJSON{&splunkLoggerInline{logger, nullEvent}} - case splunkFormatRaw: - var prefix bytes.Buffer - if tag != "" { - prefix.WriteString(tag) - prefix.WriteString(" ") - } - for key, value := range attrs { - prefix.WriteString(key) - prefix.WriteString("=") - prefix.WriteString(value) - prefix.WriteString(" ") - } - - loggerWrapper = &splunkLoggerRaw{logger, prefix.Bytes()} - default: - return nil, fmt.Errorf("Unexpected format %s", splunkFormat) - } - - go loggerWrapper.worker() - - return loggerWrapper, nil -} - -func (l *splunkLoggerInline) Log(msg *logger.Message) error { - message := l.createSplunkMessage(msg) - - event := *l.nullEvent - event.Line = string(msg.Line) - event.Source = msg.Source - - message.Event = &event - logger.PutMessage(msg) - return l.queueMessageAsync(message) -} - -func (l *splunkLoggerJSON) Log(msg *logger.Message) error { - message := l.createSplunkMessage(msg) - event := *l.nullEvent - - var 
rawJSONMessage json.RawMessage - if err := json.Unmarshal(msg.Line, &rawJSONMessage); err == nil { - event.Line = &rawJSONMessage - } else { - event.Line = string(msg.Line) - } - - event.Source = msg.Source - - message.Event = &event - logger.PutMessage(msg) - return l.queueMessageAsync(message) -} - -func (l *splunkLoggerRaw) Log(msg *logger.Message) error { - // empty or whitespace-only messages are not accepted by HEC - if strings.TrimSpace(string(msg.Line)) == "" { - return nil - } - - message := l.createSplunkMessage(msg) - - message.Event = string(append(l.prefix, msg.Line...)) - logger.PutMessage(msg) - return l.queueMessageAsync(message) -} - -func (l *splunkLogger) queueMessageAsync(message *splunkMessage) error { - l.lock.RLock() - defer l.lock.RUnlock() - if l.closedCond != nil { - return fmt.Errorf("%s: driver is closed", driverName) - } - l.stream <- message - return nil -} - -func (l *splunkLogger) worker() { - timer := time.NewTicker(l.postMessagesFrequency) - var messages []*splunkMessage - for { - select { - case message, open := <-l.stream: - if !open { - l.postMessages(messages, true) - l.lock.Lock() - defer l.lock.Unlock() - l.transport.CloseIdleConnections() - l.closed = true - l.closedCond.Signal() - return - } - messages = append(messages, message) - // Only sending when we get exactly to the batch size, - // This also helps not to fire postMessages on every new message, - // when previous try failed. - if len(messages)%l.postMessagesBatchSize == 0 { - messages = l.postMessages(messages, false) - } - case <-timer.C: - messages = l.postMessages(messages, false) - } - } -} - -func (l *splunkLogger) postMessages(messages []*splunkMessage, lastChance bool) []*splunkMessage { - messagesLen := len(messages) - - ctx, cancel := context.WithTimeout(context.Background(), batchSendTimeout) - defer cancel() - - for i := 0; i < messagesLen; i += l.postMessagesBatchSize { - upperBound := i + l.postMessagesBatchSize - if upperBound > messagesLen { - upperBound = messagesLen - } - - if err := l.tryPostMessages(ctx, messages[i:upperBound]); err != nil { - logrus.WithError(err).WithField("module", "logger/splunk").Warn("Error while sending logs") - if messagesLen-i >= l.bufferMaximum || lastChance { - // If this is last chance - print them all to the daemon log - if lastChance { - upperBound = messagesLen - } - // Not all sent, but buffer has got to its maximum, let's log all messages - // we could not send and return buffer minus one batch size - for j := i; j < upperBound; j++ { - if jsonEvent, err := json.Marshal(messages[j]); err != nil { - logrus.Error(err) - } else { - logrus.Error(fmt.Errorf("Failed to send a message '%s'", string(jsonEvent))) - } - } - return messages[upperBound:messagesLen] - } - // Not all sent, returning buffer from where we have not sent messages - return messages[i:messagesLen] - } - } - // All sent, return empty buffer - return messages[:0] -} - -func (l *splunkLogger) tryPostMessages(ctx context.Context, messages []*splunkMessage) error { - if len(messages) == 0 { - return nil - } - var buffer bytes.Buffer - var writer io.Writer - var gzipWriter *gzip.Writer - var err error - // If gzip compression is enabled - create gzip writer with specified compression - // level. 
If gzip compression is disabled, use standard buffer as a writer - if l.gzipCompression { - gzipWriter, err = gzip.NewWriterLevel(&buffer, l.gzipCompressionLevel) - if err != nil { - return err - } - writer = gzipWriter - } else { - writer = &buffer - } - for _, message := range messages { - jsonEvent, err := json.Marshal(message) - if err != nil { - return err - } - if _, err := writer.Write(jsonEvent); err != nil { - return err - } - } - // If gzip compression is enabled, tell it, that we are done - if l.gzipCompression { - err = gzipWriter.Close() - if err != nil { - return err - } - } - req, err := http.NewRequest("POST", l.url, bytes.NewBuffer(buffer.Bytes())) - if err != nil { - return err - } - req = req.WithContext(ctx) - req.Header.Set("Authorization", l.auth) - // Tell if we are sending gzip compressed body - if l.gzipCompression { - req.Header.Set("Content-Encoding", "gzip") - } - resp, err := l.client.Do(req) - if err != nil { - return err - } - defer func() { - pools.Copy(ioutil.Discard, resp.Body) - resp.Body.Close() - }() - if resp.StatusCode != http.StatusOK { - rdr := io.LimitReader(resp.Body, maxResponseSize) - body, err := ioutil.ReadAll(rdr) - if err != nil { - return err - } - return fmt.Errorf("%s: failed to send event - %s - %s", driverName, resp.Status, string(body)) - } - return nil -} - -func (l *splunkLogger) Close() error { - l.lock.Lock() - defer l.lock.Unlock() - if l.closedCond == nil { - l.closedCond = sync.NewCond(&l.lock) - close(l.stream) - for !l.closed { - l.closedCond.Wait() - } - } - return nil -} - -func (l *splunkLogger) Name() string { - return driverName -} - -func (l *splunkLogger) createSplunkMessage(msg *logger.Message) *splunkMessage { - message := *l.nullMessage - message.Time = fmt.Sprintf("%f", float64(msg.Timestamp.UnixNano())/float64(time.Second)) - return &message -} - -// ValidateLogOpt looks for all supported by splunk driver options -func ValidateLogOpt(cfg map[string]string) error { - for key := range cfg { - switch key { - case splunkURLKey: - case splunkTokenKey: - case splunkSourceKey: - case splunkSourceTypeKey: - case splunkIndexKey: - case splunkCAPathKey: - case splunkCANameKey: - case splunkInsecureSkipVerifyKey: - case splunkFormatKey: - case splunkVerifyConnectionKey: - case splunkGzipCompressionKey: - case splunkGzipCompressionLevelKey: - case envKey: - case envRegexKey: - case labelsKey: - case tagKey: - default: - return fmt.Errorf("unknown log opt '%s' for %s log driver", key, driverName) - } - } - return nil -} - -func parseURL(info logger.Info) (*url.URL, error) { - splunkURLStr, ok := info.Config[splunkURLKey] - if !ok { - return nil, fmt.Errorf("%s: %s is expected", driverName, splunkURLKey) - } - - splunkURL, err := url.Parse(splunkURLStr) - if err != nil { - return nil, fmt.Errorf("%s: failed to parse %s as url value in %s", driverName, splunkURLStr, splunkURLKey) - } - - if !urlutil.IsURL(splunkURLStr) || - !splunkURL.IsAbs() || - (splunkURL.Path != "" && splunkURL.Path != "/") || - splunkURL.RawQuery != "" || - splunkURL.Fragment != "" { - return nil, fmt.Errorf("%s: expected format scheme://dns_name_or_ip:port for %s", driverName, splunkURLKey) - } - - splunkURL.Path = "/services/collector/event/1.0" - - return splunkURL, nil -} - -func verifySplunkConnection(l *splunkLogger) error { - req, err := http.NewRequest(http.MethodOptions, l.url, nil) - if err != nil { - return err - } - resp, err := l.client.Do(req) - if err != nil { - return err - } - defer func() { - pools.Copy(ioutil.Discard, resp.Body) - 
resp.Body.Close() - }() - - if resp.StatusCode != http.StatusOK { - rdr := io.LimitReader(resp.Body, maxResponseSize) - body, err := ioutil.ReadAll(rdr) - if err != nil { - return err - } - return fmt.Errorf("%s: failed to verify connection - %s - %s", driverName, resp.Status, string(body)) - } - return nil -} - -func getAdvancedOptionDuration(envName string, defaultValue time.Duration) time.Duration { - valueStr := os.Getenv(envName) - if valueStr == "" { - return defaultValue - } - parsedValue, err := time.ParseDuration(valueStr) - if err != nil { - logrus.Error(fmt.Sprintf("Failed to parse value of %s as duration. Using default %v. %v", envName, defaultValue, err)) - return defaultValue - } - return parsedValue -} - -func getAdvancedOptionInt(envName string, defaultValue int) int { - valueStr := os.Getenv(envName) - if valueStr == "" { - return defaultValue - } - parsedValue, err := strconv.ParseInt(valueStr, 10, 32) - if err != nil { - logrus.Error(fmt.Sprintf("Failed to parse value of %s as integer. Using default %d. %v", envName, defaultValue, err)) - return defaultValue - } - return int(parsedValue) -} diff --git a/vendor/github.com/docker/docker/daemon/logger/syslog/syslog.go b/vendor/github.com/docker/docker/daemon/logger/syslog/syslog.go deleted file mode 100644 index 94bdee364..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/syslog/syslog.go +++ /dev/null @@ -1,266 +0,0 @@ -// Package syslog provides the logdriver for forwarding server logs to syslog endpoints. -package syslog // import "github.com/docker/docker/daemon/logger/syslog" - -import ( - "crypto/tls" - "errors" - "fmt" - "net" - "net/url" - "os" - "strconv" - "strings" - "time" - - syslog "github.com/RackSec/srslog" - - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/daemon/logger/loggerutils" - "github.com/docker/docker/pkg/urlutil" - "github.com/docker/go-connections/tlsconfig" - "github.com/sirupsen/logrus" -) - -const ( - name = "syslog" - secureProto = "tcp+tls" -) - -var facilities = map[string]syslog.Priority{ - "kern": syslog.LOG_KERN, - "user": syslog.LOG_USER, - "mail": syslog.LOG_MAIL, - "daemon": syslog.LOG_DAEMON, - "auth": syslog.LOG_AUTH, - "syslog": syslog.LOG_SYSLOG, - "lpr": syslog.LOG_LPR, - "news": syslog.LOG_NEWS, - "uucp": syslog.LOG_UUCP, - "cron": syslog.LOG_CRON, - "authpriv": syslog.LOG_AUTHPRIV, - "ftp": syslog.LOG_FTP, - "local0": syslog.LOG_LOCAL0, - "local1": syslog.LOG_LOCAL1, - "local2": syslog.LOG_LOCAL2, - "local3": syslog.LOG_LOCAL3, - "local4": syslog.LOG_LOCAL4, - "local5": syslog.LOG_LOCAL5, - "local6": syslog.LOG_LOCAL6, - "local7": syslog.LOG_LOCAL7, -} - -type syslogger struct { - writer *syslog.Writer -} - -func init() { - if err := logger.RegisterLogDriver(name, New); err != nil { - logrus.Fatal(err) - } - if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { - logrus.Fatal(err) - } -} - -// rsyslog uses appname part of syslog message to fill in an %syslogtag% template -// attribute in rsyslog.conf. In order to be backward compatible to rfc3164 -// tag will be also used as an appname -func rfc5424formatterWithAppNameAsTag(p syslog.Priority, hostname, tag, content string) string { - timestamp := time.Now().Format(time.RFC3339) - pid := os.Getpid() - msg := fmt.Sprintf("<%d>%d %s %s %s %d %s - %s", - p, 1, timestamp, hostname, tag, pid, tag, content) - return msg -} - -// The timestamp field in rfc5424 is derived from rfc3339. 
Whereas rfc3339 makes allowances -// for multiple syntaxes, there are further restrictions in rfc5424, i.e., the maximum -// resolution is limited to "TIME-SECFRAC" which is 6 (microsecond resolution) -func rfc5424microformatterWithAppNameAsTag(p syslog.Priority, hostname, tag, content string) string { - timestamp := time.Now().Format("2006-01-02T15:04:05.999999Z07:00") - pid := os.Getpid() - msg := fmt.Sprintf("<%d>%d %s %s %s %d %s - %s", - p, 1, timestamp, hostname, tag, pid, tag, content) - return msg -} - -// New creates a syslog logger using the configuration passed in on -// the context. Supported context configuration variables are -// syslog-address, syslog-facility, syslog-format. -func New(info logger.Info) (logger.Logger, error) { - tag, err := loggerutils.ParseLogTag(info, loggerutils.DefaultTemplate) - if err != nil { - return nil, err - } - - proto, address, err := parseAddress(info.Config["syslog-address"]) - if err != nil { - return nil, err - } - - facility, err := parseFacility(info.Config["syslog-facility"]) - if err != nil { - return nil, err - } - - syslogFormatter, syslogFramer, err := parseLogFormat(info.Config["syslog-format"], proto) - if err != nil { - return nil, err - } - - var log *syslog.Writer - if proto == secureProto { - tlsConfig, tlsErr := parseTLSConfig(info.Config) - if tlsErr != nil { - return nil, tlsErr - } - log, err = syslog.DialWithTLSConfig(proto, address, facility, tag, tlsConfig) - } else { - log, err = syslog.Dial(proto, address, facility, tag) - } - - if err != nil { - return nil, err - } - - log.SetFormatter(syslogFormatter) - log.SetFramer(syslogFramer) - - return &syslogger{ - writer: log, - }, nil -} - -func (s *syslogger) Log(msg *logger.Message) error { - line := string(msg.Line) - source := msg.Source - logger.PutMessage(msg) - if source == "stderr" { - return s.writer.Err(line) - } - return s.writer.Info(line) -} - -func (s *syslogger) Close() error { - return s.writer.Close() -} - -func (s *syslogger) Name() string { - return name -} - -func parseAddress(address string) (string, string, error) { - if address == "" { - return "", "", nil - } - if !urlutil.IsTransportURL(address) { - return "", "", fmt.Errorf("syslog-address should be in form proto://address, got %v", address) - } - url, err := url.Parse(address) - if err != nil { - return "", "", err - } - - // unix and unixgram socket validation - if url.Scheme == "unix" || url.Scheme == "unixgram" { - if _, err := os.Stat(url.Path); err != nil { - return "", "", err - } - return url.Scheme, url.Path, nil - } - - // here we process tcp|udp - host := url.Host - if _, _, err := net.SplitHostPort(host); err != nil { - if !strings.Contains(err.Error(), "missing port in address") { - return "", "", err - } - host = host + ":514" - } - - return url.Scheme, host, nil -} - -// ValidateLogOpt looks for syslog specific log options -// syslog-address, syslog-facility. 
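The two RFC 5424 formatters above differ only in timestamp resolution; both write the tag twice (as APP-NAME and as MSGID) for the rsyslog %syslogtag% compatibility described in the comment above them. A quick sketch of the resulting frame; the values are illustrative:

    package main

    import "fmt"

    func main() {
    	const pri = 3<<3 | 6 // facility daemon(3), severity info(6) => 30
    	frame := fmt.Sprintf("<%d>%d %s %s %s %d %s - %s",
    		pri, 1, "2021-06-26T22:33:31Z", "myhost", "web1", 4242, "web1", "hello")
    	fmt.Println(frame)
    	// <30>1 2021-06-26T22:33:31Z myhost web1 4242 web1 - hello
    }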
-func ValidateLogOpt(cfg map[string]string) error { - for key := range cfg { - switch key { - case "env": - case "env-regex": - case "labels": - case "syslog-address": - case "syslog-facility": - case "syslog-tls-ca-cert": - case "syslog-tls-cert": - case "syslog-tls-key": - case "syslog-tls-skip-verify": - case "tag": - case "syslog-format": - default: - return fmt.Errorf("unknown log opt '%s' for syslog log driver", key) - } - } - if _, _, err := parseAddress(cfg["syslog-address"]); err != nil { - return err - } - if _, err := parseFacility(cfg["syslog-facility"]); err != nil { - return err - } - if _, _, err := parseLogFormat(cfg["syslog-format"], ""); err != nil { - return err - } - return nil -} - -func parseFacility(facility string) (syslog.Priority, error) { - if facility == "" { - return syslog.LOG_DAEMON, nil - } - - if syslogFacility, valid := facilities[facility]; valid { - return syslogFacility, nil - } - - fInt, err := strconv.Atoi(facility) - if err == nil && 0 <= fInt && fInt <= 23 { - return syslog.Priority(fInt << 3), nil - } - - return syslog.Priority(0), errors.New("invalid syslog facility") -} - -func parseTLSConfig(cfg map[string]string) (*tls.Config, error) { - _, skipVerify := cfg["syslog-tls-skip-verify"] - - opts := tlsconfig.Options{ - CAFile: cfg["syslog-tls-ca-cert"], - CertFile: cfg["syslog-tls-cert"], - KeyFile: cfg["syslog-tls-key"], - InsecureSkipVerify: skipVerify, - } - - return tlsconfig.Client(opts) -} - -func parseLogFormat(logFormat, proto string) (syslog.Formatter, syslog.Framer, error) { - switch logFormat { - case "": - return syslog.UnixFormatter, syslog.DefaultFramer, nil - case "rfc3164": - return syslog.RFC3164Formatter, syslog.DefaultFramer, nil - case "rfc5424": - if proto == secureProto { - return rfc5424formatterWithAppNameAsTag, syslog.RFC5425MessageLengthFramer, nil - } - return rfc5424formatterWithAppNameAsTag, syslog.DefaultFramer, nil - case "rfc5424micro": - if proto == secureProto { - return rfc5424microformatterWithAppNameAsTag, syslog.RFC5425MessageLengthFramer, nil - } - return rfc5424microformatterWithAppNameAsTag, syslog.DefaultFramer, nil - default: - return nil, nil, errors.New("Invalid syslog format") - } - -} diff --git a/vendor/github.com/docker/docker/daemon/logger/templates/templates.go b/vendor/github.com/docker/docker/daemon/logger/templates/templates.go deleted file mode 100644 index ab76d0f1c..000000000 --- a/vendor/github.com/docker/docker/daemon/logger/templates/templates.go +++ /dev/null @@ -1,50 +0,0 @@ -package templates // import "github.com/docker/docker/daemon/logger/templates" - -import ( - "bytes" - "encoding/json" - "strings" - "text/template" -) - -// basicFunctions are the set of initial -// functions provided to every template. -var basicFunctions = template.FuncMap{ - "json": func(v interface{}) string { - buf := &bytes.Buffer{} - enc := json.NewEncoder(buf) - enc.SetEscapeHTML(false) - enc.Encode(v) - // Remove the trailing new line added by the encoder - return strings.TrimSpace(buf.String()) - }, - "split": strings.Split, - "join": strings.Join, - "title": strings.Title, - "lower": strings.ToLower, - "upper": strings.ToUpper, - "pad": padWithSpace, - "truncate": truncateWithLength, -} - -// NewParse creates a new tagged template with the basic functions -// and parses the given format. 
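The template helpers in this file are what loggerutils.ParseLogTag runs the tag log-opt through, and the Info methods defined earlier (ID, Name, ImageName, and so on) are what a tag template can reference. A sketch of expanding a custom tag, assuming both deleted packages were still importable:

    package main

    import (
    	"bytes"
    	"fmt"

    	"github.com/docker/docker/daemon/logger"
    	"github.com/docker/docker/daemon/logger/templates"
    )

    func main() {
    	tmpl, err := templates.NewParse("log-tag", "{{.ImageName}}/{{.Name}}/{{.ID}}")
    	if err != nil {
    		panic(err)
    	}

    	info := logger.Info{
    		ContainerID:        "0123456789abcdef0123",
    		ContainerName:      "/web1",
    		ContainerImageName: "nginx:alpine",
    	}

    	var buf bytes.Buffer
    	if err := tmpl.Execute(&buf, &info); err != nil {
    		panic(err)
    	}
    	fmt.Println(buf.String()) // nginx:alpine/web1/0123456789ab
    }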
-func NewParse(tag, format string) (*template.Template, error) { - return template.New(tag).Funcs(basicFunctions).Parse(format) -} - -// padWithSpace adds whitespace to the input if the input is non-empty -func padWithSpace(source string, prefix, suffix int) string { - if source == "" { - return source - } - return strings.Repeat(" ", prefix) + source + strings.Repeat(" ", suffix) -} - -// truncateWithLength truncates the source string up to the length provided by the input -func truncateWithLength(source string, length int) string { - if len(source) < length { - return source - } - return source[:length] -} diff --git a/vendor/github.com/docker/docker/daemon/logs.go b/vendor/github.com/docker/docker/daemon/logs.go deleted file mode 100644 index 37ca4caf6..000000000 --- a/vendor/github.com/docker/docker/daemon/logs.go +++ /dev/null @@ -1,209 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "context" - "strconv" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/backend" - containertypes "github.com/docker/docker/api/types/container" - timetypes "github.com/docker/docker/api/types/time" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// ContainerLogs copies the container's log channel to the channel provided in -// the config. If ContainerLogs returns an error, no messages have been copied. -// and the channel will be closed without data. -// -// if it returns nil, the config channel will be active and return log -// messages until it runs out or the context is canceled. -func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, config *types.ContainerLogsOptions) (messages <-chan *backend.LogMessage, isTTY bool, retErr error) { - lg := logrus.WithFields(logrus.Fields{ - "module": "daemon", - "method": "(*Daemon).ContainerLogs", - "container": containerName, - }) - - if !(config.ShowStdout || config.ShowStderr) { - return nil, false, errdefs.InvalidParameter(errors.New("You must choose at least one stream")) - } - container, err := daemon.GetContainer(containerName) - if err != nil { - return nil, false, err - } - - if container.RemovalInProgress || container.Dead { - return nil, false, errdefs.Conflict(errors.New("can not get logs from container which is dead or marked for removal")) - } - - if container.HostConfig.LogConfig.Type == "none" { - return nil, false, logger.ErrReadLogsNotSupported{} - } - - cLog, cLogCreated, err := daemon.getLogger(container) - if err != nil { - return nil, false, err - } - if cLogCreated { - defer func() { - if retErr != nil { - if err = cLog.Close(); err != nil { - logrus.Errorf("Error closing logger: %v", err) - } - } - }() - } - - logReader, ok := cLog.(logger.LogReader) - if !ok { - return nil, false, logger.ErrReadLogsNotSupported{} - } - - follow := config.Follow && !cLogCreated - tailLines, err := strconv.Atoi(config.Tail) - if err != nil { - tailLines = -1 - } - - var since time.Time - if config.Since != "" { - s, n, err := timetypes.ParseTimestamps(config.Since, 0) - if err != nil { - return nil, false, err - } - since = time.Unix(s, n) - } - - var until time.Time - if config.Until != "" && config.Until != "0" { - s, n, err := timetypes.ParseTimestamps(config.Until, 0) - if err != nil { - return nil, false, err - } - until = time.Unix(s, n) - } - - readConfig := logger.ReadConfig{ - Since: since, - Until: until, - Tail: 
tailLines,
-		Follow: follow,
-	}
-
-	logs := logReader.ReadLogs(readConfig)
-
-	// past this point, we can't possibly return any errors, so we can just
-	// start a goroutine and return to tell the caller not to expect errors
-	// (if the caller wants to give up on logs, they have to cancel the context)
-	// this goroutine functions as a shim between the logger and the caller.
-	messageChan := make(chan *backend.LogMessage, 1)
-	go func() {
-		if cLogCreated {
-			defer func() {
-				if err = cLog.Close(); err != nil {
-					logrus.Errorf("Error closing logger: %v", err)
-				}
-			}()
-		}
-		// set up some defers
-		defer logs.Close()
-
-		// close the messages channel. closing is the only way to signal above
-		// that we're done with logs (other than context cancel, I guess).
-		defer close(messageChan)
-
-		lg.Debug("begin logs")
-		for {
-			select {
-			// I do not believe, as the system is currently designed, any error
-			// is possible, but we should be prepared to handle it anyway. If
-			// we do get an error, copy only the error field to a new object so
-			// we don't end up with partial data in the other fields
-			case err := <-logs.Err:
-				lg.Errorf("Error streaming logs: %v", err)
-				select {
-				case <-ctx.Done():
-				case messageChan <- &backend.LogMessage{Err: err}:
-				}
-				return
-			case <-ctx.Done():
-				lg.Debugf("logs: end stream, ctx is done: %v", ctx.Err())
-				return
-			case msg, ok := <-logs.Msg:
-				// there is some kind of pool or ring buffer in the logger that
-				// produces these messages, and a possible future optimization
-				// might be to use that pool and reuse message objects
-				if !ok {
-					lg.Debug("end logs")
-					return
-				}
-				m := msg.AsLogMessage() // just a pointer conversion, does not copy data
-
-				// there could be a case where the reader stops accepting
-				// messages and the context is canceled. we need to check that
-				// here, or otherwise we risk blocking forever on the message
-				// send.
-				select {
-				case <-ctx.Done():
-					return
-				case messageChan <- m:
-				}
-			}
-		}
-	}()
-	return messageChan, container.Config.Tty, nil
-}
-
-func (daemon *Daemon) getLogger(container *container.Container) (l logger.Logger, created bool, err error) {
-	container.Lock()
-	if container.State.Running {
-		l = container.LogDriver
-	}
-	container.Unlock()
-	if l == nil {
-		created = true
-		l, err = container.StartLogger()
-	}
-	return
-}
-
-// mergeAndVerifyLogConfig merges the daemon log config into the container's log config if the container's log driver is not specified.
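ContainerLogs above hands back a receive-only channel plus the container's TTY flag; the API layer drains it roughly like this sketch (helper name illustrative, error handling trimmed):

    package logsexample

    import (
    	"context"
    	"os"

    	"github.com/docker/docker/api/types"
    	"github.com/docker/docker/daemon"
    )

    // streamLogs drains a container's log channel until the logs run out or the
    // context is cancelled; cancelling is the only way to stop a Follow stream.
    func streamLogs(ctx context.Context, d *daemon.Daemon, name string) error {
    	msgs, _, err := d.ContainerLogs(ctx, name, &types.ContainerLogsOptions{
    		ShowStdout: true,
    		ShowStderr: true,
    		Follow:     true,
    		Tail:       "100",
    	})
    	if err != nil {
    		return err
    	}
    	for m := range msgs { // closed by the shim goroutine above
    		if m.Err != nil {
    			return m.Err
    		}
    		if _, err := os.Stdout.Write(m.Line); err != nil {
    			return err
    		}
    	}
    	return nil
    }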
-func (daemon *Daemon) mergeAndVerifyLogConfig(cfg *containertypes.LogConfig) error { - if cfg.Type == "" { - cfg.Type = daemon.defaultLogConfig.Type - } - - if cfg.Config == nil { - cfg.Config = make(map[string]string) - } - - if cfg.Type == daemon.defaultLogConfig.Type { - for k, v := range daemon.defaultLogConfig.Config { - if _, ok := cfg.Config[k]; !ok { - cfg.Config[k] = v - } - } - } - - return logger.ValidateLogOpts(cfg.Type, cfg.Config) -} - -func (daemon *Daemon) setupDefaultLogConfig() error { - config := daemon.configStore - if len(config.LogConfig.Config) > 0 { - if err := logger.ValidateLogOpts(config.LogConfig.Type, config.LogConfig.Config); err != nil { - return errors.Wrap(err, "failed to set log opts") - } - } - daemon.defaultLogConfig = containertypes.LogConfig{ - Type: config.LogConfig.Type, - Config: config.LogConfig.Config, - } - logrus.Debugf("Using default logging driver %s", daemon.defaultLogConfig.Type) - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/metrics.go b/vendor/github.com/docker/docker/daemon/metrics.go deleted file mode 100644 index f6961a355..000000000 --- a/vendor/github.com/docker/docker/daemon/metrics.go +++ /dev/null @@ -1,192 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "sync" - - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/plugingetter" - "github.com/docker/docker/pkg/plugins" - "github.com/docker/go-metrics" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/sirupsen/logrus" -) - -const metricsPluginType = "MetricsCollector" - -var ( - containerActions metrics.LabeledTimer - networkActions metrics.LabeledTimer - engineInfo metrics.LabeledGauge - engineCpus metrics.Gauge - engineMemory metrics.Gauge - healthChecksCounter metrics.Counter - healthChecksFailedCounter metrics.Counter - - stateCtr *stateCounter -) - -func init() { - ns := metrics.NewNamespace("engine", "daemon", nil) - containerActions = ns.NewLabeledTimer("container_actions", "The number of seconds it takes to process each container action", "action") - for _, a := range []string{ - "start", - "changes", - "commit", - "create", - "delete", - } { - containerActions.WithValues(a).Update(0) - } - - networkActions = ns.NewLabeledTimer("network_actions", "The number of seconds it takes to process each network action", "action") - engineInfo = ns.NewLabeledGauge("engine", "The information related to the engine and the OS it is running on", metrics.Unit("info"), - "version", - "commit", - "architecture", - "graphdriver", - "kernel", "os", - "os_type", - "daemon_id", // ID is a randomly generated unique identifier (e.g. 
UUID4) - ) - engineCpus = ns.NewGauge("engine_cpus", "The number of cpus that the host system of the engine has", metrics.Unit("cpus")) - engineMemory = ns.NewGauge("engine_memory", "The number of bytes of memory that the host system of the engine has", metrics.Bytes) - healthChecksCounter = ns.NewCounter("health_checks", "The total number of health checks") - healthChecksFailedCounter = ns.NewCounter("health_checks_failed", "The total number of failed health checks") - - stateCtr = newStateCounter(ns.NewDesc("container_states", "The count of containers in various states", metrics.Unit("containers"), "state")) - ns.Add(stateCtr) - - metrics.Register(ns) -} - -type stateCounter struct { - mu sync.Mutex - states map[string]string - desc *prometheus.Desc -} - -func newStateCounter(desc *prometheus.Desc) *stateCounter { - return &stateCounter{ - states: make(map[string]string), - desc: desc, - } -} - -func (ctr *stateCounter) get() (running int, paused int, stopped int) { - ctr.mu.Lock() - defer ctr.mu.Unlock() - - states := map[string]int{ - "running": 0, - "paused": 0, - "stopped": 0, - } - for _, state := range ctr.states { - states[state]++ - } - return states["running"], states["paused"], states["stopped"] -} - -func (ctr *stateCounter) set(id, label string) { - ctr.mu.Lock() - ctr.states[id] = label - ctr.mu.Unlock() -} - -func (ctr *stateCounter) del(id string) { - ctr.mu.Lock() - delete(ctr.states, id) - ctr.mu.Unlock() -} - -func (ctr *stateCounter) Describe(ch chan<- *prometheus.Desc) { - ch <- ctr.desc -} - -func (ctr *stateCounter) Collect(ch chan<- prometheus.Metric) { - running, paused, stopped := ctr.get() - ch <- prometheus.MustNewConstMetric(ctr.desc, prometheus.GaugeValue, float64(running), "running") - ch <- prometheus.MustNewConstMetric(ctr.desc, prometheus.GaugeValue, float64(paused), "paused") - ch <- prometheus.MustNewConstMetric(ctr.desc, prometheus.GaugeValue, float64(stopped), "stopped") -} - -func (d *Daemon) cleanupMetricsPlugins() { - ls := d.PluginStore.GetAllManagedPluginsByCap(metricsPluginType) - var wg sync.WaitGroup - wg.Add(len(ls)) - - for _, plugin := range ls { - p := plugin - go func() { - defer wg.Done() - - adapter, err := makePluginAdapter(p) - if err != nil { - logrus.WithError(err).WithField("plugin", p.Name()).Error("Error creating metrics plugin adapater") - return - } - if err := adapter.StopMetrics(); err != nil { - logrus.WithError(err).WithField("plugin", p.Name()).Error("Error stopping plugin metrics collection") - } - }() - } - wg.Wait() - - if d.metricsPluginListener != nil { - d.metricsPluginListener.Close() - } -} - -type metricsPlugin interface { - StartMetrics() error - StopMetrics() error -} - -func makePluginAdapter(p plugingetter.CompatPlugin) (metricsPlugin, error) { // nolint: interfacer - if pc, ok := p.(plugingetter.PluginWithV1Client); ok { - return &metricsPluginAdapter{pc.Client(), p.Name()}, nil - } - - pa, ok := p.(plugingetter.PluginAddr) - if !ok { - return nil, errdefs.System(errors.Errorf("got unknown plugin type %T", p)) - } - - if pa.Protocol() != plugins.ProtocolSchemeHTTPV1 { - return nil, errors.Errorf("plugin protocol not supported: %s", pa.Protocol()) - } - - addr := pa.Addr() - client, err := plugins.NewClientWithTimeout(addr.Network()+"://"+addr.String(), nil, pa.Timeout()) - if err != nil { - return nil, errors.Wrap(err, "error creating metrics plugin client") - } - return &metricsPluginAdapter{client, p.Name()}, nil -} - -type metricsPluginAdapter struct { - c *plugins.Client - name string -} - -func (a 
*metricsPluginAdapter) StartMetrics() error { - type metricsPluginResponse struct { - Err string - } - var res metricsPluginResponse - if err := a.c.Call(metricsPluginType+".StartMetrics", nil, &res); err != nil { - return errors.Wrap(err, "could not start metrics plugin") - } - if res.Err != "" { - return errors.New(res.Err) - } - return nil -} - -func (a *metricsPluginAdapter) StopMetrics() error { - if err := a.c.Call(metricsPluginType+".StopMetrics", nil, nil); err != nil { - return errors.Wrap(err, "error stopping metrics collector") - } - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/metrics_unix.go b/vendor/github.com/docker/docker/daemon/metrics_unix.go deleted file mode 100644 index 452424e68..000000000 --- a/vendor/github.com/docker/docker/daemon/metrics_unix.go +++ /dev/null @@ -1,60 +0,0 @@ -// +build !windows - -package daemon // import "github.com/docker/docker/daemon" - -import ( - "net" - "net/http" - "path/filepath" - - "github.com/docker/docker/pkg/plugingetter" - "github.com/docker/docker/pkg/plugins" - "github.com/docker/docker/plugin" - "github.com/docker/go-metrics" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -func (daemon *Daemon) listenMetricsSock() (string, error) { - path := filepath.Join(daemon.configStore.ExecRoot, "metrics.sock") - unix.Unlink(path) - l, err := net.Listen("unix", path) - if err != nil { - return "", errors.Wrap(err, "error setting up metrics plugin listener") - } - - mux := http.NewServeMux() - mux.Handle("/metrics", metrics.Handler()) - go func() { - http.Serve(l, mux) - }() - daemon.metricsPluginListener = l - return path, nil -} - -func registerMetricsPluginCallback(store *plugin.Store, sockPath string) { - store.RegisterRuntimeOpt(metricsPluginType, func(s *specs.Spec) { - f := plugin.WithSpecMounts([]specs.Mount{ - {Type: "bind", Source: sockPath, Destination: "/run/docker/metrics.sock", Options: []string{"bind", "ro"}}, - }) - f(s) - }) - store.Handle(metricsPluginType, func(name string, client *plugins.Client) { - // Use lookup since nothing in the system can really reference it, no need - // to protect against removal - p, err := store.Get(name, metricsPluginType, plugingetter.Lookup) - if err != nil { - return - } - - adapter, err := makePluginAdapter(p) - if err != nil { - logrus.WithError(err).WithField("plugin", p.Name()).Error("Error creating plugin adapater") - } - if err := adapter.StartMetrics(); err != nil { - logrus.WithError(err).WithField("plugin", p.Name()).Error("Error starting metrics collector plugin") - } - }) -} diff --git a/vendor/github.com/docker/docker/daemon/metrics_unsupported.go b/vendor/github.com/docker/docker/daemon/metrics_unsupported.go deleted file mode 100644 index 653c77fc3..000000000 --- a/vendor/github.com/docker/docker/daemon/metrics_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build windows - -package daemon // import "github.com/docker/docker/daemon" - -import "github.com/docker/docker/pkg/plugingetter" - -func registerMetricsPluginCallback(getter plugingetter.PluginGetter, sockPath string) { -} - -func (daemon *Daemon) listenMetricsSock() (string, error) { - return "", nil -} diff --git a/vendor/github.com/docker/docker/daemon/monitor.go b/vendor/github.com/docker/docker/daemon/monitor.go deleted file mode 100644 index 5e740dd4f..000000000 --- a/vendor/github.com/docker/docker/daemon/monitor.go +++ /dev/null @@ -1,212 +0,0 @@ -package daemon // import 
"github.com/docker/docker/daemon" - -import ( - "context" - "errors" - "fmt" - "runtime" - "strconv" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/container" - "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/restartmanager" - "github.com/sirupsen/logrus" -) - -func (daemon *Daemon) setStateCounter(c *container.Container) { - switch c.StateString() { - case "paused": - stateCtr.set(c.ID, "paused") - case "running": - stateCtr.set(c.ID, "running") - default: - stateCtr.set(c.ID, "stopped") - } -} - -// ProcessEvent is called by libcontainerd whenever an event occurs -func (daemon *Daemon) ProcessEvent(id string, e libcontainerd.EventType, ei libcontainerd.EventInfo) error { - c, err := daemon.GetContainer(id) - if c == nil || err != nil { - return fmt.Errorf("no such container: %s", id) - } - - switch e { - case libcontainerd.EventOOM: - // StateOOM is Linux specific and should never be hit on Windows - if runtime.GOOS == "windows" { - return errors.New("received StateOOM from libcontainerd on Windows. This should never happen") - } - - c.Lock() - defer c.Unlock() - daemon.updateHealthMonitor(c) - if err := c.CheckpointTo(daemon.containersReplica); err != nil { - return err - } - - daemon.LogContainerEvent(c, "oom") - case libcontainerd.EventExit: - if int(ei.Pid) == c.Pid { - c.Lock() - _, _, err := daemon.containerd.DeleteTask(context.Background(), c.ID) - if err != nil { - logrus.WithError(err).Warnf("failed to delete container %s from containerd", c.ID) - } - - c.StreamConfig.Wait() - c.Reset(false) - - exitStatus := container.ExitStatus{ - ExitCode: int(ei.ExitCode), - ExitedAt: ei.ExitedAt, - OOMKilled: ei.OOMKilled, - } - restart, wait, err := c.RestartManager().ShouldRestart(ei.ExitCode, daemon.IsShuttingDown() || c.HasBeenManuallyStopped, time.Since(c.StartedAt)) - if err == nil && restart { - c.RestartCount++ - c.SetRestarting(&exitStatus) - } else { - if ei.Error != nil { - c.SetError(ei.Error) - } - c.SetStopped(&exitStatus) - defer daemon.autoRemove(c) - } - defer c.Unlock() // needs to be called before autoRemove - - // cancel healthcheck here, they will be automatically - // restarted if/when the container is started again - daemon.stopHealthchecks(c) - attributes := map[string]string{ - "exitCode": strconv.Itoa(int(ei.ExitCode)), - } - daemon.LogContainerEventWithAttributes(c, "die", attributes) - daemon.Cleanup(c) - - if err == nil && restart { - go func() { - err := <-wait - if err == nil { - // daemon.netController is initialized when daemon is restoring containers. - // But containerStart will use daemon.netController segment. - // So to avoid panic at startup process, here must wait util daemon restore done. 
- daemon.waitForStartupDone() - if err = daemon.containerStart(c, "", "", false); err != nil { - logrus.Debugf("failed to restart container: %+v", err) - } - } - if err != nil { - c.Lock() - c.SetStopped(&exitStatus) - c.Unlock() - defer daemon.autoRemove(c) - if err != restartmanager.ErrRestartCanceled { - logrus.Errorf("restartmanger wait error: %+v", err) - } - } - }() - } - - daemon.setStateCounter(c) - return c.CheckpointTo(daemon.containersReplica) - } - - if execConfig := c.ExecCommands.Get(ei.ProcessID); execConfig != nil { - ec := int(ei.ExitCode) - execConfig.Lock() - defer execConfig.Unlock() - execConfig.ExitCode = &ec - execConfig.Running = false - execConfig.StreamConfig.Wait() - if err := execConfig.CloseStreams(); err != nil { - logrus.Errorf("failed to cleanup exec %s streams: %s", c.ID, err) - } - - // remove the exec command from the container's store only and not the - // daemon's store so that the exec command can be inspected. - c.ExecCommands.Delete(execConfig.ID, execConfig.Pid) - attributes := map[string]string{ - "execID": execConfig.ID, - "exitCode": strconv.Itoa(ec), - } - daemon.LogContainerEventWithAttributes(c, "exec_die", attributes) - } else { - logrus.WithFields(logrus.Fields{ - "container": c.ID, - "exec-id": ei.ProcessID, - "exec-pid": ei.Pid, - }).Warnf("Ignoring Exit Event, no such exec command found") - } - case libcontainerd.EventStart: - c.Lock() - defer c.Unlock() - - // This is here to handle start not generated by docker - if !c.Running { - c.SetRunning(int(ei.Pid), false) - c.HasBeenManuallyStopped = false - c.HasBeenStartedBefore = true - daemon.setStateCounter(c) - - daemon.initHealthMonitor(c) - - if err := c.CheckpointTo(daemon.containersReplica); err != nil { - return err - } - daemon.LogContainerEvent(c, "start") - } - - case libcontainerd.EventPaused: - c.Lock() - defer c.Unlock() - - if !c.Paused { - c.Paused = true - daemon.setStateCounter(c) - daemon.updateHealthMonitor(c) - if err := c.CheckpointTo(daemon.containersReplica); err != nil { - return err - } - daemon.LogContainerEvent(c, "pause") - } - case libcontainerd.EventResumed: - c.Lock() - defer c.Unlock() - - if c.Paused { - c.Paused = false - daemon.setStateCounter(c) - daemon.updateHealthMonitor(c) - - if err := c.CheckpointTo(daemon.containersReplica); err != nil { - return err - } - daemon.LogContainerEvent(c, "unpause") - } - } - return nil -} - -func (daemon *Daemon) autoRemove(c *container.Container) { - c.Lock() - ar := c.HostConfig.AutoRemove - c.Unlock() - if !ar { - return - } - - var err error - if err = daemon.ContainerRm(c.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err == nil { - return - } - if c := daemon.containers.Get(c.ID); c == nil { - return - } - - if err != nil { - logrus.WithError(err).WithField("container", c.ID).Error("error removing container") - } -} diff --git a/vendor/github.com/docker/docker/daemon/mounts.go b/vendor/github.com/docker/docker/daemon/mounts.go deleted file mode 100644 index 383a38e7e..000000000 --- a/vendor/github.com/docker/docker/daemon/mounts.go +++ /dev/null @@ -1,55 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "context" - "fmt" - "strings" - - mounttypes "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/container" - volumesservice "github.com/docker/docker/volume/service" -) - -func (daemon *Daemon) prepareMountPoints(container *container.Container) error { - for _, config := range container.MountPoints { - if err := 
daemon.lazyInitializeVolume(container.ID, config); err != nil { - return err - } - } - return nil -} - -func (daemon *Daemon) removeMountPoints(container *container.Container, rm bool) error { - var rmErrors []string - ctx := context.TODO() - for _, m := range container.MountPoints { - if m.Type != mounttypes.TypeVolume || m.Volume == nil { - continue - } - daemon.volumes.Release(ctx, m.Volume.Name(), container.ID) - if !rm { - continue - } - - // Do not remove named mountpoints - // these are mountpoints specified like `docker run -v :/foo` - if m.Spec.Source != "" { - continue - } - - err := daemon.volumes.Remove(ctx, m.Volume.Name()) - // Ignore volume in use errors because having this - // volume being referenced by other container is - // not an error, but an implementation detail. - // This prevents docker from logging "ERROR: Volume in use" - // where there is another container using the volume. - if err != nil && !volumesservice.IsInUse(err) { - rmErrors = append(rmErrors, err.Error()) - } - } - - if len(rmErrors) > 0 { - return fmt.Errorf("Error removing volumes:\n%v", strings.Join(rmErrors, "\n")) - } - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/names.go b/vendor/github.com/docker/docker/daemon/names.go deleted file mode 100644 index 6c3194977..000000000 --- a/vendor/github.com/docker/docker/daemon/names.go +++ /dev/null @@ -1,113 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "fmt" - "strings" - - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/names" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/namesgenerator" - "github.com/docker/docker/pkg/stringid" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -var ( - validContainerNameChars = names.RestrictedNameChars - validContainerNamePattern = names.RestrictedNamePattern -) - -func (daemon *Daemon) registerName(container *container.Container) error { - if daemon.Exists(container.ID) { - return fmt.Errorf("Container is already loaded") - } - if err := validateID(container.ID); err != nil { - return err - } - if container.Name == "" { - name, err := daemon.generateNewName(container.ID) - if err != nil { - return err - } - container.Name = name - } - return daemon.containersReplica.ReserveName(container.Name, container.ID) -} - -func (daemon *Daemon) generateIDAndName(name string) (string, string, error) { - var ( - err error - id = stringid.GenerateNonCryptoID() - ) - - if name == "" { - if name, err = daemon.generateNewName(id); err != nil { - return "", "", err - } - return id, name, nil - } - - if name, err = daemon.reserveName(id, name); err != nil { - return "", "", err - } - - return id, name, nil -} - -func (daemon *Daemon) reserveName(id, name string) (string, error) { - if !validContainerNamePattern.MatchString(strings.TrimPrefix(name, "/")) { - return "", errdefs.InvalidParameter(errors.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars)) - } - if name[0] != '/' { - name = "/" + name - } - - if err := daemon.containersReplica.ReserveName(name, id); err != nil { - if err == container.ErrNameReserved { - id, err := daemon.containersReplica.Snapshot().GetID(name) - if err != nil { - logrus.Errorf("got unexpected error while looking up reserved name: %v", err) - return "", err - } - return "", nameConflictError{id: id, name: name} - } - return "", errors.Wrapf(err, "error reserving name: %q", name) - } - return name, nil -} - -func (daemon *Daemon) releaseName(name string) { 
- daemon.containersReplica.ReleaseName(name) -} - -func (daemon *Daemon) generateNewName(id string) (string, error) { - var name string - for i := 0; i < 6; i++ { - name = namesgenerator.GetRandomName(i) - if name[0] != '/' { - name = "/" + name - } - - if err := daemon.containersReplica.ReserveName(name, id); err != nil { - if err == container.ErrNameReserved { - continue - } - return "", err - } - return name, nil - } - - name = "/" + stringid.TruncateID(id) - if err := daemon.containersReplica.ReserveName(name, id); err != nil { - return "", err - } - return name, nil -} - -func validateID(id string) error { - if id == "" { - return fmt.Errorf("Invalid empty id") - } - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/names/names.go b/vendor/github.com/docker/docker/daemon/names/names.go deleted file mode 100644 index 22bba53d6..000000000 --- a/vendor/github.com/docker/docker/daemon/names/names.go +++ /dev/null @@ -1,9 +0,0 @@ -package names // import "github.com/docker/docker/daemon/names" - -import "regexp" - -// RestrictedNameChars collects the characters allowed to represent a name, normally used to validate container and volume names. -const RestrictedNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]` - -// RestrictedNamePattern is a regular expression to validate names against the collection of restricted characters. -var RestrictedNamePattern = regexp.MustCompile(`^` + RestrictedNameChars + `+$`) diff --git a/vendor/github.com/docker/docker/daemon/network.go b/vendor/github.com/docker/docker/daemon/network.go deleted file mode 100644 index 4263409be..000000000 --- a/vendor/github.com/docker/docker/daemon/network.go +++ /dev/null @@ -1,918 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "context" - "fmt" - "net" - "runtime" - "sort" - "strconv" - "strings" - "sync" - - "github.com/docker/docker/api/types" - containertypes "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/container" - clustertypes "github.com/docker/docker/daemon/cluster/provider" - internalnetwork "github.com/docker/docker/daemon/network" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/plugingetter" - "github.com/docker/docker/runconfig" - "github.com/docker/go-connections/nat" - "github.com/docker/libnetwork" - lncluster "github.com/docker/libnetwork/cluster" - "github.com/docker/libnetwork/driverapi" - "github.com/docker/libnetwork/ipamapi" - "github.com/docker/libnetwork/netlabel" - "github.com/docker/libnetwork/options" - networktypes "github.com/docker/libnetwork/types" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// PredefinedNetworkError is returned when user tries to create predefined network that already exists. -type PredefinedNetworkError string - -func (pnr PredefinedNetworkError) Error() string { - return fmt.Sprintf("operation is not permitted on predefined %s network ", string(pnr)) -} - -// Forbidden denotes the type of this error -func (pnr PredefinedNetworkError) Forbidden() {} - -// NetworkControllerEnabled checks if the networking stack is enabled. -// This feature depends on OS primitives and it's disabled in systems like Windows. -func (daemon *Daemon) NetworkControllerEnabled() bool { - return daemon.netController != nil -} - -// FindNetwork returns a network based on: -// 1. Full ID -// 2. Full Name -// 3. 
Partial ID -// as long as there is no ambiguity -func (daemon *Daemon) FindNetwork(term string) (libnetwork.Network, error) { - listByFullName := []libnetwork.Network{} - listByPartialID := []libnetwork.Network{} - for _, nw := range daemon.getAllNetworks() { - if nw.ID() == term { - return nw, nil - } - if nw.Name() == term { - listByFullName = append(listByFullName, nw) - } - if strings.HasPrefix(nw.ID(), term) { - listByPartialID = append(listByPartialID, nw) - } - } - switch { - case len(listByFullName) == 1: - return listByFullName[0], nil - case len(listByFullName) > 1: - return nil, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found on name)", term, len(listByFullName))) - case len(listByPartialID) == 1: - return listByPartialID[0], nil - case len(listByPartialID) > 1: - return nil, errdefs.InvalidParameter(errors.Errorf("network %s is ambiguous (%d matches found based on ID prefix)", term, len(listByPartialID))) - } - - // Be very careful to change the error type here, the - // libnetwork.ErrNoSuchNetwork error is used by the controller - // to retry the creation of the network as managed through the swarm manager - return nil, errdefs.NotFound(libnetwork.ErrNoSuchNetwork(term)) -} - -// GetNetworkByID function returns a network whose ID matches the given ID. -// It fails with an error if no matching network is found. -func (daemon *Daemon) GetNetworkByID(id string) (libnetwork.Network, error) { - c := daemon.netController - if c == nil { - return nil, libnetwork.ErrNoSuchNetwork(id) - } - return c.NetworkByID(id) -} - -// GetNetworkByName function returns a network for a given network name. -// If no network name is given, the default network is returned. -func (daemon *Daemon) GetNetworkByName(name string) (libnetwork.Network, error) { - c := daemon.netController - if c == nil { - return nil, libnetwork.ErrNoSuchNetwork(name) - } - if name == "" { - name = c.Config().Daemon.DefaultNetwork - } - return c.NetworkByName(name) -} - -// GetNetworksByIDPrefix returns a list of networks whose ID partially matches zero or more networks -func (daemon *Daemon) GetNetworksByIDPrefix(partialID string) []libnetwork.Network { - c := daemon.netController - if c == nil { - return nil - } - list := []libnetwork.Network{} - l := func(nw libnetwork.Network) bool { - if strings.HasPrefix(nw.ID(), partialID) { - list = append(list, nw) - } - return false - } - c.WalkNetworks(l) - - return list -} - -// getAllNetworks returns a list containing all networks -func (daemon *Daemon) getAllNetworks() []libnetwork.Network { - c := daemon.netController - if c == nil { - return nil - } - return c.Networks() -} - -type ingressJob struct { - create *clustertypes.NetworkCreateRequest - ip net.IP - jobDone chan struct{} -} - -var ( - ingressWorkerOnce sync.Once - ingressJobsChannel chan *ingressJob - ingressID string -) - -func (daemon *Daemon) startIngressWorker() { - ingressJobsChannel = make(chan *ingressJob, 100) - go func() { - // nolint: gosimple - for { - select { - case r := <-ingressJobsChannel: - if r.create != nil { - daemon.setupIngress(r.create, r.ip, ingressID) - ingressID = r.create.ID - } else { - daemon.releaseIngress(ingressID) - ingressID = "" - } - close(r.jobDone) - } - } - }() -} - -// enqueueIngressJob adds a ingress add/rm request to the worker queue. -// It guarantees the worker is started. 
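
// startIngressWorker above and enqueueIngressJob just below funnel every
// ingress add/remove request through one goroutine, so jobs can never run
// concurrently. A minimal standalone sketch of that pattern, with purely
// illustrative names (not part of the deleted file):

package main

import (
	"fmt"
	"sync"
)

type job struct {
	name string
	done chan struct{}
}

var (
	startOnce sync.Once
	jobs      chan *job
)

func startWorker() {
	jobs = make(chan *job, 100)
	go func() {
		for j := range jobs {
			fmt.Println("processing", j.name) // jobs run strictly one at a time
			close(j.done)                     // signal completion to the enqueuer
		}
	}()
}

func enqueue(j *job) {
	startOnce.Do(startWorker) // guarantees the worker is started exactly once
	jobs <- j
}

func main() {
	j := &job{name: "ingress-setup", done: make(chan struct{})}
	enqueue(j)
	<-j.done // block until the worker has handled the job
}
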
-func (daemon *Daemon) enqueueIngressJob(job *ingressJob) { - ingressWorkerOnce.Do(daemon.startIngressWorker) - ingressJobsChannel <- job -} - -// SetupIngress setups ingress networking. -// The function returns a channel which will signal the caller when the programming is completed. -func (daemon *Daemon) SetupIngress(create clustertypes.NetworkCreateRequest, nodeIP string) (<-chan struct{}, error) { - ip, _, err := net.ParseCIDR(nodeIP) - if err != nil { - return nil, err - } - done := make(chan struct{}) - daemon.enqueueIngressJob(&ingressJob{&create, ip, done}) - return done, nil -} - -// ReleaseIngress releases the ingress networking. -// The function returns a channel which will signal the caller when the programming is completed. -func (daemon *Daemon) ReleaseIngress() (<-chan struct{}, error) { - done := make(chan struct{}) - daemon.enqueueIngressJob(&ingressJob{nil, nil, done}) - return done, nil -} - -func (daemon *Daemon) setupIngress(create *clustertypes.NetworkCreateRequest, ip net.IP, staleID string) { - controller := daemon.netController - controller.AgentInitWait() - - if staleID != "" && staleID != create.ID { - daemon.releaseIngress(staleID) - } - - if _, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true); err != nil { - // If it is any other error other than already - // exists error log error and return. - if _, ok := err.(libnetwork.NetworkNameError); !ok { - logrus.Errorf("Failed creating ingress network: %v", err) - return - } - // Otherwise continue down the call to create or recreate sandbox. - } - - _, err := daemon.GetNetworkByID(create.ID) - if err != nil { - logrus.Errorf("Failed getting ingress network by id after creating: %v", err) - } -} - -func (daemon *Daemon) releaseIngress(id string) { - controller := daemon.netController - - if id == "" { - return - } - - n, err := controller.NetworkByID(id) - if err != nil { - logrus.Errorf("failed to retrieve ingress network %s: %v", id, err) - return - } - - daemon.deleteLoadBalancerSandbox(n) - - if err := n.Delete(); err != nil { - logrus.Errorf("Failed to delete ingress network %s: %v", n.ID(), err) - return - } -} - -// SetNetworkBootstrapKeys sets the bootstrap keys. -func (daemon *Daemon) SetNetworkBootstrapKeys(keys []*networktypes.EncryptionKey) error { - err := daemon.netController.SetKeys(keys) - if err == nil { - // Upon successful key setting dispatch the keys available event - daemon.cluster.SendClusterEvent(lncluster.EventNetworkKeysAvailable) - } - return err -} - -// UpdateAttachment notifies the attacher about the attachment config. -func (daemon *Daemon) UpdateAttachment(networkName, networkID, containerID string, config *network.NetworkingConfig) error { - if daemon.clusterProvider == nil { - return fmt.Errorf("cluster provider is not initialized") - } - - if err := daemon.clusterProvider.UpdateAttachment(networkName, containerID, config); err != nil { - return daemon.clusterProvider.UpdateAttachment(networkID, containerID, config) - } - - return nil -} - -// WaitForDetachment makes the cluster manager wait for detachment of -// the container from the network. -func (daemon *Daemon) WaitForDetachment(ctx context.Context, networkName, networkID, taskID, containerID string) error { - if daemon.clusterProvider == nil { - return fmt.Errorf("cluster provider is not initialized") - } - - return daemon.clusterProvider.WaitForDetachment(ctx, networkName, networkID, taskID, containerID) -} - -// CreateManagedNetwork creates an agent network. 
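
// SetupIngress and ReleaseIngress above hand back a <-chan struct{} instead
// of blocking, leaving the caller to decide when to wait for the network
// programming to finish. A self-contained sketch of consuming such a
// completion channel (generic code, not the daemon API):

package main

import "fmt"

func setupAsync() (<-chan struct{}, error) {
	done := make(chan struct{})
	go func() {
		// ... asynchronous programming work would happen here ...
		close(done) // a closed channel unblocks every receiver
	}()
	return done, nil
}

func main() {
	done, err := setupAsync()
	if err != nil {
		panic(err)
	}
	<-done // wait for completion
	fmt.Println("ingress ready")
}
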
-func (daemon *Daemon) CreateManagedNetwork(create clustertypes.NetworkCreateRequest) error { - _, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true) - return err -} - -// CreateNetwork creates a network with the given name, driver and other optional parameters -func (daemon *Daemon) CreateNetwork(create types.NetworkCreateRequest) (*types.NetworkCreateResponse, error) { - resp, err := daemon.createNetwork(create, "", false) - if err != nil { - return nil, err - } - return resp, err -} - -func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string, agent bool) (*types.NetworkCreateResponse, error) { - if runconfig.IsPreDefinedNetwork(create.Name) { - return nil, PredefinedNetworkError(create.Name) - } - - var warning string - nw, err := daemon.GetNetworkByName(create.Name) - if err != nil { - if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok { - return nil, err - } - } - if nw != nil { - // check if user defined CheckDuplicate, if set true, return err - // otherwise prepare a warning message - if create.CheckDuplicate { - if !agent || nw.Info().Dynamic() { - return nil, libnetwork.NetworkNameError(create.Name) - } - } - warning = fmt.Sprintf("Network with name %s (id : %s) already exists", nw.Name(), nw.ID()) - } - - c := daemon.netController - driver := create.Driver - if driver == "" { - driver = c.Config().Daemon.DefaultDriver - } - - nwOptions := []libnetwork.NetworkOption{ - libnetwork.NetworkOptionEnableIPv6(create.EnableIPv6), - libnetwork.NetworkOptionDriverOpts(create.Options), - libnetwork.NetworkOptionLabels(create.Labels), - libnetwork.NetworkOptionAttachable(create.Attachable), - libnetwork.NetworkOptionIngress(create.Ingress), - libnetwork.NetworkOptionScope(create.Scope), - } - - if create.ConfigOnly { - nwOptions = append(nwOptions, libnetwork.NetworkOptionConfigOnly()) - } - - if create.IPAM != nil { - ipam := create.IPAM - v4Conf, v6Conf, err := getIpamConfig(ipam.Config) - if err != nil { - return nil, err - } - nwOptions = append(nwOptions, libnetwork.NetworkOptionIpam(ipam.Driver, "", v4Conf, v6Conf, ipam.Options)) - } - - if create.Internal { - nwOptions = append(nwOptions, libnetwork.NetworkOptionInternalNetwork()) - } - if agent { - nwOptions = append(nwOptions, libnetwork.NetworkOptionDynamic()) - nwOptions = append(nwOptions, libnetwork.NetworkOptionPersist(false)) - } - - if create.ConfigFrom != nil { - nwOptions = append(nwOptions, libnetwork.NetworkOptionConfigFrom(create.ConfigFrom.Network)) - } - - if agent && driver == "overlay" && (create.Ingress || runtime.GOOS == "windows") { - nodeIP, exists := daemon.GetAttachmentStore().GetIPForNetwork(id) - if !exists { - return nil, fmt.Errorf("Failed to find a load balancer IP to use for network: %v", id) - } - - nwOptions = append(nwOptions, libnetwork.NetworkOptionLBEndpoint(nodeIP)) - } - - n, err := c.NewNetwork(driver, create.Name, id, nwOptions...) - if err != nil { - if _, ok := err.(libnetwork.ErrDataStoreNotInitialized); ok { - // nolint: golint - return nil, errors.New("This node is not a swarm manager. 
Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again.") - } - return nil, err - } - - daemon.pluginRefCount(driver, driverapi.NetworkPluginEndpointType, plugingetter.Acquire) - if create.IPAM != nil { - daemon.pluginRefCount(create.IPAM.Driver, ipamapi.PluginEndpointType, plugingetter.Acquire) - } - daemon.LogNetworkEvent(n, "create") - - return &types.NetworkCreateResponse{ - ID: n.ID(), - Warning: warning, - }, nil -} - -func (daemon *Daemon) pluginRefCount(driver, capability string, mode int) { - var builtinDrivers []string - - if capability == driverapi.NetworkPluginEndpointType { - builtinDrivers = daemon.netController.BuiltinDrivers() - } else if capability == ipamapi.PluginEndpointType { - builtinDrivers = daemon.netController.BuiltinIPAMDrivers() - } - - for _, d := range builtinDrivers { - if d == driver { - return - } - } - - if daemon.PluginStore != nil { - _, err := daemon.PluginStore.Get(driver, capability, mode) - if err != nil { - logrus.WithError(err).WithFields(logrus.Fields{"mode": mode, "driver": driver}).Error("Error handling plugin refcount operation") - } - } -} - -func getIpamConfig(data []network.IPAMConfig) ([]*libnetwork.IpamConf, []*libnetwork.IpamConf, error) { - ipamV4Cfg := []*libnetwork.IpamConf{} - ipamV6Cfg := []*libnetwork.IpamConf{} - for _, d := range data { - iCfg := libnetwork.IpamConf{} - iCfg.PreferredPool = d.Subnet - iCfg.SubPool = d.IPRange - iCfg.Gateway = d.Gateway - iCfg.AuxAddresses = d.AuxAddress - ip, _, err := net.ParseCIDR(d.Subnet) - if err != nil { - return nil, nil, fmt.Errorf("Invalid subnet %s : %v", d.Subnet, err) - } - if ip.To4() != nil { - ipamV4Cfg = append(ipamV4Cfg, &iCfg) - } else { - ipamV6Cfg = append(ipamV6Cfg, &iCfg) - } - } - return ipamV4Cfg, ipamV6Cfg, nil -} - -// UpdateContainerServiceConfig updates a service configuration. -func (daemon *Daemon) UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error { - container, err := daemon.GetContainer(containerName) - if err != nil { - return err - } - - container.NetworkSettings.Service = serviceConfig - return nil -} - -// ConnectContainerToNetwork connects the given container to the given -// network. If either cannot be found, an err is returned. If the -// network cannot be set up, an err is returned. -func (daemon *Daemon) ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error { - container, err := daemon.GetContainer(containerName) - if err != nil { - return err - } - return daemon.ConnectToNetwork(container, networkName, endpointConfig) -} - -// DisconnectContainerFromNetwork disconnects the given container from -// the given network. If either cannot be found, an err is returned. -func (daemon *Daemon) DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error { - container, err := daemon.GetContainer(containerName) - if err != nil { - if force { - return daemon.ForceEndpointDelete(containerName, networkName) - } - return err - } - return daemon.DisconnectFromNetwork(container, networkName, force) -} - -// GetNetworkDriverList returns the list of plugins drivers -// registered for network. 
-func (daemon *Daemon) GetNetworkDriverList() []string { - if !daemon.NetworkControllerEnabled() { - return nil - } - - pluginList := daemon.netController.BuiltinDrivers() - - managedPlugins := daemon.PluginStore.GetAllManagedPluginsByCap(driverapi.NetworkPluginEndpointType) - - for _, plugin := range managedPlugins { - pluginList = append(pluginList, plugin.Name()) - } - - pluginMap := make(map[string]bool) - for _, plugin := range pluginList { - pluginMap[plugin] = true - } - - networks := daemon.netController.Networks() - - for _, network := range networks { - if !pluginMap[network.Type()] { - pluginList = append(pluginList, network.Type()) - pluginMap[network.Type()] = true - } - } - - sort.Strings(pluginList) - - return pluginList -} - -// DeleteManagedNetwork deletes an agent network. -// The requirement of networkID is enforced. -func (daemon *Daemon) DeleteManagedNetwork(networkID string) error { - n, err := daemon.GetNetworkByID(networkID) - if err != nil { - return err - } - return daemon.deleteNetwork(n, true) -} - -// DeleteNetwork destroys a network unless it's one of docker's predefined networks. -func (daemon *Daemon) DeleteNetwork(networkID string) error { - n, err := daemon.GetNetworkByID(networkID) - if err != nil { - return err - } - return daemon.deleteNetwork(n, false) -} - -func (daemon *Daemon) deleteLoadBalancerSandbox(n libnetwork.Network) { - controller := daemon.netController - - //The only endpoint left should be the LB endpoint (nw.Name() + "-endpoint") - endpoints := n.Endpoints() - if len(endpoints) == 1 { - sandboxName := n.Name() + "-sbox" - - info := endpoints[0].Info() - if info != nil { - sb := info.Sandbox() - if sb != nil { - if err := sb.DisableService(); err != nil { - logrus.Warnf("Failed to disable service on sandbox %s: %v", sandboxName, err) - //Ignore error and attempt to delete the load balancer endpoint - } - } - } - - if err := endpoints[0].Delete(true); err != nil { - logrus.Warnf("Failed to delete endpoint %s (%s) in %s: %v", endpoints[0].Name(), endpoints[0].ID(), sandboxName, err) - //Ignore error and attempt to delete the sandbox. - } - - if err := controller.SandboxDestroy(sandboxName); err != nil { - logrus.Warnf("Failed to delete %s sandbox: %v", sandboxName, err) - //Ignore error and attempt to delete the network. - } - } -} - -func (daemon *Daemon) deleteNetwork(nw libnetwork.Network, dynamic bool) error { - if runconfig.IsPreDefinedNetwork(nw.Name()) && !dynamic { - err := fmt.Errorf("%s is a pre-defined network and cannot be removed", nw.Name()) - return errdefs.Forbidden(err) - } - - if dynamic && !nw.Info().Dynamic() { - if runconfig.IsPreDefinedNetwork(nw.Name()) { - // Predefined networks now support swarm services. Make this - // a no-op when cluster requests to remove the predefined network. 
- return nil - } - err := fmt.Errorf("%s is not a dynamic network", nw.Name()) - return errdefs.Forbidden(err) - } - - if err := nw.Delete(); err != nil { - return err - } - - // If this is not a configuration only network, we need to - // update the corresponding remote drivers' reference counts - if !nw.Info().ConfigOnly() { - daemon.pluginRefCount(nw.Type(), driverapi.NetworkPluginEndpointType, plugingetter.Release) - ipamType, _, _, _ := nw.Info().IpamConfig() - daemon.pluginRefCount(ipamType, ipamapi.PluginEndpointType, plugingetter.Release) - daemon.LogNetworkEvent(nw, "destroy") - } - - return nil -} - -// GetNetworks returns a list of all networks -func (daemon *Daemon) GetNetworks() []libnetwork.Network { - return daemon.getAllNetworks() -} - -// clearAttachableNetworks removes the attachable networks -// after disconnecting any connected container -func (daemon *Daemon) clearAttachableNetworks() { - for _, n := range daemon.getAllNetworks() { - if !n.Info().Attachable() { - continue - } - for _, ep := range n.Endpoints() { - epInfo := ep.Info() - if epInfo == nil { - continue - } - sb := epInfo.Sandbox() - if sb == nil { - continue - } - containerID := sb.ContainerID() - if err := daemon.DisconnectContainerFromNetwork(containerID, n.ID(), true); err != nil { - logrus.Warnf("Failed to disconnect container %s from swarm network %s on cluster leave: %v", - containerID, n.Name(), err) - } - } - if err := daemon.DeleteManagedNetwork(n.ID()); err != nil { - logrus.Warnf("Failed to remove swarm network %s on cluster leave: %v", n.Name(), err) - } - } -} - -// buildCreateEndpointOptions builds endpoint options from a given network. -func buildCreateEndpointOptions(c *container.Container, n libnetwork.Network, epConfig *network.EndpointSettings, sb libnetwork.Sandbox, daemonDNS []string) ([]libnetwork.EndpointOption, error) { - var ( - bindings = make(nat.PortMap) - pbList []networktypes.PortBinding - exposeList []networktypes.TransportPort - createOptions []libnetwork.EndpointOption - ) - - defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName() - - if (!c.EnableServiceDiscoveryOnDefaultNetwork() && n.Name() == defaultNetName) || - c.NetworkSettings.IsAnonymousEndpoint { - createOptions = append(createOptions, libnetwork.CreateOptionAnonymous()) - } - - if epConfig != nil { - ipam := epConfig.IPAMConfig - - if ipam != nil { - var ( - ipList []net.IP - ip, ip6, linkip net.IP - ) - - for _, ips := range ipam.LinkLocalIPs { - if linkip = net.ParseIP(ips); linkip == nil && ips != "" { - return nil, errors.Errorf("Invalid link-local IP address: %s", ipam.LinkLocalIPs) - } - ipList = append(ipList, linkip) - - } - - if ip = net.ParseIP(ipam.IPv4Address); ip == nil && ipam.IPv4Address != "" { - return nil, errors.Errorf("Invalid IPv4 address: %s)", ipam.IPv4Address) - } - - if ip6 = net.ParseIP(ipam.IPv6Address); ip6 == nil && ipam.IPv6Address != "" { - return nil, errors.Errorf("Invalid IPv6 address: %s)", ipam.IPv6Address) - } - - createOptions = append(createOptions, - libnetwork.CreateOptionIpam(ip, ip6, ipList, nil)) - - } - - for _, alias := range epConfig.Aliases { - createOptions = append(createOptions, libnetwork.CreateOptionMyAlias(alias)) - } - for k, v := range epConfig.DriverOpts { - createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(options.Generic{k: v})) - } - } - - if c.NetworkSettings.Service != nil { - svcCfg := c.NetworkSettings.Service - - var vip string - if svcCfg.VirtualAddresses[n.ID()] != nil { - vip = 
svcCfg.VirtualAddresses[n.ID()].IPv4 - } - - var portConfigs []*libnetwork.PortConfig - for _, portConfig := range svcCfg.ExposedPorts { - portConfigs = append(portConfigs, &libnetwork.PortConfig{ - Name: portConfig.Name, - Protocol: libnetwork.PortConfig_Protocol(portConfig.Protocol), - TargetPort: portConfig.TargetPort, - PublishedPort: portConfig.PublishedPort, - }) - } - - createOptions = append(createOptions, libnetwork.CreateOptionService(svcCfg.Name, svcCfg.ID, net.ParseIP(vip), portConfigs, svcCfg.Aliases[n.ID()])) - } - - if !containertypes.NetworkMode(n.Name()).IsUserDefined() { - createOptions = append(createOptions, libnetwork.CreateOptionDisableResolution()) - } - - // configs that are applicable only for the endpoint in the network - // to which container was connected to on docker run. - // Ideally all these network-specific endpoint configurations must be moved under - // container.NetworkSettings.Networks[n.Name()] - if n.Name() == c.HostConfig.NetworkMode.NetworkName() || - (n.Name() == defaultNetName && c.HostConfig.NetworkMode.IsDefault()) { - if c.Config.MacAddress != "" { - mac, err := net.ParseMAC(c.Config.MacAddress) - if err != nil { - return nil, err - } - - genericOption := options.Generic{ - netlabel.MacAddress: mac, - } - - createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(genericOption)) - } - - } - - // Port-mapping rules belong to the container & applicable only to non-internal networks - portmaps := getSandboxPortMapInfo(sb) - if n.Info().Internal() || len(portmaps) > 0 { - return createOptions, nil - } - - if c.HostConfig.PortBindings != nil { - for p, b := range c.HostConfig.PortBindings { - bindings[p] = []nat.PortBinding{} - for _, bb := range b { - bindings[p] = append(bindings[p], nat.PortBinding{ - HostIP: bb.HostIP, - HostPort: bb.HostPort, - }) - } - } - } - - portSpecs := c.Config.ExposedPorts - ports := make([]nat.Port, len(portSpecs)) - var i int - for p := range portSpecs { - ports[i] = p - i++ - } - nat.SortPortMap(ports, bindings) - for _, port := range ports { - expose := networktypes.TransportPort{} - expose.Proto = networktypes.ParseProtocol(port.Proto()) - expose.Port = uint16(port.Int()) - exposeList = append(exposeList, expose) - - pb := networktypes.PortBinding{Port: expose.Port, Proto: expose.Proto} - binding := bindings[port] - for i := 0; i < len(binding); i++ { - pbCopy := pb.GetCopy() - newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort)) - var portStart, portEnd int - if err == nil { - portStart, portEnd, err = newP.Range() - } - if err != nil { - return nil, errors.Wrapf(err, "Error parsing HostPort value (%s)", binding[i].HostPort) - } - pbCopy.HostPort = uint16(portStart) - pbCopy.HostPortEnd = uint16(portEnd) - pbCopy.HostIP = net.ParseIP(binding[i].HostIP) - pbList = append(pbList, pbCopy) - } - - if c.HostConfig.PublishAllPorts && len(binding) == 0 { - pbList = append(pbList, pb) - } - } - - var dns []string - - if len(c.HostConfig.DNS) > 0 { - dns = c.HostConfig.DNS - } else if len(daemonDNS) > 0 { - dns = daemonDNS - } - - if len(dns) > 0 { - createOptions = append(createOptions, - libnetwork.CreateOptionDNS(dns)) - } - - createOptions = append(createOptions, - libnetwork.CreateOptionPortMapping(pbList), - libnetwork.CreateOptionExposedPorts(exposeList)) - - return createOptions, nil -} - -// getEndpointInNetwork returns the container's endpoint to the provided network. 
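
// The binding loop above turns each HostPort string (possibly a range such
// as "8080-8090") into a start/end pair using the same go-connections/nat
// helpers it imports. A standalone sketch of that parsing:

package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	p, err := nat.NewPort(nat.SplitProtoPort("8080-8090/tcp"))
	if err != nil {
		panic(err)
	}
	start, end, err := p.Range() // 8080 and 8090 for the range above
	if err != nil {
		panic(err)
	}
	fmt.Println(p.Proto(), start, end)
}
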
-func getEndpointInNetwork(name string, n libnetwork.Network) (libnetwork.Endpoint, error) { - endpointName := strings.TrimPrefix(name, "/") - return n.EndpointByName(endpointName) -} - -// getSandboxPortMapInfo retrieves the current port-mapping programmed for the given sandbox -func getSandboxPortMapInfo(sb libnetwork.Sandbox) nat.PortMap { - pm := nat.PortMap{} - if sb == nil { - return pm - } - - for _, ep := range sb.Endpoints() { - pm, _ = getEndpointPortMapInfo(ep) - if len(pm) > 0 { - break - } - } - return pm -} - -func getEndpointPortMapInfo(ep libnetwork.Endpoint) (nat.PortMap, error) { - pm := nat.PortMap{} - driverInfo, err := ep.DriverInfo() - if err != nil { - return pm, err - } - - if driverInfo == nil { - // It is not an error for epInfo to be nil - return pm, nil - } - - if expData, ok := driverInfo[netlabel.ExposedPorts]; ok { - if exposedPorts, ok := expData.([]networktypes.TransportPort); ok { - for _, tp := range exposedPorts { - natPort, err := nat.NewPort(tp.Proto.String(), strconv.Itoa(int(tp.Port))) - if err != nil { - return pm, fmt.Errorf("Error parsing Port value(%v):%v", tp.Port, err) - } - pm[natPort] = nil - } - } - } - - mapData, ok := driverInfo[netlabel.PortMap] - if !ok { - return pm, nil - } - - if portMapping, ok := mapData.([]networktypes.PortBinding); ok { - for _, pp := range portMapping { - natPort, err := nat.NewPort(pp.Proto.String(), strconv.Itoa(int(pp.Port))) - if err != nil { - return pm, err - } - natBndg := nat.PortBinding{HostIP: pp.HostIP.String(), HostPort: strconv.Itoa(int(pp.HostPort))} - pm[natPort] = append(pm[natPort], natBndg) - } - } - - return pm, nil -} - -// buildEndpointInfo sets endpoint-related fields on container.NetworkSettings based on the provided network and endpoint. -func buildEndpointInfo(networkSettings *internalnetwork.Settings, n libnetwork.Network, ep libnetwork.Endpoint) error { - if ep == nil { - return errors.New("endpoint cannot be nil") - } - - if networkSettings == nil { - return errors.New("network cannot be nil") - } - - epInfo := ep.Info() - if epInfo == nil { - // It is not an error to get an empty endpoint info - return nil - } - - if _, ok := networkSettings.Networks[n.Name()]; !ok { - networkSettings.Networks[n.Name()] = &internalnetwork.EndpointSettings{ - EndpointSettings: &network.EndpointSettings{}, - } - } - networkSettings.Networks[n.Name()].NetworkID = n.ID() - networkSettings.Networks[n.Name()].EndpointID = ep.ID() - - iface := epInfo.Iface() - if iface == nil { - return nil - } - - if iface.MacAddress() != nil { - networkSettings.Networks[n.Name()].MacAddress = iface.MacAddress().String() - } - - if iface.Address() != nil { - ones, _ := iface.Address().Mask.Size() - networkSettings.Networks[n.Name()].IPAddress = iface.Address().IP.String() - networkSettings.Networks[n.Name()].IPPrefixLen = ones - } - - if iface.AddressIPv6() != nil && iface.AddressIPv6().IP.To16() != nil { - onesv6, _ := iface.AddressIPv6().Mask.Size() - networkSettings.Networks[n.Name()].GlobalIPv6Address = iface.AddressIPv6().IP.String() - networkSettings.Networks[n.Name()].GlobalIPv6PrefixLen = onesv6 - } - - return nil -} - -// buildJoinOptions builds endpoint Join options from a given network. 
-func buildJoinOptions(networkSettings *internalnetwork.Settings, n interface {
- Name() string
-}) ([]libnetwork.EndpointOption, error) {
- var joinOptions []libnetwork.EndpointOption
- if epConfig, ok := networkSettings.Networks[n.Name()]; ok {
- for _, str := range epConfig.Links {
- name, alias, err := opts.ParseLink(str)
- if err != nil {
- return nil, err
- }
- joinOptions = append(joinOptions, libnetwork.CreateOptionAlias(name, alias))
- }
- for k, v := range epConfig.DriverOpts {
- joinOptions = append(joinOptions, libnetwork.EndpointOptionGeneric(options.Generic{k: v}))
- }
- }
-
- return joinOptions, nil
-}
diff --git a/vendor/github.com/docker/docker/daemon/network/settings.go b/vendor/github.com/docker/docker/daemon/network/settings.go
deleted file mode 100644
index b0460ed6a..000000000
--- a/vendor/github.com/docker/docker/daemon/network/settings.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package network // import "github.com/docker/docker/daemon/network"
-
-import (
- "net"
-
- networktypes "github.com/docker/docker/api/types/network"
- clustertypes "github.com/docker/docker/daemon/cluster/provider"
- "github.com/docker/go-connections/nat"
- "github.com/pkg/errors"
-)
-
-// Settings stores configuration details about the daemon network config
-// TODO Windows. Many of these fields can be factored out.
-type Settings struct {
- Bridge string
- SandboxID string
- HairpinMode bool
- LinkLocalIPv6Address string
- LinkLocalIPv6PrefixLen int
- Networks map[string]*EndpointSettings
- Service *clustertypes.ServiceConfig
- Ports nat.PortMap
- SandboxKey string
- SecondaryIPAddresses []networktypes.Address
- SecondaryIPv6Addresses []networktypes.Address
- IsAnonymousEndpoint bool
- HasSwarmEndpoint bool
-}
-
-// EndpointSettings is a package local wrapper for
-// networktypes.EndpointSettings which stores Endpoint state that
-// needs to be persisted to disk but not exposed in the api.
-type EndpointSettings struct {
- *networktypes.EndpointSettings
- IPAMOperational bool
-}
-
-// AttachmentStore stores the load balancer IP address for a network id.
-type AttachmentStore struct {
- //key: network id
- //value: load balancer ip address
- networkToNodeLBIP map[string]net.IP
-}
-
-// ResetAttachments clears any existing load balancer IP to network mapping and
-// sets the mapping to the given attachments.
-func (store *AttachmentStore) ResetAttachments(attachments map[string]string) error {
- store.ClearAttachments()
- for nid, nodeIP := range attachments {
- ip, _, err := net.ParseCIDR(nodeIP)
- if err != nil {
- store.networkToNodeLBIP = make(map[string]net.IP)
- return errors.Wrapf(err, "Failed to parse load balancer address %s", nodeIP)
- }
- store.networkToNodeLBIP[nid] = ip
- }
- return nil
-}
-
-// ClearAttachments clears all the mappings of network to load balancer IP Address.
-func (store *AttachmentStore) ClearAttachments() {
- store.networkToNodeLBIP = make(map[string]net.IP)
-}
-
-// GetIPForNetwork returns the load balancer IP address for the given network.
-func (store *AttachmentStore) GetIPForNetwork(networkID string) (net.IP, bool) { - ip, exists := store.networkToNodeLBIP[networkID] - return ip, exists -} diff --git a/vendor/github.com/docker/docker/daemon/oci_linux.go b/vendor/github.com/docker/docker/daemon/oci_linux.go deleted file mode 100644 index 9b39a64ee..000000000 --- a/vendor/github.com/docker/docker/daemon/oci_linux.go +++ /dev/null @@ -1,941 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "fmt" - "io" - "os" - "os/exec" - "path/filepath" - "regexp" - "sort" - "strconv" - "strings" - - containertypes "github.com/docker/docker/api/types/container" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/caps" - daemonconfig "github.com/docker/docker/daemon/config" - "github.com/docker/docker/oci" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/mount" - volumemounts "github.com/docker/docker/volume/mounts" - "github.com/opencontainers/runc/libcontainer/apparmor" - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/opencontainers/runc/libcontainer/devices" - "github.com/opencontainers/runc/libcontainer/user" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -// nolint: gosimple -var ( - deviceCgroupRuleRegex = regexp.MustCompile("^([acb]) ([0-9]+|\\*):([0-9]+|\\*) ([rwm]{1,3})$") -) - -func setResources(s *specs.Spec, r containertypes.Resources) error { - weightDevices, err := getBlkioWeightDevices(r) - if err != nil { - return err - } - readBpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceReadBps) - if err != nil { - return err - } - writeBpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceWriteBps) - if err != nil { - return err - } - readIOpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceReadIOps) - if err != nil { - return err - } - writeIOpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceWriteIOps) - if err != nil { - return err - } - - memoryRes := getMemoryResources(r) - cpuRes, err := getCPUResources(r) - if err != nil { - return err - } - blkioWeight := r.BlkioWeight - - specResources := &specs.LinuxResources{ - Memory: memoryRes, - CPU: cpuRes, - BlockIO: &specs.LinuxBlockIO{ - Weight: &blkioWeight, - WeightDevice: weightDevices, - ThrottleReadBpsDevice: readBpsDevice, - ThrottleWriteBpsDevice: writeBpsDevice, - ThrottleReadIOPSDevice: readIOpsDevice, - ThrottleWriteIOPSDevice: writeIOpsDevice, - }, - Pids: &specs.LinuxPids{ - Limit: r.PidsLimit, - }, - } - - if s.Linux.Resources != nil && len(s.Linux.Resources.Devices) > 0 { - specResources.Devices = s.Linux.Resources.Devices - } - - s.Linux.Resources = specResources - return nil -} - -func setDevices(s *specs.Spec, c *container.Container) error { - // Build lists of devices allowed and created within the container. - var devs []specs.LinuxDevice - devPermissions := s.Linux.Resources.Devices - if c.HostConfig.Privileged { - hostDevices, err := devices.HostDevices() - if err != nil { - return err - } - for _, d := range hostDevices { - devs = append(devs, oci.Device(d)) - } - devPermissions = []specs.LinuxDeviceCgroup{ - { - Allow: true, - Access: "rwm", - }, - } - } else { - for _, deviceMapping := range c.HostConfig.Devices { - d, dPermissions, err := oci.DevicesFromPath(deviceMapping.PathOnHost, deviceMapping.PathInContainer, deviceMapping.CgroupPermissions) - if err != nil { - return err - } - devs = append(devs, d...) 
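
// deviceCgroupRuleRegex above accepts rules of the form
// "<type> <major>:<minor> <access>", e.g. "c 1:3 rwm"; the loop just below
// walks its capture groups. A standalone sketch of matching one rule with
// the same expression:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`^([acb]) ([0-9]+|\*):([0-9]+|\*) ([rwm]{1,3})$`)
	m := re.FindStringSubmatch("c 1:3 rwm")
	if m == nil {
		panic("rule did not match")
	}
	// m[1]=device type, m[2]=major, m[3]=minor, m[4]=access
	fmt.Println(m[1], m[2], m[3], m[4]) // c 1 3 rwm
}
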
- devPermissions = append(devPermissions, dPermissions...) - } - - for _, deviceCgroupRule := range c.HostConfig.DeviceCgroupRules { - ss := deviceCgroupRuleRegex.FindAllStringSubmatch(deviceCgroupRule, -1) - if len(ss[0]) != 5 { - return fmt.Errorf("invalid device cgroup rule format: '%s'", deviceCgroupRule) - } - matches := ss[0] - - dPermissions := specs.LinuxDeviceCgroup{ - Allow: true, - Type: matches[1], - Access: matches[4], - } - if matches[2] == "*" { - major := int64(-1) - dPermissions.Major = &major - } else { - major, err := strconv.ParseInt(matches[2], 10, 64) - if err != nil { - return fmt.Errorf("invalid major value in device cgroup rule format: '%s'", deviceCgroupRule) - } - dPermissions.Major = &major - } - if matches[3] == "*" { - minor := int64(-1) - dPermissions.Minor = &minor - } else { - minor, err := strconv.ParseInt(matches[3], 10, 64) - if err != nil { - return fmt.Errorf("invalid minor value in device cgroup rule format: '%s'", deviceCgroupRule) - } - dPermissions.Minor = &minor - } - devPermissions = append(devPermissions, dPermissions) - } - } - - s.Linux.Devices = append(s.Linux.Devices, devs...) - s.Linux.Resources.Devices = devPermissions - return nil -} - -func (daemon *Daemon) setRlimits(s *specs.Spec, c *container.Container) error { - var rlimits []specs.POSIXRlimit - - // We want to leave the original HostConfig alone so make a copy here - hostConfig := *c.HostConfig - // Merge with the daemon defaults - daemon.mergeUlimits(&hostConfig) - for _, ul := range hostConfig.Ulimits { - rlimits = append(rlimits, specs.POSIXRlimit{ - Type: "RLIMIT_" + strings.ToUpper(ul.Name), - Soft: uint64(ul.Soft), - Hard: uint64(ul.Hard), - }) - } - - s.Process.Rlimits = rlimits - return nil -} - -func setUser(s *specs.Spec, c *container.Container) error { - uid, gid, additionalGids, err := getUser(c, c.Config.User) - if err != nil { - return err - } - s.Process.User.UID = uid - s.Process.User.GID = gid - s.Process.User.AdditionalGids = additionalGids - return nil -} - -func readUserFile(c *container.Container, p string) (io.ReadCloser, error) { - fp, err := c.GetResourcePath(p) - if err != nil { - return nil, err - } - return os.Open(fp) -} - -func getUser(c *container.Container, username string) (uint32, uint32, []uint32, error) { - passwdPath, err := user.GetPasswdPath() - if err != nil { - return 0, 0, nil, err - } - groupPath, err := user.GetGroupPath() - if err != nil { - return 0, 0, nil, err - } - passwdFile, err := readUserFile(c, passwdPath) - if err == nil { - defer passwdFile.Close() - } - groupFile, err := readUserFile(c, groupPath) - if err == nil { - defer groupFile.Close() - } - - execUser, err := user.GetExecUser(username, nil, passwdFile, groupFile) - if err != nil { - return 0, 0, nil, err - } - - // todo: fix this double read by a change to libcontainer/user pkg - groupFile, err = readUserFile(c, groupPath) - if err == nil { - defer groupFile.Close() - } - var addGroups []int - if len(c.HostConfig.GroupAdd) > 0 { - addGroups, err = user.GetAdditionalGroups(c.HostConfig.GroupAdd, groupFile) - if err != nil { - return 0, 0, nil, err - } - } - uid := uint32(execUser.Uid) - gid := uint32(execUser.Gid) - sgids := append(execUser.Sgids, addGroups...) 
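
// setRlimits earlier in this hunk derives the OCI rlimit name mechanically
// from the ulimit name ("nofile" -> "RLIMIT_NOFILE"). A standalone sketch of
// that mapping, with illustrative values:

package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, ul := range []struct {
		name       string
		soft, hard uint64
	}{
		{"nofile", 1024, 4096},
		{"nproc", 2048, 2048},
	} {
		fmt.Printf("%s soft=%d hard=%d\n", "RLIMIT_"+strings.ToUpper(ul.name), ul.soft, ul.hard)
	}
}
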
- var additionalGids []uint32 - for _, g := range sgids { - additionalGids = append(additionalGids, uint32(g)) - } - return uid, gid, additionalGids, nil -} - -func setNamespace(s *specs.Spec, ns specs.LinuxNamespace) { - for i, n := range s.Linux.Namespaces { - if n.Type == ns.Type { - s.Linux.Namespaces[i] = ns - return - } - } - s.Linux.Namespaces = append(s.Linux.Namespaces, ns) -} - -func setCapabilities(s *specs.Spec, c *container.Container) error { - var caplist []string - var err error - if c.HostConfig.Privileged { - caplist = caps.GetAllCapabilities() - } else { - caplist, err = caps.TweakCapabilities(s.Process.Capabilities.Bounding, c.HostConfig.CapAdd, c.HostConfig.CapDrop) - if err != nil { - return err - } - } - s.Process.Capabilities.Effective = caplist - s.Process.Capabilities.Bounding = caplist - s.Process.Capabilities.Permitted = caplist - s.Process.Capabilities.Inheritable = caplist - // setUser has already been executed here - // if non root drop capabilities in the way execve does - if s.Process.User.UID != 0 { - s.Process.Capabilities.Effective = []string{} - s.Process.Capabilities.Permitted = []string{} - } - return nil -} - -func setNamespaces(daemon *Daemon, s *specs.Spec, c *container.Container) error { - userNS := false - // user - if c.HostConfig.UsernsMode.IsPrivate() { - uidMap := daemon.idMappings.UIDs() - if uidMap != nil { - userNS = true - ns := specs.LinuxNamespace{Type: "user"} - setNamespace(s, ns) - s.Linux.UIDMappings = specMapping(uidMap) - s.Linux.GIDMappings = specMapping(daemon.idMappings.GIDs()) - } - } - // network - if !c.Config.NetworkDisabled { - ns := specs.LinuxNamespace{Type: "network"} - parts := strings.SplitN(string(c.HostConfig.NetworkMode), ":", 2) - if parts[0] == "container" { - nc, err := daemon.getNetworkedContainer(c.ID, c.HostConfig.NetworkMode.ConnectedContainer()) - if err != nil { - return err - } - ns.Path = fmt.Sprintf("/proc/%d/ns/net", nc.State.GetPID()) - if userNS { - // to share a net namespace, they must also share a user namespace - nsUser := specs.LinuxNamespace{Type: "user"} - nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", nc.State.GetPID()) - setNamespace(s, nsUser) - } - } else if c.HostConfig.NetworkMode.IsHost() { - ns.Path = c.NetworkSettings.SandboxKey - } - setNamespace(s, ns) - } - - // ipc - ipcMode := c.HostConfig.IpcMode - switch { - case ipcMode.IsContainer(): - ns := specs.LinuxNamespace{Type: "ipc"} - ic, err := daemon.getIpcContainer(ipcMode.Container()) - if err != nil { - return err - } - ns.Path = fmt.Sprintf("/proc/%d/ns/ipc", ic.State.GetPID()) - setNamespace(s, ns) - if userNS { - // to share an IPC namespace, they must also share a user namespace - nsUser := specs.LinuxNamespace{Type: "user"} - nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", ic.State.GetPID()) - setNamespace(s, nsUser) - } - case ipcMode.IsHost(): - oci.RemoveNamespace(s, specs.LinuxNamespaceType("ipc")) - case ipcMode.IsEmpty(): - // A container was created by an older version of the daemon. - // The default behavior used to be what is now called "shareable". 
- fallthrough - case ipcMode.IsPrivate(), ipcMode.IsShareable(), ipcMode.IsNone(): - ns := specs.LinuxNamespace{Type: "ipc"} - setNamespace(s, ns) - default: - return fmt.Errorf("Invalid IPC mode: %v", ipcMode) - } - - // pid - if c.HostConfig.PidMode.IsContainer() { - ns := specs.LinuxNamespace{Type: "pid"} - pc, err := daemon.getPidContainer(c) - if err != nil { - return err - } - ns.Path = fmt.Sprintf("/proc/%d/ns/pid", pc.State.GetPID()) - setNamespace(s, ns) - if userNS { - // to share a PID namespace, they must also share a user namespace - nsUser := specs.LinuxNamespace{Type: "user"} - nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", pc.State.GetPID()) - setNamespace(s, nsUser) - } - } else if c.HostConfig.PidMode.IsHost() { - oci.RemoveNamespace(s, specs.LinuxNamespaceType("pid")) - } else { - ns := specs.LinuxNamespace{Type: "pid"} - setNamespace(s, ns) - } - // uts - if c.HostConfig.UTSMode.IsHost() { - oci.RemoveNamespace(s, specs.LinuxNamespaceType("uts")) - s.Hostname = "" - } - - return nil -} - -func specMapping(s []idtools.IDMap) []specs.LinuxIDMapping { - var ids []specs.LinuxIDMapping - for _, item := range s { - ids = append(ids, specs.LinuxIDMapping{ - HostID: uint32(item.HostID), - ContainerID: uint32(item.ContainerID), - Size: uint32(item.Size), - }) - } - return ids -} - -// Get the source mount point of directory passed in as argument. Also return -// optional fields. -func getSourceMount(source string) (string, string, error) { - // Ensure any symlinks are resolved. - sourcePath, err := filepath.EvalSymlinks(source) - if err != nil { - return "", "", err - } - - mi, err := mount.GetMounts(mount.ParentsFilter(sourcePath)) - if err != nil { - return "", "", err - } - if len(mi) < 1 { - return "", "", fmt.Errorf("Can't find mount point of %s", source) - } - - // find the longest mount point - var idx, maxlen int - for i := range mi { - if len(mi[i].Mountpoint) > maxlen { - maxlen = len(mi[i].Mountpoint) - idx = i - } - } - return mi[idx].Mountpoint, mi[idx].Optional, nil -} - -const ( - sharedPropagationOption = "shared:" - slavePropagationOption = "master:" -) - -// hasMountinfoOption checks if any of the passed any of the given option values -// are set in the passed in option string. -func hasMountinfoOption(opts string, vals ...string) bool { - for _, opt := range strings.Split(opts, " ") { - for _, val := range vals { - if strings.HasPrefix(opt, val) { - return true - } - } - } - return false -} - -// Ensure mount point on which path is mounted, is shared. -func ensureShared(path string) error { - sourceMount, optionalOpts, err := getSourceMount(path) - if err != nil { - return err - } - // Make sure source mount point is shared. - if !hasMountinfoOption(optionalOpts, sharedPropagationOption) { - return errors.Errorf("path %s is mounted on %s but it is not a shared mount", path, sourceMount) - } - return nil -} - -// Ensure mount point on which path is mounted, is either shared or slave. -func ensureSharedOrSlave(path string) error { - sourceMount, optionalOpts, err := getSourceMount(path) - if err != nil { - return err - } - - if !hasMountinfoOption(optionalOpts, sharedPropagationOption, slavePropagationOption) { - return errors.Errorf("path %s is mounted on %s but it is not a shared or slave mount", path, sourceMount) - } - return nil -} - -// Get the set of mount flags that are set on the mount that contains the given -// path and are locked by CL_UNPRIVILEGED. 
This is necessary to ensure that -// bind-mounting "with options" will not fail with user namespaces, due to -// kernel restrictions that require user namespace mounts to preserve -// CL_UNPRIVILEGED locked flags. -func getUnprivilegedMountFlags(path string) ([]string, error) { - var statfs unix.Statfs_t - if err := unix.Statfs(path, &statfs); err != nil { - return nil, err - } - - // The set of keys come from https://github.com/torvalds/linux/blob/v4.13/fs/namespace.c#L1034-L1048. - unprivilegedFlags := map[uint64]string{ - unix.MS_RDONLY: "ro", - unix.MS_NODEV: "nodev", - unix.MS_NOEXEC: "noexec", - unix.MS_NOSUID: "nosuid", - unix.MS_NOATIME: "noatime", - unix.MS_RELATIME: "relatime", - unix.MS_NODIRATIME: "nodiratime", - } - - var flags []string - for mask, flag := range unprivilegedFlags { - if uint64(statfs.Flags)&mask == mask { - flags = append(flags, flag) - } - } - - return flags, nil -} - -var ( - mountPropagationMap = map[string]int{ - "private": mount.PRIVATE, - "rprivate": mount.RPRIVATE, - "shared": mount.SHARED, - "rshared": mount.RSHARED, - "slave": mount.SLAVE, - "rslave": mount.RSLAVE, - } - - mountPropagationReverseMap = map[int]string{ - mount.PRIVATE: "private", - mount.RPRIVATE: "rprivate", - mount.SHARED: "shared", - mount.RSHARED: "rshared", - mount.SLAVE: "slave", - mount.RSLAVE: "rslave", - } -) - -// inSlice tests whether a string is contained in a slice of strings or not. -// Comparison is case sensitive -func inSlice(slice []string, s string) bool { - for _, ss := range slice { - if s == ss { - return true - } - } - return false -} - -func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []container.Mount) error { - userMounts := make(map[string]struct{}) - for _, m := range mounts { - userMounts[m.Destination] = struct{}{} - } - - // Copy all mounts from spec to defaultMounts, except for - // - mounts overriden by a user supplied mount; - // - all mounts under /dev if a user supplied /dev is present; - // - /dev/shm, in case IpcMode is none. - // While at it, also - // - set size for /dev/shm from shmsize. - defaultMounts := s.Mounts[:0] - _, mountDev := userMounts["/dev"] - for _, m := range s.Mounts { - if _, ok := userMounts[m.Destination]; ok { - // filter out mount overridden by a user supplied mount - continue - } - if mountDev && strings.HasPrefix(m.Destination, "/dev/") { - // filter out everything under /dev if /dev is user-mounted - continue - } - - if m.Destination == "/dev/shm" { - if c.HostConfig.IpcMode.IsNone() { - // filter out /dev/shm for "none" IpcMode - continue - } - // set size for /dev/shm mount from spec - sizeOpt := "size=" + strconv.FormatInt(c.HostConfig.ShmSize, 10) - m.Options = append(m.Options, sizeOpt) - } - - defaultMounts = append(defaultMounts, m) - } - - s.Mounts = defaultMounts - for _, m := range mounts { - for _, cm := range s.Mounts { - if cm.Destination == m.Destination { - return duplicateMountPointError(m.Destination) - } - } - - if m.Source == "tmpfs" { - data := m.Data - parser := volumemounts.NewParser("linux") - options := []string{"noexec", "nosuid", "nodev", string(parser.DefaultPropagationMode())} - if data != "" { - options = append(options, strings.Split(data, ",")...) 
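
// ensureShared and ensureSharedOrSlave above decide whether a bind source is
// usable by scanning the mount's optional fields for "shared:"/"master:"
// prefixes. A standalone sketch of that check (same logic as
// hasMountinfoOption; the input strings are illustrative):

package main

import (
	"fmt"
	"strings"
)

func hasOption(opts string, vals ...string) bool {
	for _, opt := range strings.Split(opts, " ") {
		for _, val := range vals {
			if strings.HasPrefix(opt, val) {
				return true // found one of the wanted prefixes
			}
		}
	}
	return false
}

func main() {
	fmt.Println(hasOption("shared:1", "shared:"))            // true
	fmt.Println(hasOption("master:2", "shared:", "master:")) // true
	fmt.Println(hasOption("rw relatime", "shared:"))         // false
}
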
- } - - merged, err := mount.MergeTmpfsOptions(options) - if err != nil { - return err - } - - s.Mounts = append(s.Mounts, specs.Mount{Destination: m.Destination, Source: m.Source, Type: "tmpfs", Options: merged}) - continue - } - - mt := specs.Mount{Destination: m.Destination, Source: m.Source, Type: "bind"} - - // Determine property of RootPropagation based on volume - // properties. If a volume is shared, then keep root propagation - // shared. This should work for slave and private volumes too. - // - // For slave volumes, it can be either [r]shared/[r]slave. - // - // For private volumes any root propagation value should work. - pFlag := mountPropagationMap[m.Propagation] - switch pFlag { - case mount.SHARED, mount.RSHARED: - if err := ensureShared(m.Source); err != nil { - return err - } - rootpg := mountPropagationMap[s.Linux.RootfsPropagation] - if rootpg != mount.SHARED && rootpg != mount.RSHARED { - s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.SHARED] - } - case mount.SLAVE, mount.RSLAVE: - var fallback bool - if err := ensureSharedOrSlave(m.Source); err != nil { - // For backwards compatability purposes, treat mounts from the daemon root - // as special since we automatically add rslave propagation to these mounts - // when the user did not set anything, so we should fallback to the old - // behavior which is to use private propagation which is normally the - // default. - if !strings.HasPrefix(m.Source, daemon.root) && !strings.HasPrefix(daemon.root, m.Source) { - return err - } - - cm, ok := c.MountPoints[m.Destination] - if !ok { - return err - } - if cm.Spec.BindOptions != nil && cm.Spec.BindOptions.Propagation != "" { - // This means the user explicitly set a propagation, do not fallback in that case. - return err - } - fallback = true - logrus.WithField("container", c.ID).WithField("source", m.Source).Warn("Falling back to default propagation for bind source in daemon root") - } - if !fallback { - rootpg := mountPropagationMap[s.Linux.RootfsPropagation] - if rootpg != mount.SHARED && rootpg != mount.RSHARED && rootpg != mount.SLAVE && rootpg != mount.RSLAVE { - s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.RSLAVE] - } - } - } - - opts := []string{"rbind"} - if !m.Writable { - opts = append(opts, "ro") - } - if pFlag != 0 { - opts = append(opts, mountPropagationReverseMap[pFlag]) - } - - // If we are using user namespaces, then we must make sure that we - // don't drop any of the CL_UNPRIVILEGED "locked" flags of the source - // "mount" when we bind-mount. The reason for this is that at the point - // when runc sets up the root filesystem, it is already inside a user - // namespace, and thus cannot change any flags that are locked. - if daemon.configStore.RemappedRoot != "" { - unprivOpts, err := getUnprivilegedMountFlags(m.Source) - if err != nil { - return err - } - opts = append(opts, unprivOpts...) 
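
// getUnprivilegedMountFlags above reads the superblock flags with
// unix.Statfs and maps the CL_UNPRIVILEGED-locked subset back to mount
// option strings. A standalone, Linux-only sketch over the same mask table:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var st unix.Statfs_t
	if err := unix.Statfs("/proc", &st); err != nil {
		panic(err)
	}
	locked := map[uint64]string{
		unix.MS_RDONLY: "ro",
		unix.MS_NODEV:  "nodev",
		unix.MS_NOEXEC: "noexec",
		unix.MS_NOSUID: "nosuid",
	}
	for mask, flag := range locked {
		if uint64(st.Flags)&mask == mask {
			fmt.Println("locked flag:", flag) // must be preserved on bind mounts
		}
	}
}
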
- } - - mt.Options = opts - s.Mounts = append(s.Mounts, mt) - } - - if s.Root.Readonly { - for i, m := range s.Mounts { - switch m.Destination { - case "/proc", "/dev/pts", "/dev/shm", "/dev/mqueue", "/dev": - continue - } - if _, ok := userMounts[m.Destination]; !ok { - if !inSlice(m.Options, "ro") { - s.Mounts[i].Options = append(s.Mounts[i].Options, "ro") - } - } - } - } - - if c.HostConfig.Privileged { - // clear readonly for /sys - for i := range s.Mounts { - if s.Mounts[i].Destination == "/sys" { - clearReadOnly(&s.Mounts[i]) - } - } - s.Linux.ReadonlyPaths = nil - s.Linux.MaskedPaths = nil - } - - // TODO: until a kernel/mount solution exists for handling remount in a user namespace, - // we must clear the readonly flag for the cgroups mount (@mrunalp concurs) - if uidMap := daemon.idMappings.UIDs(); uidMap != nil || c.HostConfig.Privileged { - for i, m := range s.Mounts { - if m.Type == "cgroup" { - clearReadOnly(&s.Mounts[i]) - } - } - } - - return nil -} - -func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) error { - if c.BaseFS == nil { - return errors.New("populateCommonSpec: BaseFS of container " + c.ID + " is unexpectedly nil") - } - linkedEnv, err := daemon.setupLinkedContainers(c) - if err != nil { - return err - } - s.Root = &specs.Root{ - Path: c.BaseFS.Path(), - Readonly: c.HostConfig.ReadonlyRootfs, - } - if err := c.SetupWorkingDirectory(daemon.idMappings.RootPair()); err != nil { - return err - } - cwd := c.Config.WorkingDir - if len(cwd) == 0 { - cwd = "/" - } - s.Process.Args = append([]string{c.Path}, c.Args...) - - // only add the custom init if it is specified and the container is running in its - // own private pid namespace. It does not make sense to add if it is running in the - // host namespace or another container's pid namespace where we already have an init - if c.HostConfig.PidMode.IsPrivate() { - if (c.HostConfig.Init != nil && *c.HostConfig.Init) || - (c.HostConfig.Init == nil && daemon.configStore.Init) { - s.Process.Args = append([]string{"/dev/init", "--", c.Path}, c.Args...) 
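
// When an init process is enabled above, the entrypoint is re-wrapped so
// that PID 1 is the init binary and the real command follows a "--"
// separator. A standalone sketch of the argv rewrite:

package main

import "fmt"

func main() {
	path := "/bin/echo"
	args := []string{"hello", "world"}
	argv := append([]string{"/dev/init", "--", path}, args...)
	fmt.Println(argv) // [/dev/init -- /bin/echo hello world]
}
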
- var path string - if daemon.configStore.InitPath == "" { - path, err = exec.LookPath(daemonconfig.DefaultInitBinary) - if err != nil { - return err - } - } - if daemon.configStore.InitPath != "" { - path = daemon.configStore.InitPath - } - s.Mounts = append(s.Mounts, specs.Mount{ - Destination: "/dev/init", - Type: "bind", - Source: path, - Options: []string{"bind", "ro"}, - }) - } - } - s.Process.Cwd = cwd - s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv) - s.Process.Terminal = c.Config.Tty - s.Hostname = c.FullHostname() - - return nil -} - -func (daemon *Daemon) createSpec(c *container.Container) (retSpec *specs.Spec, err error) { - s := oci.DefaultSpec() - if err := daemon.populateCommonSpec(&s, c); err != nil { - return nil, err - } - - var cgroupsPath string - scopePrefix := "docker" - parent := "/docker" - useSystemd := UsingSystemd(daemon.configStore) - if useSystemd { - parent = "system.slice" - } - - if c.HostConfig.CgroupParent != "" { - parent = c.HostConfig.CgroupParent - } else if daemon.configStore.CgroupParent != "" { - parent = daemon.configStore.CgroupParent - } - - if useSystemd { - cgroupsPath = parent + ":" + scopePrefix + ":" + c.ID - logrus.Debugf("createSpec: cgroupsPath: %s", cgroupsPath) - } else { - cgroupsPath = filepath.Join(parent, c.ID) - } - s.Linux.CgroupsPath = cgroupsPath - - if err := setResources(&s, c.HostConfig.Resources); err != nil { - return nil, fmt.Errorf("linux runtime spec resources: %v", err) - } - s.Linux.Sysctl = c.HostConfig.Sysctls - - p := s.Linux.CgroupsPath - if useSystemd { - initPath, err := cgroups.GetInitCgroup("cpu") - if err != nil { - return nil, err - } - _, err = cgroups.GetOwnCgroup("cpu") - if err != nil { - return nil, err - } - p = filepath.Join(initPath, s.Linux.CgroupsPath) - } - - // Clean path to guard against things like ../../../BAD - parentPath := filepath.Dir(p) - if !filepath.IsAbs(parentPath) { - parentPath = filepath.Clean("/" + parentPath) - } - - if err := daemon.initCgroupsPath(parentPath); err != nil { - return nil, fmt.Errorf("linux init cgroups path: %v", err) - } - if err := setDevices(&s, c); err != nil { - return nil, fmt.Errorf("linux runtime spec devices: %v", err) - } - if err := daemon.setRlimits(&s, c); err != nil { - return nil, fmt.Errorf("linux runtime spec rlimits: %v", err) - } - if err := setUser(&s, c); err != nil { - return nil, fmt.Errorf("linux spec user: %v", err) - } - if err := setNamespaces(daemon, &s, c); err != nil { - return nil, fmt.Errorf("linux spec namespaces: %v", err) - } - if err := setCapabilities(&s, c); err != nil { - return nil, fmt.Errorf("linux spec capabilities: %v", err) - } - if err := setSeccomp(daemon, &s, c); err != nil { - return nil, fmt.Errorf("linux seccomp: %v", err) - } - - if err := daemon.setupContainerMountsRoot(c); err != nil { - return nil, err - } - - if err := daemon.setupIpcDirs(c); err != nil { - return nil, err - } - - defer func() { - if err != nil { - daemon.cleanupSecretDir(c) - } - }() - - if err := daemon.setupSecretDir(c); err != nil { - return nil, err - } - - ms, err := daemon.setupMounts(c) - if err != nil { - return nil, err - } - - if !c.HostConfig.IpcMode.IsPrivate() && !c.HostConfig.IpcMode.IsEmpty() { - ms = append(ms, c.IpcMounts()...) - } - - tmpfsMounts, err := c.TmpfsMounts() - if err != nil { - return nil, err - } - ms = append(ms, tmpfsMounts...) - - secretMounts, err := c.SecretMounts() - if err != nil { - return nil, err - } - ms = append(ms, secretMounts...) 
- - sort.Sort(mounts(ms)) - if err := setMounts(daemon, &s, c, ms); err != nil { - return nil, fmt.Errorf("linux mounts: %v", err) - } - - for _, ns := range s.Linux.Namespaces { - if ns.Type == "network" && ns.Path == "" && !c.Config.NetworkDisabled { - target := filepath.Join("/proc", strconv.Itoa(os.Getpid()), "exe") - s.Hooks = &specs.Hooks{ - Prestart: []specs.Hook{{ - Path: target, - Args: []string{"libnetwork-setkey", c.ID, daemon.netController.ID()}, - }}, - } - } - } - - if apparmor.IsEnabled() { - var appArmorProfile string - if c.AppArmorProfile != "" { - appArmorProfile = c.AppArmorProfile - } else if c.HostConfig.Privileged { - appArmorProfile = "unconfined" - } else { - appArmorProfile = "docker-default" - } - - if appArmorProfile == "docker-default" { - // Unattended upgrades and other fun services can unload AppArmor - // profiles inadvertently. Since we cannot store our profile in - // /etc/apparmor.d, nor can we practically add other ways of - // telling the system to keep our profile loaded, in order to make - // sure that we keep the default profile enabled we dynamically - // reload it if necessary. - if err := ensureDefaultAppArmorProfile(); err != nil { - return nil, err - } - } - - s.Process.ApparmorProfile = appArmorProfile - } - s.Process.SelinuxLabel = c.GetProcessLabel() - s.Process.NoNewPrivileges = c.NoNewPrivileges - s.Process.OOMScoreAdj = &c.HostConfig.OomScoreAdj - s.Linux.MountLabel = c.MountLabel - - // Set the masked and readonly paths with regard to the host config options if they are set. - if c.HostConfig.MaskedPaths != nil { - s.Linux.MaskedPaths = c.HostConfig.MaskedPaths - } - if c.HostConfig.ReadonlyPaths != nil { - s.Linux.ReadonlyPaths = c.HostConfig.ReadonlyPaths - } - - return &s, nil -} - -func clearReadOnly(m *specs.Mount) { - var opt []string - for _, o := range m.Options { - if o != "ro" { - opt = append(opt, o) - } - } - m.Options = opt -} - -// mergeUlimits merges the Ulimits from HostConfig with the daemon defaults, and updates HostConfig -func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig) { - ulimits := c.Ulimits - // Merge ulimits with daemon defaults - ulIdx := make(map[string]struct{}) - for _, ul := range ulimits { - ulIdx[ul.Name] = struct{}{} - } - for name, ul := range daemon.configStore.Ulimits { - if _, exists := ulIdx[name]; !exists { - ulimits = append(ulimits, ul) - } - } - c.Ulimits = ulimits -} diff --git a/vendor/github.com/docker/docker/daemon/oci_windows.go b/vendor/github.com/docker/docker/daemon/oci_windows.go deleted file mode 100644 index f00ab3363..000000000 --- a/vendor/github.com/docker/docker/daemon/oci_windows.go +++ /dev/null @@ -1,408 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "fmt" - "io/ioutil" - "path/filepath" - "runtime" - "strings" - - containertypes "github.com/docker/docker/api/types/container" - "github.com/docker/docker/container" - "github.com/docker/docker/oci" - "github.com/docker/docker/pkg/sysinfo" - "github.com/docker/docker/pkg/system" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "golang.org/x/sys/windows" - "golang.org/x/sys/windows/registry" -) - -const ( - credentialSpecRegistryLocation = `SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` - credentialSpecFileLocation = "CredentialSpecs" -) - -func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { - img, err := daemon.imageService.GetImage(string(c.ImageID)) - if err != nil { - return nil, err -
} - - s := oci.DefaultOSSpec(img.OS) - - linkedEnv, err := daemon.setupLinkedContainers(c) - if err != nil { - return nil, err - } - - // Note, unlike Unix, we do NOT call into SetupWorkingDirectory as - // this is done in VMCompute. Further, we couldn't do it for Hyper-V - // containers anyway. - - // In base spec - s.Hostname = c.FullHostname() - - if err := daemon.setupSecretDir(c); err != nil { - return nil, err - } - - if err := daemon.setupConfigDir(c); err != nil { - return nil, err - } - - // In s.Mounts - mounts, err := daemon.setupMounts(c) - if err != nil { - return nil, err - } - - var isHyperV bool - if c.HostConfig.Isolation.IsDefault() { - // Container using default isolation, so take the default from the daemon configuration - isHyperV = daemon.defaultIsolation.IsHyperV() - } else { - // Container may be requesting an explicit isolation mode. - isHyperV = c.HostConfig.Isolation.IsHyperV() - } - - if isHyperV { - s.Windows.HyperV = &specs.WindowsHyperV{} - } - - // If the container has not been started, and has configs or - // secrets, create symlinks to each config and secret. If it has been - // started before, the symlinks should have already been created. Also, it - // is important to not mount a Hyper-V container that has been started - // before, to protect the host from the container; for example, from - // malicious mutation of NTFS data structures. - if !c.HasBeenStartedBefore && (len(c.SecretReferences) > 0 || len(c.ConfigReferences) > 0) { - // The container file system is mounted before this function is called, - // except for Hyper-V containers, so mount it here in that case. - if isHyperV { - if err := daemon.Mount(c); err != nil { - return nil, err - } - defer daemon.Unmount(c) - } - if err := c.CreateSecretSymlinks(); err != nil { - return nil, err - } - if err := c.CreateConfigSymlinks(); err != nil { - return nil, err - } - } - - secretMounts, err := c.SecretMounts() - if err != nil { - return nil, err - } - if secretMounts != nil { - mounts = append(mounts, secretMounts...) - } - - configMounts := c.ConfigMounts() - if configMounts != nil { - mounts = append(mounts, configMounts...) - } - - for _, mount := range mounts { - m := specs.Mount{ - Source: mount.Source, - Destination: mount.Destination, - } - if !mount.Writable { - m.Options = append(m.Options, "ro") - } - if img.OS != runtime.GOOS { - m.Type = "bind" - m.Options = append(m.Options, "rbind") - m.Options = append(m.Options, fmt.Sprintf("uvmpath=/tmp/gcs/%s/binds", c.ID)) - } - s.Mounts = append(s.Mounts, m) - } - - // In s.Process - s.Process.Args = append([]string{c.Path}, c.Args...)
- if !c.Config.ArgsEscaped && img.OS == "windows" { - s.Process.Args = escapeArgs(s.Process.Args) - } - - s.Process.Cwd = c.Config.WorkingDir - s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv) - if c.Config.Tty { - s.Process.Terminal = c.Config.Tty - s.Process.ConsoleSize = &specs.Box{ - Height: c.HostConfig.ConsoleSize[0], - Width: c.HostConfig.ConsoleSize[1], - } - } - s.Process.User.Username = c.Config.User - s.Windows.LayerFolders, err = daemon.imageService.GetLayerFolders(img, c.RWLayer) - if err != nil { - return nil, errors.Wrapf(err, "container %s", c.ID) - } - - dnsSearch := daemon.getDNSSearchSettings(c) - - // Get the endpoints for the libnetwork-allocated networks attached to the container - var epList []string - AllowUnqualifiedDNSQuery := false - gwHNSID := "" - if c.NetworkSettings != nil { - for n := range c.NetworkSettings.Networks { - sn, err := daemon.FindNetwork(n) - if err != nil { - continue - } - - ep, err := getEndpointInNetwork(c.Name, sn) - if err != nil { - continue - } - - data, err := ep.DriverInfo() - if err != nil { - continue - } - - if data["GW_INFO"] != nil { - gwInfo := data["GW_INFO"].(map[string]interface{}) - if gwInfo["hnsid"] != nil { - gwHNSID = gwInfo["hnsid"].(string) - } - } - - if data["hnsid"] != nil { - epList = append(epList, data["hnsid"].(string)) - } - - if data["AllowUnqualifiedDNSQuery"] != nil { - AllowUnqualifiedDNSQuery = true - } - } - } - - var networkSharedContainerID string - if c.HostConfig.NetworkMode.IsContainer() { - networkSharedContainerID = c.NetworkSharedContainerID - for _, ep := range c.SharedEndpointList { - epList = append(epList, ep) - } - } - - if gwHNSID != "" { - epList = append(epList, gwHNSID) - } - - s.Windows.Network = &specs.WindowsNetwork{ - AllowUnqualifiedDNSQuery: AllowUnqualifiedDNSQuery, - DNSSearchList: dnsSearch, - EndpointList: epList, - NetworkSharedContainerName: networkSharedContainerID, - } - - switch img.OS { - case "windows": - if err := daemon.createSpecWindowsFields(c, &s, isHyperV); err != nil { - return nil, err - } - case "linux": - if !system.LCOWSupported() { - return nil, fmt.Errorf("Linux containers on Windows are not supported") - } - daemon.createSpecLinuxFields(c, &s) - default: - return nil, fmt.Errorf("Unsupported platform %q", img.OS) - } - - return (*specs.Spec)(&s), nil -} - -// Sets the Windows-specific fields of the OCI spec -func (daemon *Daemon) createSpecWindowsFields(c *container.Container, s *specs.Spec, isHyperV bool) error { - if len(s.Process.Cwd) == 0 { - // We default to C:\ to work around the oddity that the - // default directory for cmd running as LocalSystem (or - // ContainerAdministrator) is c:\windows\system32. Hence docker run - // cmd will by default end in c:\windows\system32, rather - // than 'root' (/) on Linux. The oddity is that if you have a dockerfile - // which has no WORKDIR and has a COPY file ., . will be interpreted - // as c:\. Hence, setting it to the default of c:\ makes for consistency.
- s.Process.Cwd = `C:\` - } - - s.Root.Readonly = false // Windows does not support a read-only root filesystem - if !isHyperV { - if c.BaseFS == nil { - return errors.New("createSpecWindowsFields: BaseFS of container " + c.ID + " is unexpectedly nil") - } - - s.Root.Path = c.BaseFS.Path() // This is not set for Hyper-V containers - if !strings.HasSuffix(s.Root.Path, `\`) { - s.Root.Path = s.Root.Path + `\` // Ensure a correctly formatted volume GUID path \\?\Volume{GUID}\ - } - } - - // First boot optimization - s.Windows.IgnoreFlushesDuringBoot = !c.HasBeenStartedBefore - - // In s.Windows.Resources - cpuShares := uint16(c.HostConfig.CPUShares) - cpuMaximum := uint16(c.HostConfig.CPUPercent) * 100 - cpuCount := uint64(c.HostConfig.CPUCount) - if c.HostConfig.NanoCPUs > 0 { - if isHyperV { - cpuCount = uint64(c.HostConfig.NanoCPUs / 1e9) - leftoverNanoCPUs := c.HostConfig.NanoCPUs % 1e9 - if leftoverNanoCPUs != 0 { - cpuCount++ - cpuMaximum = uint16(c.HostConfig.NanoCPUs / int64(cpuCount) / (1e9 / 10000)) - if cpuMaximum < 1 { - // The requested NanoCPUs is so small that we rounded to 0, use 1 instead - cpuMaximum = 1 - } - } - } else { - cpuMaximum = uint16(c.HostConfig.NanoCPUs / int64(sysinfo.NumCPU()) / (1e9 / 10000)) - if cpuMaximum < 1 { - // The requested NanoCPUs is so small that we rounded to 0, use 1 instead - cpuMaximum = 1 - } - } - } - memoryLimit := uint64(c.HostConfig.Memory) - s.Windows.Resources = &specs.WindowsResources{ - CPU: &specs.WindowsCPUResources{ - Maximum: &cpuMaximum, - Shares: &cpuShares, - Count: &cpuCount, - }, - Memory: &specs.WindowsMemoryResources{ - Limit: &memoryLimit, - }, - Storage: &specs.WindowsStorageResources{ - Bps: &c.HostConfig.IOMaximumBandwidth, - Iops: &c.HostConfig.IOMaximumIOps, - }, - } - - // Read and add credentials from the security options if a credential spec has been provided. - if c.HostConfig.SecurityOpt != nil { - cs := "" - for _, sOpt := range c.HostConfig.SecurityOpt { - sOpt = strings.ToLower(sOpt) - if !strings.Contains(sOpt, "=") { - return fmt.Errorf("invalid security option: no equals sign in supplied value %s", sOpt) - } - splitsOpt := strings.SplitN(sOpt, "=", 2) - if len(splitsOpt) != 2 { - return fmt.Errorf("invalid security option: %s", sOpt) - } - if splitsOpt[0] != "credentialspec" { - return fmt.Errorf("security option not supported: %s", splitsOpt[0]) - } - - var ( - match bool - csValue string - err error - ) - if match, csValue = getCredentialSpec("file://", splitsOpt[1]); match { - if csValue == "" { - return fmt.Errorf("no value supplied for file:// credential spec security option") - } - if cs, err = readCredentialSpecFile(c.ID, daemon.root, filepath.Clean(csValue)); err != nil { - return err - } - } else if match, csValue = getCredentialSpec("registry://", splitsOpt[1]); match { - if csValue == "" { - return fmt.Errorf("no value supplied for registry:// credential spec security option") - } - if cs, err = readCredentialSpecRegistry(c.ID, csValue); err != nil { - return err - } - } else { - return fmt.Errorf("invalid credential spec security option - value must be prefixed with file:// or registry://") - } - } - s.Windows.CredentialSpec = cs - } - - return nil -} - -// Sets the Linux-specific fields of the OCI spec -// TODO: @jhowardmsft LCOW Support. We need to do a lot more pulling in what can -// be pulled in from oci_linux.go.
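The NanoCPUs conversion above is easier to follow with concrete numbers: cpuMaximum is expressed on a 0-10000 scale (1/10000 of a CPU), so the request is divided evenly across the host CPUs and then converted from nanoseconds-per-second to that scale. A minimal, self-contained sketch with hypothetical values, mirroring the non-Hyper-V branch (not part of the deleted file):

package main

import "fmt"

func main() {
	// Hypothetical request: 2.5 CPUs (as NanoCPUs) on an 8-CPU host.
	nanoCPUs := int64(2500000000)
	numCPU := int64(8)
	// cpuMaximum is in units of 1/10000 of a CPU, per host CPU.
	cpuMaximum := uint16(nanoCPUs / numCPU / (1e9 / 10000))
	fmt.Println(cpuMaximum) // 3125, i.e. each host CPU is capped at 31.25%
}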
-func (daemon *Daemon) createSpecLinuxFields(c *container.Container, s *specs.Spec) { - if len(s.Process.Cwd) == 0 { - s.Process.Cwd = `/` - } - s.Root.Path = "rootfs" - s.Root.Readonly = c.HostConfig.ReadonlyRootfs -} - -func escapeArgs(args []string) []string { - escapedArgs := make([]string, len(args)) - for i, a := range args { - escapedArgs[i] = windows.EscapeArg(a) - } - return escapedArgs -} - -// mergeUlimits merges the Ulimits from HostConfig with the daemon defaults, and updates HostConfig. -// It does nothing on non-Linux platforms. -func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig) { - return -} - -// getCredentialSpec is a helper function to get the value of a credential spec supplied -// on the CLI, stripping the prefix -func getCredentialSpec(prefix, value string) (bool, string) { - if strings.HasPrefix(value, prefix) { - return true, strings.TrimPrefix(value, prefix) - } - return false, "" -} - -// readCredentialSpecRegistry is a helper function to read a credential spec from -// the Windows registry. An error is returned if the registry key cannot be opened -// or the named value does not exist. -func readCredentialSpecRegistry(id, name string) (string, error) { - var ( - k registry.Key - err error - val string - ) - if k, err = registry.OpenKey(registry.LOCAL_MACHINE, credentialSpecRegistryLocation, registry.QUERY_VALUE); err != nil { - return "", fmt.Errorf("failed handling spec %q for container %s - %s could not be opened", name, id, credentialSpecRegistryLocation) - } - if val, _, err = k.GetStringValue(name); err != nil { - if err == registry.ErrNotExist { - return "", fmt.Errorf("credential spec %q for container %s was not found", name, id) - } - return "", fmt.Errorf("error %v reading credential spec %q from registry for container %s", err, name, id) - } - return val, nil -} - -// readCredentialSpecFile is a helper function to read a credential spec from -// a file under the daemon root. An error is returned if the path is invalid -// or the file cannot be read. -func readCredentialSpecFile(id, root, location string) (string, error) { - if filepath.IsAbs(location) { - return "", fmt.Errorf("invalid credential spec - file:// path cannot be absolute") - } - base := filepath.Join(root, credentialSpecFileLocation) - full := filepath.Join(base, location) - if !strings.HasPrefix(full, base) { - return "", fmt.Errorf("invalid credential spec - file:// path must be under %s", base) - } - bcontents, err := ioutil.ReadFile(full) - if err != nil { - return "", fmt.Errorf("credential spec '%s' for container %s could not be read: %q", full, id, err) - } - return string(bcontents), nil -} diff --git a/vendor/github.com/docker/docker/daemon/pause.go b/vendor/github.com/docker/docker/daemon/pause.go deleted file mode 100644 index be6ec1b92..000000000 --- a/vendor/github.com/docker/docker/daemon/pause.go +++ /dev/null @@ -1,55 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "context" - "fmt" - - "github.com/docker/docker/container" - "github.com/sirupsen/logrus" -) - -// ContainerPause pauses a container -func (daemon *Daemon) ContainerPause(name string) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - return daemon.containerPause(container) -} - -// containerPause pauses the container execution without stopping the process. -// The execution can be resumed by calling containerUnpause.
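The file:// branch of the credential spec handling above guards against path traversal by joining the user-supplied location under the CredentialSpecs directory and then re-checking the prefix. A self-contained sketch of just that guard, with hypothetical paths (the real base on Windows lives under the daemon root):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	base := filepath.Join("/var/lib/docker", "CredentialSpecs") // hypothetical daemon root
	for _, location := range []string{"spec.json", "../../etc/passwd"} {
		full := filepath.Join(base, location) // Join cleans ".." components
		if !strings.HasPrefix(full, base) {
			fmt.Println("rejected:", location) // escaped the CredentialSpecs dir
			continue
		}
		fmt.Println("accepted:", full)
	}
}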
-func (daemon *Daemon) containerPause(container *container.Container) error { - container.Lock() - defer container.Unlock() - - // We cannot pause a container that is not running - if !container.Running { - return errNotRunning(container.ID) - } - - // We cannot pause a container that is already paused - if container.Paused { - return errNotPaused(container.ID) - } - - // We cannot pause a container that is restarting - if container.Restarting { - return errContainerIsRestarting(container.ID) - } - - if err := daemon.containerd.Pause(context.Background(), container.ID); err != nil { - return fmt.Errorf("Cannot pause container %s: %s", container.ID, err) - } - - container.Paused = true - daemon.setStateCounter(container) - daemon.updateHealthMonitor(container) - daemon.LogContainerEvent(container, "pause") - - if err := container.CheckpointTo(daemon.containersReplica); err != nil { - logrus.WithError(err).Warn("could not save container to disk") - } - - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/prune.go b/vendor/github.com/docker/docker/daemon/prune.go deleted file mode 100644 index b690f2e55..000000000 --- a/vendor/github.com/docker/docker/daemon/prune.go +++ /dev/null @@ -1,250 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "context" - "fmt" - "regexp" - "sync/atomic" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - timetypes "github.com/docker/docker/api/types/time" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/runconfig" - "github.com/docker/libnetwork" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -var ( - // errPruneRunning is returned when a prune request is received while - // one is in progress - errPruneRunning = errdefs.Conflict(errors.New("a prune operation is already running")) - - containersAcceptedFilters = map[string]bool{ - "label": true, - "label!": true, - "until": true, - } - - networksAcceptedFilters = map[string]bool{ - "label": true, - "label!": true, - "until": true, - } -) - -// ContainersPrune removes unused containers -func (daemon *Daemon) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (*types.ContainersPruneReport, error) { - if !atomic.CompareAndSwapInt32(&daemon.pruneRunning, 0, 1) { - return nil, errPruneRunning - } - defer atomic.StoreInt32(&daemon.pruneRunning, 0) - - rep := &types.ContainersPruneReport{} - - // make sure that only accepted filters have been received - err := pruneFilters.Validate(containersAcceptedFilters) - if err != nil { - return nil, err - } - - until, err := getUntilFromPruneFilters(pruneFilters) - if err != nil { - return nil, err - } - - allContainers := daemon.List() - for _, c := range allContainers { - select { - case <-ctx.Done(): - logrus.Debugf("ContainersPrune operation cancelled: %#v", *rep) - return rep, nil - default: - } - - if !c.IsRunning() { - if !until.IsZero() && c.Created.After(until) { - continue - } - if !matchLabels(pruneFilters, c.Config.Labels) { - continue - } - cSize, _ := daemon.imageService.GetContainerLayerSize(c.ID) - // TODO: sets RmLink to true?
- err := daemon.ContainerRm(c.ID, &types.ContainerRmConfig{}) - if err != nil { - logrus.Warnf("failed to prune container %s: %v", c.ID, err) - continue - } - if cSize > 0 { - rep.SpaceReclaimed += uint64(cSize) - } - rep.ContainersDeleted = append(rep.ContainersDeleted, c.ID) - } - } - - return rep, nil -} - -// localNetworksPrune removes unused local networks -func (daemon *Daemon) localNetworksPrune(ctx context.Context, pruneFilters filters.Args) *types.NetworksPruneReport { - rep := &types.NetworksPruneReport{} - - until, _ := getUntilFromPruneFilters(pruneFilters) - - // When the function returns true, the walk will stop. - l := func(nw libnetwork.Network) bool { - select { - case <-ctx.Done(): - // context cancelled - return true - default: - } - if nw.Info().ConfigOnly() { - return false - } - if !until.IsZero() && nw.Info().Created().After(until) { - return false - } - if !matchLabels(pruneFilters, nw.Info().Labels()) { - return false - } - nwName := nw.Name() - if runconfig.IsPreDefinedNetwork(nwName) { - return false - } - if len(nw.Endpoints()) > 0 { - return false - } - if err := daemon.DeleteNetwork(nw.ID()); err != nil { - logrus.Warnf("could not remove local network %s: %v", nwName, err) - return false - } - rep.NetworksDeleted = append(rep.NetworksDeleted, nwName) - return false - } - daemon.netController.WalkNetworks(l) - return rep -} - -// clusterNetworksPrune removes unused cluster networks -func (daemon *Daemon) clusterNetworksPrune(ctx context.Context, pruneFilters filters.Args) (*types.NetworksPruneReport, error) { - rep := &types.NetworksPruneReport{} - - until, _ := getUntilFromPruneFilters(pruneFilters) - - cluster := daemon.GetCluster() - - if !cluster.IsManager() { - return rep, nil - } - - networks, err := cluster.GetNetworks() - if err != nil { - return rep, err - } - networkIsInUse := regexp.MustCompile(`network ([[:alnum:]]+) is in use`) - for _, nw := range networks { - select { - case <-ctx.Done(): - return rep, nil - default: - if nw.Ingress { - // Routing-mesh network removal has to be explicitly invoked by user - continue - } - if !until.IsZero() && nw.Created.After(until) { - continue - } - if !matchLabels(pruneFilters, nw.Labels) { - continue - } - // https://github.com/docker/docker/issues/24186 - // `docker network inspect` unfortunately displays ONLY those containers that are local to that node. - // So we try to remove it anyway and check the error - err = cluster.RemoveNetwork(nw.ID) - if err != nil { - // we can safely ignore the "network .. 
is in use" error - match := networkIsInUse.FindStringSubmatch(err.Error()) - if len(match) != 2 || match[1] != nw.ID { - logrus.Warnf("could not remove cluster network %s: %v", nw.Name, err) - } - continue - } - rep.NetworksDeleted = append(rep.NetworksDeleted, nw.Name) - } - } - return rep, nil -} - -// NetworksPrune removes unused networks -func (daemon *Daemon) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (*types.NetworksPruneReport, error) { - if !atomic.CompareAndSwapInt32(&daemon.pruneRunning, 0, 1) { - return nil, errPruneRunning - } - defer atomic.StoreInt32(&daemon.pruneRunning, 0) - - // make sure that only accepted filters have been received - err := pruneFilters.Validate(networksAcceptedFilters) - if err != nil { - return nil, err - } - - if _, err := getUntilFromPruneFilters(pruneFilters); err != nil { - return nil, err - } - - rep := &types.NetworksPruneReport{} - if clusterRep, err := daemon.clusterNetworksPrune(ctx, pruneFilters); err == nil { - rep.NetworksDeleted = append(rep.NetworksDeleted, clusterRep.NetworksDeleted...) - } - - localRep := daemon.localNetworksPrune(ctx, pruneFilters) - rep.NetworksDeleted = append(rep.NetworksDeleted, localRep.NetworksDeleted...) - - select { - case <-ctx.Done(): - logrus.Debugf("NetworksPrune operation cancelled: %#v", *rep) - return rep, nil - default: - } - - return rep, nil -} - -func getUntilFromPruneFilters(pruneFilters filters.Args) (time.Time, error) { - until := time.Time{} - if !pruneFilters.Contains("until") { - return until, nil - } - untilFilters := pruneFilters.Get("until") - if len(untilFilters) > 1 { - return until, fmt.Errorf("more than one until filter specified") - } - ts, err := timetypes.GetTimestamp(untilFilters[0], time.Now()) - if err != nil { - return until, err - } - seconds, nanoseconds, err := timetypes.ParseTimestamps(ts, 0) - if err != nil { - return until, err - } - until = time.Unix(seconds, nanoseconds) - return until, nil -} - -func matchLabels(pruneFilters filters.Args, labels map[string]string) bool { - if !pruneFilters.MatchKVList("label", labels) { - return false - } - // By default MatchKVList will return true if field (like 'label!') does not exist - // So we have to add additional Contains("label!") check - if pruneFilters.Contains("label!") { - if pruneFilters.MatchKVList("label!", labels) { - return false - } - } - return true -} diff --git a/vendor/github.com/docker/docker/daemon/reload.go b/vendor/github.com/docker/docker/daemon/reload.go deleted file mode 100644 index 210864ff8..000000000 --- a/vendor/github.com/docker/docker/daemon/reload.go +++ /dev/null @@ -1,324 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "encoding/json" - "fmt" - - "github.com/docker/docker/daemon/config" - "github.com/docker/docker/daemon/discovery" - "github.com/sirupsen/logrus" -) - -// Reload reads configuration changes and modifies the -// daemon according to those changes. 
-// These are the settings that Reload changes: -// - Platform runtime -// - Daemon debug log level -// - Daemon max concurrent downloads -// - Daemon max concurrent uploads -// - Daemon shutdown timeout (in seconds) -// - Cluster discovery (reconfigure and restart) -// - Daemon labels -// - Insecure registries -// - Registry mirrors -// - Daemon live restore -func (daemon *Daemon) Reload(conf *config.Config) (err error) { - daemon.configStore.Lock() - attributes := map[string]string{} - - defer func() { - jsonString, _ := json.Marshal(daemon.configStore) - - // we're unlocking here, because - // LogDaemonEventWithAttributes() -> SystemInfo() -> GetAllRuntimes() - // holds that lock too. - daemon.configStore.Unlock() - if err == nil { - logrus.Infof("Reloaded configuration: %s", jsonString) - daemon.LogDaemonEventWithAttributes("reload", attributes) - } - }() - - if err := daemon.reloadPlatform(conf, attributes); err != nil { - return err - } - daemon.reloadDebug(conf, attributes) - daemon.reloadMaxConcurrentDownloadsAndUploads(conf, attributes) - daemon.reloadShutdownTimeout(conf, attributes) - - if err := daemon.reloadClusterDiscovery(conf, attributes); err != nil { - return err - } - if err := daemon.reloadLabels(conf, attributes); err != nil { - return err - } - if err := daemon.reloadAllowNondistributableArtifacts(conf, attributes); err != nil { - return err - } - if err := daemon.reloadInsecureRegistries(conf, attributes); err != nil { - return err - } - if err := daemon.reloadRegistryMirrors(conf, attributes); err != nil { - return err - } - if err := daemon.reloadLiveRestore(conf, attributes); err != nil { - return err - } - return daemon.reloadNetworkDiagnosticPort(conf, attributes) -} - -// reloadDebug updates configuration with Debug option -// and updates the passed attributes -func (daemon *Daemon) reloadDebug(conf *config.Config, attributes map[string]string) { - // update corresponding configuration - if conf.IsValueSet("debug") { - daemon.configStore.Debug = conf.Debug - } - // prepare reload event attributes with updatable configurations - attributes["debug"] = fmt.Sprintf("%t", daemon.configStore.Debug) -} - -// reloadMaxConcurrentDownloadsAndUploads updates configuration with max concurrent -// download and upload options and updates the passed attributes -func (daemon *Daemon) reloadMaxConcurrentDownloadsAndUploads(conf *config.Config, attributes map[string]string) { - // If no value is set for max-concurrent-downloads we assume it is the default value - // We always "reset" as the cost is lightweight and easy to maintain. - if conf.IsValueSet("max-concurrent-downloads") && conf.MaxConcurrentDownloads != nil { - *daemon.configStore.MaxConcurrentDownloads = *conf.MaxConcurrentDownloads - } else { - maxConcurrentDownloads := config.DefaultMaxConcurrentDownloads - daemon.configStore.MaxConcurrentDownloads = &maxConcurrentDownloads - } - logrus.Debugf("Reset Max Concurrent Downloads: %d", *daemon.configStore.MaxConcurrentDownloads) - - // If no value is set for max-concurrent-uploads we assume it is the default value - // We always "reset" as the cost is lightweight and easy to maintain.
- if conf.IsValueSet("max-concurrent-uploads") && conf.MaxConcurrentUploads != nil { - *daemon.configStore.MaxConcurrentUploads = *conf.MaxConcurrentUploads - } else { - maxConcurrentUploads := config.DefaultMaxConcurrentUploads - daemon.configStore.MaxConcurrentUploads = &maxConcurrentUploads - } - logrus.Debugf("Reset Max Concurrent Uploads: %d", *daemon.configStore.MaxConcurrentUploads) - - daemon.imageService.UpdateConfig(conf.MaxConcurrentDownloads, conf.MaxConcurrentUploads) - // prepare reload event attributes with updatable configurations - attributes["max-concurrent-downloads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentDownloads) - // prepare reload event attributes with updatable configurations - attributes["max-concurrent-uploads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentUploads) -} - -// reloadShutdownTimeout updates configuration with daemon shutdown timeout option -// and updates the passed attributes -func (daemon *Daemon) reloadShutdownTimeout(conf *config.Config, attributes map[string]string) { - // update corresponding configuration - if conf.IsValueSet("shutdown-timeout") { - daemon.configStore.ShutdownTimeout = conf.ShutdownTimeout - logrus.Debugf("Reset Shutdown Timeout: %d", daemon.configStore.ShutdownTimeout) - } - - // prepare reload event attributes with updatable configurations - attributes["shutdown-timeout"] = fmt.Sprintf("%d", daemon.configStore.ShutdownTimeout) -} - -// reloadClusterDiscovery updates configuration with cluster discovery options -// and updates the passed attributes -func (daemon *Daemon) reloadClusterDiscovery(conf *config.Config, attributes map[string]string) (err error) { - defer func() { - // prepare reload event attributes with updatable configurations - attributes["cluster-store"] = conf.ClusterStore - attributes["cluster-advertise"] = conf.ClusterAdvertise - - attributes["cluster-store-opts"] = "{}" - if daemon.configStore.ClusterOpts != nil { - opts, err2 := json.Marshal(conf.ClusterOpts) - if err != nil { - err = err2 - } - attributes["cluster-store-opts"] = string(opts) - } - }() - - newAdvertise := conf.ClusterAdvertise - newClusterStore := daemon.configStore.ClusterStore - if conf.IsValueSet("cluster-advertise") { - if conf.IsValueSet("cluster-store") { - newClusterStore = conf.ClusterStore - } - newAdvertise, err = config.ParseClusterAdvertiseSettings(newClusterStore, conf.ClusterAdvertise) - if err != nil && err != discovery.ErrDiscoveryDisabled { - return err - } - } - - if daemon.clusterProvider != nil { - if err := conf.IsSwarmCompatible(); err != nil { - return err - } - } - - // check discovery modifications - if !config.ModifiedDiscoverySettings(daemon.configStore, newClusterStore, newAdvertise, conf.ClusterOpts) { - return nil - } - - // enable discovery for the first time if it was not previously enabled - if daemon.discoveryWatcher == nil { - discoveryWatcher, err := discovery.Init(newClusterStore, newAdvertise, conf.ClusterOpts) - if err != nil { - return fmt.Errorf("failed to initialize discovery: %v", err) - } - daemon.discoveryWatcher = discoveryWatcher - } else if err == discovery.ErrDiscoveryDisabled { - // disable discovery if it was previously enabled and it's disabled now - daemon.discoveryWatcher.Stop() - } else if err = daemon.discoveryWatcher.Reload(conf.ClusterStore, newAdvertise, conf.ClusterOpts); err != nil { - // reload discovery - return err - } - - daemon.configStore.ClusterStore = newClusterStore - daemon.configStore.ClusterOpts = conf.ClusterOpts - 
daemon.configStore.ClusterAdvertise = newAdvertise - - if daemon.netController == nil { - return nil - } - netOptions, err := daemon.networkOptions(daemon.configStore, daemon.PluginStore, nil) - if err != nil { - logrus.WithError(err).Warnf("failed to get options with network controller") - return nil - } - err = daemon.netController.ReloadConfiguration(netOptions...) - if err != nil { - logrus.Warnf("Failed to reload configuration with network controller: %v", err) - } - return nil -} - -// reloadLabels updates configuration with engine labels -// and updates the passed attributes -func (daemon *Daemon) reloadLabels(conf *config.Config, attributes map[string]string) error { - // update corresponding configuration - if conf.IsValueSet("labels") { - daemon.configStore.Labels = conf.Labels - } - - // prepare reload event attributes with updatable configurations - if daemon.configStore.Labels != nil { - labels, err := json.Marshal(daemon.configStore.Labels) - if err != nil { - return err - } - attributes["labels"] = string(labels) - } else { - attributes["labels"] = "[]" - } - - return nil -} - -// reloadAllowNondistributableArtifacts updates the configuration with allow-nondistributable-artifacts options -// and updates the passed attributes. -func (daemon *Daemon) reloadAllowNondistributableArtifacts(conf *config.Config, attributes map[string]string) error { - // Update corresponding configuration. - if conf.IsValueSet("allow-nondistributable-artifacts") { - daemon.configStore.AllowNondistributableArtifacts = conf.AllowNondistributableArtifacts - if err := daemon.RegistryService.LoadAllowNondistributableArtifacts(conf.AllowNondistributableArtifacts); err != nil { - return err - } - } - - // Prepare reload event attributes with updatable configurations. 
- if daemon.configStore.AllowNondistributableArtifacts != nil { - v, err := json.Marshal(daemon.configStore.AllowNondistributableArtifacts) - if err != nil { - return err - } - attributes["allow-nondistributable-artifacts"] = string(v) - } else { - attributes["allow-nondistributable-artifacts"] = "[]" - } - - return nil -} - -// reloadInsecureRegistries updates configuration with insecure registry option -// and updates the passed attributes -func (daemon *Daemon) reloadInsecureRegistries(conf *config.Config, attributes map[string]string) error { - // update corresponding configuration - if conf.IsValueSet("insecure-registries") { - daemon.configStore.InsecureRegistries = conf.InsecureRegistries - if err := daemon.RegistryService.LoadInsecureRegistries(conf.InsecureRegistries); err != nil { - return err - } - } - - // prepare reload event attributes with updatable configurations - if daemon.configStore.InsecureRegistries != nil { - insecureRegistries, err := json.Marshal(daemon.configStore.InsecureRegistries) - if err != nil { - return err - } - attributes["insecure-registries"] = string(insecureRegistries) - } else { - attributes["insecure-registries"] = "[]" - } - - return nil -} - -// reloadRegistryMirrors updates configuration with registry mirror options -// and updates the passed attributes -func (daemon *Daemon) reloadRegistryMirrors(conf *config.Config, attributes map[string]string) error { - // update corresponding configuration - if conf.IsValueSet("registry-mirrors") { - daemon.configStore.Mirrors = conf.Mirrors - if err := daemon.RegistryService.LoadMirrors(conf.Mirrors); err != nil { - return err - } - } - - // prepare reload event attributes with updatable configurations - if daemon.configStore.Mirrors != nil { - mirrors, err := json.Marshal(daemon.configStore.Mirrors) - if err != nil { - return err - } - attributes["registry-mirrors"] = string(mirrors) - } else { - attributes["registry-mirrors"] = "[]" - } - - return nil -} - -// reloadLiveRestore updates configuration with the live restore option -// and updates the passed attributes -func (daemon *Daemon) reloadLiveRestore(conf *config.Config, attributes map[string]string) error { - // update corresponding configuration - if conf.IsValueSet("live-restore") { - daemon.configStore.LiveRestoreEnabled = conf.LiveRestoreEnabled - } - - // prepare reload event attributes with updatable configurations - attributes["live-restore"] = fmt.Sprintf("%t", daemon.configStore.LiveRestoreEnabled) - return nil -} - -// reloadNetworkDiagnosticPort updates the network controller, starting the diagnostic server if the config is valid -func (daemon *Daemon) reloadNetworkDiagnosticPort(conf *config.Config, attributes map[string]string) error { - if conf == nil || daemon.netController == nil || !conf.IsValueSet("network-diagnostic-port") || - conf.NetworkDiagnosticPort < 1 || conf.NetworkDiagnosticPort > 65535 { - // If there is no config, make sure that the diagnostic is off - if daemon.netController != nil { - daemon.netController.StopDiagnostic() - } - return nil - } - // Enable the network diagnostic if the flag is set with a valid port within the range - logrus.WithFields(logrus.Fields{"port": conf.NetworkDiagnosticPort, "ip": "127.0.0.1"}).Warn("Starting network diagnostic server") - daemon.netController.StartDiagnostic(conf.NetworkDiagnosticPort) - - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/reload_unix.go b/vendor/github.com/docker/docker/daemon/reload_unix.go deleted file mode 100644 index 9c1bb992a..000000000 ---
a/vendor/github.com/docker/docker/daemon/reload_unix.go +++ /dev/null @@ -1,56 +0,0 @@ -// +build linux freebsd - -package daemon // import "github.com/docker/docker/daemon" - -import ( - "bytes" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/daemon/config" -) - -// reloadPlatform updates configuration with platform specific options -// and updates the passed attributes -func (daemon *Daemon) reloadPlatform(conf *config.Config, attributes map[string]string) error { - if err := conf.ValidatePlatformConfig(); err != nil { - return err - } - - if conf.IsValueSet("runtimes") { - // Always set the default one - conf.Runtimes[config.StockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary} - if err := daemon.initRuntimes(conf.Runtimes); err != nil { - return err - } - daemon.configStore.Runtimes = conf.Runtimes - } - - if conf.DefaultRuntime != "" { - daemon.configStore.DefaultRuntime = conf.DefaultRuntime - } - - if conf.IsValueSet("default-shm-size") { - daemon.configStore.ShmSize = conf.ShmSize - } - - if conf.IpcMode != "" { - daemon.configStore.IpcMode = conf.IpcMode - } - - // Update attributes - var runtimeList bytes.Buffer - for name, rt := range daemon.configStore.Runtimes { - if runtimeList.Len() > 0 { - runtimeList.WriteRune(' ') - } - runtimeList.WriteString(fmt.Sprintf("%s:%s", name, rt)) - } - - attributes["runtimes"] = runtimeList.String() - attributes["default-runtime"] = daemon.configStore.DefaultRuntime - attributes["default-shm-size"] = fmt.Sprintf("%d", daemon.configStore.ShmSize) - attributes["default-ipc-mode"] = daemon.configStore.IpcMode - - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/reload_windows.go b/vendor/github.com/docker/docker/daemon/reload_windows.go deleted file mode 100644 index 548466e8e..000000000 --- a/vendor/github.com/docker/docker/daemon/reload_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import "github.com/docker/docker/daemon/config" - -// reloadPlatform updates configuration with platform specific options -// and updates the passed attributes -func (daemon *Daemon) reloadPlatform(config *config.Config, attributes map[string]string) error { - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/rename.go b/vendor/github.com/docker/docker/daemon/rename.go deleted file mode 100644 index 2b2c48b29..000000000 --- a/vendor/github.com/docker/docker/daemon/rename.go +++ /dev/null @@ -1,123 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "strings" - - dockercontainer "github.com/docker/docker/container" - "github.com/docker/docker/errdefs" - "github.com/docker/libnetwork" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// ContainerRename changes the name of a container, using the oldName -// to find the container. An error is returned if newName is already -// reserved. 
-func (daemon *Daemon) ContainerRename(oldName, newName string) error { - var ( - sid string - sb libnetwork.Sandbox - ) - - if oldName == "" || newName == "" { - return errdefs.InvalidParameter(errors.New("Neither old nor new names may be empty")) - } - - if newName[0] != '/' { - newName = "/" + newName - } - - container, err := daemon.GetContainer(oldName) - if err != nil { - return err - } - - container.Lock() - defer container.Unlock() - - oldName = container.Name - oldIsAnonymousEndpoint := container.NetworkSettings.IsAnonymousEndpoint - - if oldName == newName { - return errdefs.InvalidParameter(errors.New("Renaming a container with the same name as its current name")) - } - - links := map[string]*dockercontainer.Container{} - for k, v := range daemon.linkIndex.children(container) { - if !strings.HasPrefix(k, oldName) { - return errdefs.InvalidParameter(errors.Errorf("Linked container %s does not match parent %s", k, oldName)) - } - links[strings.TrimPrefix(k, oldName)] = v - } - - if newName, err = daemon.reserveName(container.ID, newName); err != nil { - return errors.Wrap(err, "Error when allocating new name") - } - - for k, v := range links { - daemon.containersReplica.ReserveName(newName+k, v.ID) - daemon.linkIndex.link(container, v, newName+k) - } - - container.Name = newName - container.NetworkSettings.IsAnonymousEndpoint = false - - defer func() { - if err != nil { - container.Name = oldName - container.NetworkSettings.IsAnonymousEndpoint = oldIsAnonymousEndpoint - daemon.reserveName(container.ID, oldName) - for k, v := range links { - daemon.containersReplica.ReserveName(oldName+k, v.ID) - daemon.linkIndex.link(container, v, oldName+k) - daemon.linkIndex.unlink(newName+k, v, container) - daemon.containersReplica.ReleaseName(newName + k) - } - daemon.releaseName(newName) - } - }() - - for k, v := range links { - daemon.linkIndex.unlink(oldName+k, v, container) - daemon.containersReplica.ReleaseName(oldName + k) - } - daemon.releaseName(oldName) - if err = container.CheckpointTo(daemon.containersReplica); err != nil { - return err - } - - attributes := map[string]string{ - "oldName": oldName, - } - - if !container.Running { - daemon.LogContainerEventWithAttributes(container, "rename", attributes) - return nil - } - - defer func() { - if err != nil { - container.Name = oldName - container.NetworkSettings.IsAnonymousEndpoint = oldIsAnonymousEndpoint - if e := container.CheckpointTo(daemon.containersReplica); e != nil { - logrus.Errorf("%s: Failed in writing to Disk on rename failure: %v", container.ID, e) - } - } - }() - - sid = container.NetworkSettings.SandboxID - if sid != "" && daemon.netController != nil { - sb, err = daemon.netController.SandboxByID(sid) - if err != nil { - return err - } - - err = sb.Rename(strings.TrimPrefix(container.Name, "/")) - if err != nil { - return err - } - } - - daemon.LogContainerEventWithAttributes(container, "rename", attributes) - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/resize.go b/vendor/github.com/docker/docker/daemon/resize.go deleted file mode 100644 index 21240650f..000000000 --- a/vendor/github.com/docker/docker/daemon/resize.go +++ /dev/null @@ -1,50 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "context" - "fmt" - "time" - - "github.com/docker/docker/libcontainerd" -) - -// ContainerResize changes the size of the TTY of the process running -// in the container with the given name to the given height and width. 
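One detail of ContainerRename above is worth noting as a pattern: it relies on deferred closures that read the named return value err and roll the in-memory state back if any later step failed. A minimal, self-contained sketch of that idiom (the function and names are illustrative, not the daemon's API):

package main

import (
	"errors"
	"fmt"
)

// rename applies the new name first, then validates; the deferred closure
// observes the named return value and undoes the change on failure.
func rename(state *string, newName string) (err error) {
	old := *state
	*state = newName
	defer func() {
		if err != nil {
			*state = old // roll back on any error returned below
		}
	}()
	if newName == "" {
		return errors.New("empty name")
	}
	return nil
}

func main() {
	name := "old"
	fmt.Println(rename(&name, ""), name)    // empty name old  (rolled back)
	fmt.Println(rename(&name, "new"), name) // <nil> new
}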
-func (daemon *Daemon) ContainerResize(name string, height, width int) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - - if !container.IsRunning() { - return errNotRunning(container.ID) - } - - if err = daemon.containerd.ResizeTerminal(context.Background(), container.ID, libcontainerd.InitProcessName, width, height); err == nil { - attributes := map[string]string{ - "height": fmt.Sprintf("%d", height), - "width": fmt.Sprintf("%d", width), - } - daemon.LogContainerEventWithAttributes(container, "resize", attributes) - } - return err -} - -// ContainerExecResize changes the size of the TTY of the process -// running in the exec with the given name to the given height and -// width. -func (daemon *Daemon) ContainerExecResize(name string, height, width int) error { - ec, err := daemon.getExecConfig(name) - if err != nil { - return err - } - // TODO: the timeout is hardcoded here, it would be more flexible to make it - // a parameter in resize request context, which would need API changes. - timeout := 10 * time.Second - select { - case <-ec.Started: - return daemon.containerd.ResizeTerminal(context.Background(), ec.ContainerID, ec.ID, width, height) - case <-time.After(timeout): - return fmt.Errorf("timeout waiting for exec session ready") - } -} diff --git a/vendor/github.com/docker/docker/daemon/restart.go b/vendor/github.com/docker/docker/daemon/restart.go deleted file mode 100644 index 0f06dea26..000000000 --- a/vendor/github.com/docker/docker/daemon/restart.go +++ /dev/null @@ -1,70 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "fmt" - - "github.com/docker/docker/container" - "github.com/sirupsen/logrus" -) - -// ContainerRestart stops and starts a container. It attempts to -// gracefully stop the container within the given timeout, forcefully -// stopping it if the timeout is exceeded. If given a negative -// timeout, ContainerRestart will wait forever until a graceful -// stop. Returns an error if the container cannot be found, or if -// there is an underlying error at any stage of the restart. -func (daemon *Daemon) ContainerRestart(name string, seconds *int) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - if seconds == nil { - stopTimeout := container.StopTimeout() - seconds = &stopTimeout - } - if err := daemon.containerRestart(container, *seconds); err != nil { - return fmt.Errorf("Cannot restart container %s: %v", name, err) - } - return nil - -} - -// containerRestart attempts to gracefully stop and then start the -// container. When stopping, wait for the given duration in seconds to -// gracefully stop, before forcefully terminating the container. If -// given a negative duration, wait forever for a graceful stop. 
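ContainerExecResize above is a compact instance of the Go wait-or-timeout idiom: select on the session's Started channel against time.After. A self-contained sketch of the same shape (the 10-second value mirrors the hardcoded timeout above; the rest is illustrative):

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitStarted blocks until started is closed, or fails once timeout elapses.
func waitStarted(started <-chan struct{}, timeout time.Duration) error {
	select {
	case <-started:
		return nil // the exec session is ready; safe to resize its TTY
	case <-time.After(timeout):
		return errors.New("timeout waiting for exec session ready")
	}
}

func main() {
	started := make(chan struct{})
	go func() {
		time.Sleep(50 * time.Millisecond) // simulate the exec starting up
		close(started)
	}()
	fmt.Println(waitStarted(started, 10*time.Second)) // <nil>
}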
-func (daemon *Daemon) containerRestart(container *container.Container, seconds int) error { - // Avoid unnecessarily unmounting and then directly mounting - // the container when the container stops and then starts - // again - if err := daemon.Mount(container); err == nil { - defer daemon.Unmount(container) - } - - if container.IsRunning() { - // set AutoRemove flag to false before stop so the container won't be - // removed during restart process - autoRemove := container.HostConfig.AutoRemove - - container.HostConfig.AutoRemove = false - err := daemon.containerStop(container, seconds) - // restore AutoRemove irrespective of whether the stop worked or not - container.HostConfig.AutoRemove = autoRemove - // containerStop will write HostConfig to disk, we shall restore AutoRemove - // in disk too - if toDiskErr := daemon.checkpointAndSave(container); toDiskErr != nil { - logrus.Errorf("Write container to disk error: %v", toDiskErr) - } - - if err != nil { - return err - } - } - - if err := daemon.containerStart(container, "", "", true); err != nil { - return err - } - - daemon.LogContainerEvent(container, "restart") - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/seccomp_disabled.go b/vendor/github.com/docker/docker/daemon/seccomp_disabled.go deleted file mode 100644 index 3855c7830..000000000 --- a/vendor/github.com/docker/docker/daemon/seccomp_disabled.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build linux,!seccomp - -package daemon // import "github.com/docker/docker/daemon" - -import ( - "fmt" - - "github.com/docker/docker/container" - "github.com/opencontainers/runtime-spec/specs-go" -) - -var supportsSeccomp = false - -func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error { - if c.SeccompProfile != "" && c.SeccompProfile != "unconfined" { - return fmt.Errorf("seccomp profiles are not supported on this daemon, you cannot specify a custom seccomp profile") - } - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/seccomp_linux.go b/vendor/github.com/docker/docker/daemon/seccomp_linux.go deleted file mode 100644 index 66ab8c768..000000000 --- a/vendor/github.com/docker/docker/daemon/seccomp_linux.go +++ /dev/null @@ -1,55 +0,0 @@ -// +build linux,seccomp - -package daemon // import "github.com/docker/docker/daemon" - -import ( - "fmt" - - "github.com/docker/docker/container" - "github.com/docker/docker/profiles/seccomp" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/sirupsen/logrus" -) - -var supportsSeccomp = true - -func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error { - var profile *specs.LinuxSeccomp - var err error - - if c.HostConfig.Privileged { - return nil - } - - if !daemon.seccompEnabled { - if c.SeccompProfile != "" && c.SeccompProfile != "unconfined" { - return fmt.Errorf("Seccomp is not enabled in your kernel, cannot run a custom seccomp profile.") - } - logrus.Warn("Seccomp is not enabled in your kernel, running container without default profile.") - c.SeccompProfile = "unconfined" - } - if c.SeccompProfile == "unconfined" { - return nil - } - if c.SeccompProfile != "" { - profile, err = seccomp.LoadProfile(c.SeccompProfile, rs) - if err != nil { - return err - } - } else { - if daemon.seccompProfile != nil { - profile, err = seccomp.LoadProfile(string(daemon.seccompProfile), rs) - if err != nil { - return err - } - } else { - profile, err = seccomp.GetDefaultProfile(rs) - if err != nil { - return err - } - } - } - - rs.Linux.Seccomp = profile - return nil -} diff --git 
a/vendor/github.com/docker/docker/daemon/seccomp_unsupported.go b/vendor/github.com/docker/docker/daemon/seccomp_unsupported.go deleted file mode 100644 index a323fe0be..000000000 --- a/vendor/github.com/docker/docker/daemon/seccomp_unsupported.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build !linux - -package daemon // import "github.com/docker/docker/daemon" - -var supportsSeccomp = false diff --git a/vendor/github.com/docker/docker/daemon/secrets.go b/vendor/github.com/docker/docker/daemon/secrets.go deleted file mode 100644 index 6d368a9fd..000000000 --- a/vendor/github.com/docker/docker/daemon/secrets.go +++ /dev/null @@ -1,23 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - swarmtypes "github.com/docker/docker/api/types/swarm" - "github.com/sirupsen/logrus" -) - -// SetContainerSecretReferences sets the container secret references needed -func (daemon *Daemon) SetContainerSecretReferences(name string, refs []*swarmtypes.SecretReference) error { - if !secretsSupported() && len(refs) > 0 { - logrus.Warn("secrets are not supported on this platform") - return nil - } - - c, err := daemon.GetContainer(name) - if err != nil { - return err - } - - c.SecretReferences = refs - - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/secrets_linux.go b/vendor/github.com/docker/docker/daemon/secrets_linux.go deleted file mode 100644 index 2be70be31..000000000 --- a/vendor/github.com/docker/docker/daemon/secrets_linux.go +++ /dev/null @@ -1,5 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -func secretsSupported() bool { - return true -} diff --git a/vendor/github.com/docker/docker/daemon/secrets_unsupported.go b/vendor/github.com/docker/docker/daemon/secrets_unsupported.go deleted file mode 100644 index edad69c56..000000000 --- a/vendor/github.com/docker/docker/daemon/secrets_unsupported.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !linux,!windows - -package daemon // import "github.com/docker/docker/daemon" - -func secretsSupported() bool { - return false -} diff --git a/vendor/github.com/docker/docker/daemon/secrets_windows.go b/vendor/github.com/docker/docker/daemon/secrets_windows.go deleted file mode 100644 index 2be70be31..000000000 --- a/vendor/github.com/docker/docker/daemon/secrets_windows.go +++ /dev/null @@ -1,5 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -func secretsSupported() bool { - return true -} diff --git a/vendor/github.com/docker/docker/daemon/selinux_linux.go b/vendor/github.com/docker/docker/daemon/selinux_linux.go deleted file mode 100644 index f87b30b73..000000000 --- a/vendor/github.com/docker/docker/daemon/selinux_linux.go +++ /dev/null @@ -1,15 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import "github.com/opencontainers/selinux/go-selinux" - -func selinuxSetDisabled() { - selinux.SetDisabled() -} - -func selinuxFreeLxcContexts(label string) { - selinux.ReleaseLabel(label) -} - -func selinuxEnabled() bool { - return selinux.GetEnabled() -} diff --git a/vendor/github.com/docker/docker/daemon/selinux_unsupported.go b/vendor/github.com/docker/docker/daemon/selinux_unsupported.go deleted file mode 100644 index 49d0d13bc..000000000 --- a/vendor/github.com/docker/docker/daemon/selinux_unsupported.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !linux - -package daemon // import "github.com/docker/docker/daemon" - -func selinuxSetDisabled() { -} - -func selinuxFreeLxcContexts(label string) { -} - -func selinuxEnabled() bool { - return false -} diff --git 
a/vendor/github.com/docker/docker/daemon/start.go b/vendor/github.com/docker/docker/daemon/start.go deleted file mode 100644 index c00bd9ceb..000000000 --- a/vendor/github.com/docker/docker/daemon/start.go +++ /dev/null @@ -1,254 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "context" - "runtime" - "time" - - "github.com/docker/docker/api/types" - containertypes "github.com/docker/docker/api/types/container" - "github.com/docker/docker/container" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/mount" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// ContainerStart starts a container. -func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.HostConfig, checkpoint string, checkpointDir string) error { - if checkpoint != "" && !daemon.HasExperimental() { - return errdefs.InvalidParameter(errors.New("checkpoint is only supported in experimental mode")) - } - - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - - validateState := func() error { - container.Lock() - defer container.Unlock() - - if container.Paused { - return errdefs.Conflict(errors.New("cannot start a paused container, try unpause instead")) - } - - if container.Running { - return containerNotModifiedError{running: true} - } - - if container.RemovalInProgress || container.Dead { - return errdefs.Conflict(errors.New("container is marked for removal and cannot be started")) - } - return nil - } - - if err := validateState(); err != nil { - return err - } - - // Windows does not have the backwards compatibility issue here. - if runtime.GOOS != "windows" { - // This is kept for backward compatibility - hostconfig should be passed when - // creating a container, not during start. - if hostConfig != nil { - logrus.Warn("DEPRECATED: Setting host configuration options when the container starts is deprecated and has been removed in Docker 1.12") - oldNetworkMode := container.HostConfig.NetworkMode - if err := daemon.setSecurityOptions(container, hostConfig); err != nil { - return errdefs.InvalidParameter(err) - } - if err := daemon.mergeAndVerifyLogConfig(&hostConfig.LogConfig); err != nil { - return errdefs.InvalidParameter(err) - } - if err := daemon.setHostConfig(container, hostConfig); err != nil { - return errdefs.InvalidParameter(err) - } - newNetworkMode := container.HostConfig.NetworkMode - if string(oldNetworkMode) != string(newNetworkMode) { - // if the user has changed the network mode on start, clean up the - // old networks. It is a deprecated feature and has been removed in Docker 1.12 - container.NetworkSettings.Networks = nil - if err := container.CheckpointTo(daemon.containersReplica); err != nil { - return errdefs.System(err) - } - } - container.InitDNSHostConfig() - } - } else { - if hostConfig != nil { - return errdefs.InvalidParameter(errors.New("Supplying a hostconfig on start is not supported. It should be supplied on create")) - } - } - - // check if hostConfig is in line with the current system settings. - // It may happen that cgroups were unmounted, or the like. - if _, err = daemon.verifyContainerSettings(container.OS, container.HostConfig, nil, false); err != nil { - return errdefs.InvalidParameter(err) - } - // Adapt for old containers in case we have updates in this function and - // old containers never had a chance to call the new function at the create stage.
- if hostConfig != nil { - if err := daemon.adaptContainerSettings(container.HostConfig, false); err != nil { - return errdefs.InvalidParameter(err) - } - } - return daemon.containerStart(container, checkpoint, checkpointDir, true) -} - -// containerStart prepares the container to run by setting up everything the -// container needs, such as storage and networking, as well as links -// between containers. The container is left waiting for a signal to -// begin running. -func (daemon *Daemon) containerStart(container *container.Container, checkpoint string, checkpointDir string, resetRestartManager bool) (err error) { - start := time.Now() - container.Lock() - defer container.Unlock() - - if resetRestartManager && container.Running { // skip this check if already in restarting step and resetRestartManager==false - return nil - } - - if container.RemovalInProgress || container.Dead { - return errdefs.Conflict(errors.New("container is marked for removal and cannot be started")) - } - - if checkpointDir != "" { - // TODO(mlaventure): how would we support that? - return errdefs.Forbidden(errors.New("custom checkpointdir is not supported")) - } - - // if we encounter an error during start we need to ensure that any other - // setup has been cleaned up properly - defer func() { - if err != nil { - container.SetError(err) - // if no one else has set it, make sure we don't leave it at zero - if container.ExitCode() == 0 { - container.SetExitCode(128) - } - if err := container.CheckpointTo(daemon.containersReplica); err != nil { - logrus.Errorf("%s: failed saving state on start failure: %v", container.ID, err) - } - container.Reset(false) - - daemon.Cleanup(container) - // if containers AutoRemove flag is set, remove it after clean up - if container.HostConfig.AutoRemove { - container.Unlock() - if err := daemon.ContainerRm(container.ID, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil { - logrus.Errorf("can't remove container %s: %v", container.ID, err) - } - container.Lock() - } - } - }() - - if err := daemon.conditionalMountOnStart(container); err != nil { - return err - } - - if err := daemon.initializeNetworking(container); err != nil { - return err - } - - spec, err := daemon.createSpec(container) - if err != nil { - return errdefs.System(err) - } - - if resetRestartManager { - container.ResetRestartManager(true) - } - - if daemon.saveApparmorConfig(container); err != nil { - return err - } - - if checkpoint != "" { - checkpointDir, err = getCheckpointDir(checkpointDir, checkpoint, container.Name, container.ID, container.CheckpointDir(), false) - if err != nil { - return err - } - } - - createOptions, err := daemon.getLibcontainerdCreateOptions(container) - if err != nil { - return err - } - - err = daemon.containerd.Create(context.Background(), container.ID, spec, createOptions) - if err != nil { - return translateContainerdStartErr(container.Path, container.SetExitCode, err) - } - - // TODO(mlaventure): we need to specify checkpoint options here - pid, err := daemon.containerd.Start(context.Background(), container.ID, checkpointDir, - container.StreamConfig.Stdin() != nil || container.Config.Tty, - container.InitializeStdio) - if err != nil { - if err := daemon.containerd.Delete(context.Background(), container.ID); err != nil { - logrus.WithError(err).WithField("container", container.ID). 
- Error("failed to delete failed start container") - } - return translateContainerdStartErr(container.Path, container.SetExitCode, err) - } - - container.SetRunning(pid, true) - container.HasBeenManuallyStopped = false - container.HasBeenStartedBefore = true - daemon.setStateCounter(container) - - daemon.initHealthMonitor(container) - - if err := container.CheckpointTo(daemon.containersReplica); err != nil { - logrus.WithError(err).WithField("container", container.ID). - Errorf("failed to store container") - } - - daemon.LogContainerEvent(container, "start") - containerActions.WithValues("start").UpdateSince(start) - - return nil -} - -// Cleanup releases any network resources allocated to the container along with any rules -// around how containers are linked together. It also unmounts the container's root filesystem. -func (daemon *Daemon) Cleanup(container *container.Container) { - daemon.releaseNetwork(container) - - if err := container.UnmountIpcMount(detachMounted); err != nil { - logrus.Warnf("%s cleanup: failed to unmount IPC: %s", container.ID, err) - } - - if err := daemon.conditionalUnmountOnCleanup(container); err != nil { - // FIXME: remove once reference counting for graphdrivers has been refactored - // Ensure that all the mounts are gone - if mountid, err := daemon.imageService.GetLayerMountID(container.ID, container.OS); err == nil { - daemon.cleanupMountsByID(mountid) - } - } - - if err := container.UnmountSecrets(); err != nil { - logrus.Warnf("%s cleanup: failed to unmount secrets: %s", container.ID, err) - } - - if err := mount.RecursiveUnmount(container.Root); err != nil { - logrus.WithError(err).WithField("container", container.ID).Warn("Error while cleaning up container resource mounts.") - } - - for _, eConfig := range container.ExecCommands.Commands() { - daemon.unregisterExecCommand(container, eConfig) - } - - if container.BaseFS != nil && container.BaseFS.Path() != "" { - if err := container.UnmountVolumes(daemon.LogVolumeEvent); err != nil { - logrus.Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err) - } - } - - container.CancelAttachContext() - - if err := daemon.containerd.Delete(context.Background(), container.ID); err != nil { - logrus.Errorf("%s cleanup: failed to delete container from containerd: %v", container.ID, err) - } -} diff --git a/vendor/github.com/docker/docker/daemon/start_unix.go b/vendor/github.com/docker/docker/daemon/start_unix.go deleted file mode 100644 index e680b95f4..000000000 --- a/vendor/github.com/docker/docker/daemon/start_unix.go +++ /dev/null @@ -1,57 +0,0 @@ -// +build !windows - -package daemon // import "github.com/docker/docker/daemon" - -import ( - "fmt" - "os/exec" - "path/filepath" - - "github.com/containerd/containerd/runtime/linux/runctypes" - "github.com/docker/docker/container" - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" -) - -func (daemon *Daemon) getRuntimeScript(container *container.Container) (string, error) { - name := container.HostConfig.Runtime - rt := daemon.configStore.GetRuntime(name) - if rt == nil { - return "", errdefs.InvalidParameter(errors.Errorf("no such runtime '%s'", name)) - } - - if len(rt.Args) > 0 { - // First check that the target exist, as using it in a script won't - // give us the right error - if _, err := exec.LookPath(rt.Path); err != nil { - return "", translateContainerdStartErr(container.Path, container.SetExitCode, err) - } - return filepath.Join(daemon.configStore.Root, "runtimes", name), nil - } - return rt.Path, nil -} - -// 
getLibcontainerdCreateOptions callers must hold a lock on the container -func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) (interface{}, error) { - // Ensure a runtime has been assigned to this container - if container.HostConfig.Runtime == "" { - container.HostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName() - container.CheckpointTo(daemon.containersReplica) - } - - path, err := daemon.getRuntimeScript(container) - if err != nil { - return nil, err - } - opts := &runctypes.RuncOptions{ - Runtime: path, - RuntimeRoot: filepath.Join(daemon.configStore.ExecRoot, - fmt.Sprintf("runtime-%s", container.HostConfig.Runtime)), - } - - if UsingSystemd(daemon.configStore) { - opts.SystemdCgroup = true - } - - return opts, nil -} diff --git a/vendor/github.com/docker/docker/daemon/start_windows.go b/vendor/github.com/docker/docker/daemon/start_windows.go deleted file mode 100644 index f4606f7a6..000000000 --- a/vendor/github.com/docker/docker/daemon/start_windows.go +++ /dev/null @@ -1,38 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "github.com/Microsoft/opengcs/client" - "github.com/docker/docker/container" -) - -func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) (interface{}, error) { - // LCOW options. - if container.OS == "linux" { - config := &client.Config{} - if err := config.GenerateDefault(daemon.configStore.GraphOptions); err != nil { - return nil, err - } - // Override from user-supplied options. - for k, v := range container.HostConfig.StorageOpt { - switch k { - case "lcow.kirdpath": - config.KirdPath = v - case "lcow.kernel": - config.KernelFile = v - case "lcow.initrd": - config.InitrdFile = v - case "lcow.vhdx": - config.Vhdx = v - case "lcow.bootparameters": - config.BootParameters = v - } - } - if err := config.Validate(); err != nil { - return nil, err - } - - return config, nil - } - - return nil, nil -} diff --git a/vendor/github.com/docker/docker/daemon/stats.go b/vendor/github.com/docker/docker/daemon/stats.go deleted file mode 100644 index eb23e272a..000000000 --- a/vendor/github.com/docker/docker/daemon/stats.go +++ /dev/null @@ -1,155 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "context" - "encoding/json" - "errors" - "runtime" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/api/types/versions" - "github.com/docker/docker/api/types/versions/v1p20" - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/ioutils" -) - -// ContainerStats writes information about the container to the stream -// given in the config object. -func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, config *backend.ContainerStatsConfig) error { - // Engine API version (used for backwards compatibility) - apiVersion := config.Version - - container, err := daemon.GetContainer(prefixOrName) - if err != nil { - return err - } - - // If the container is either not running or restarting and requires no stream, return an empty stats. 
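The start_windows.go hunk above maps user-supplied lcow.* storage options onto a typed LCOW configuration before validating it. A minimal sketch of that mapping (lcowConfig and applyStorageOpts are hypothetical names; the real code fills a client.Config from Microsoft/opengcs and ignores unknown keys, as here):

package main

import "fmt"

type lcowConfig struct {
	KirdPath, KernelFile, InitrdFile, Vhdx, BootParameters string
}

// applyStorageOpts copies recognized lcow.* keys into the config;
// unrecognized keys fall through the switch and are silently ignored.
func applyStorageOpts(opts map[string]string) lcowConfig {
	var c lcowConfig
	for k, v := range opts {
		switch k {
		case "lcow.kirdpath":
			c.KirdPath = v
		case "lcow.kernel":
			c.KernelFile = v
		case "lcow.initrd":
			c.InitrdFile = v
		case "lcow.vhdx":
			c.Vhdx = v
		case "lcow.bootparameters":
			c.BootParameters = v
		}
	}
	return c
}

func main() {
	fmt.Printf("%+v\n", applyStorageOpts(map[string]string{"lcow.kernel": "bootx64.efi"}))
}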
- if (!container.IsRunning() || container.IsRestarting()) && !config.Stream { - return json.NewEncoder(config.OutStream).Encode(&types.StatsJSON{ - Name: container.Name, - ID: container.ID}) - } - - outStream := config.OutStream - if config.Stream { - wf := ioutils.NewWriteFlusher(outStream) - defer wf.Close() - wf.Flush() - outStream = wf - } - - var preCPUStats types.CPUStats - var preRead time.Time - getStatJSON := func(v interface{}) *types.StatsJSON { - ss := v.(types.StatsJSON) - ss.Name = container.Name - ss.ID = container.ID - ss.PreCPUStats = preCPUStats - ss.PreRead = preRead - preCPUStats = ss.CPUStats - preRead = ss.Read - return &ss - } - - enc := json.NewEncoder(outStream) - - updates := daemon.subscribeToContainerStats(container) - defer daemon.unsubscribeToContainerStats(container, updates) - - noStreamFirstFrame := true - for { - select { - case v, ok := <-updates: - if !ok { - return nil - } - - var statsJSON interface{} - statsJSONPost120 := getStatJSON(v) - if versions.LessThan(apiVersion, "1.21") { - if runtime.GOOS == "windows" { - return errors.New("API versions pre v1.21 do not support stats on Windows") - } - var ( - rxBytes uint64 - rxPackets uint64 - rxErrors uint64 - rxDropped uint64 - txBytes uint64 - txPackets uint64 - txErrors uint64 - txDropped uint64 - ) - for _, v := range statsJSONPost120.Networks { - rxBytes += v.RxBytes - rxPackets += v.RxPackets - rxErrors += v.RxErrors - rxDropped += v.RxDropped - txBytes += v.TxBytes - txPackets += v.TxPackets - txErrors += v.TxErrors - txDropped += v.TxDropped - } - statsJSON = &v1p20.StatsJSON{ - Stats: statsJSONPost120.Stats, - Network: types.NetworkStats{ - RxBytes: rxBytes, - RxPackets: rxPackets, - RxErrors: rxErrors, - RxDropped: rxDropped, - TxBytes: txBytes, - TxPackets: txPackets, - TxErrors: txErrors, - TxDropped: txDropped, - }, - } - } else { - statsJSON = statsJSONPost120 - } - - if !config.Stream && noStreamFirstFrame { - // prime the cpu stats so they aren't 0 in the final output - noStreamFirstFrame = false - continue - } - - if err := enc.Encode(statsJSON); err != nil { - return err - } - - if !config.Stream { - return nil - } - case <-ctx.Done(): - return nil - } - } -} - -func (daemon *Daemon) subscribeToContainerStats(c *container.Container) chan interface{} { - return daemon.statsCollector.Collect(c) -} - -func (daemon *Daemon) unsubscribeToContainerStats(c *container.Container, ch chan interface{}) { - daemon.statsCollector.Unsubscribe(c, ch) -} - -// GetContainerStats collects all the stats published by a container -func (daemon *Daemon) GetContainerStats(container *container.Container) (*types.StatsJSON, error) { - stats, err := daemon.stats(container) - if err != nil { - return nil, err - } - - // We already have the network stats on Windows directly from HCS. 
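The pre-v1.21 compatibility branch above collapses per-interface network counters into one aggregate before encoding. The same reduction in miniature (netStats here carries only the byte counters for brevity; the deleted code sums all eight Rx/Tx fields in exactly this way):

package main

import "fmt"

type netStats struct{ RxBytes, TxBytes uint64 }

// aggregate flattens a per-interface stats map into a single total,
// as the v1p20 compatibility path does for old API clients.
func aggregate(per map[string]netStats) netStats {
	var total netStats
	for _, s := range per {
		total.RxBytes += s.RxBytes
		total.TxBytes += s.TxBytes
	}
	return total
}

func main() {
	fmt.Println(aggregate(map[string]netStats{"eth0": {1, 2}, "eth1": {3, 4}}))
}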
- if !container.Config.NetworkDisabled && runtime.GOOS != "windows" { - if stats.Networks, err = daemon.getNetworkStats(container); err != nil { - return nil, err - } - } - - return stats, nil -} diff --git a/vendor/github.com/docker/docker/daemon/stats/collector.go b/vendor/github.com/docker/docker/daemon/stats/collector.go deleted file mode 100644 index 88e20984b..000000000 --- a/vendor/github.com/docker/docker/daemon/stats/collector.go +++ /dev/null @@ -1,159 +0,0 @@ -package stats // import "github.com/docker/docker/daemon/stats" - -import ( - "bufio" - "sync" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/pubsub" - "github.com/sirupsen/logrus" -) - -// Collector manages and provides container resource stats -type Collector struct { - m sync.Mutex - supervisor supervisor - interval time.Duration - publishers map[*container.Container]*pubsub.Publisher - bufReader *bufio.Reader - - // The following fields are not set on Windows currently. - clockTicksPerSecond uint64 -} - -// NewCollector creates a stats collector that will poll the supervisor with the specified interval -func NewCollector(supervisor supervisor, interval time.Duration) *Collector { - s := &Collector{ - interval: interval, - supervisor: supervisor, - publishers: make(map[*container.Container]*pubsub.Publisher), - bufReader: bufio.NewReaderSize(nil, 128), - } - - platformNewStatsCollector(s) - - return s -} - -type supervisor interface { - // GetContainerStats collects all the stats related to a container - GetContainerStats(container *container.Container) (*types.StatsJSON, error) -} - -// Collect registers the container with the collector and adds it to -// the event loop for collection on the specified interval returning -// a channel for the subscriber to receive on. -func (s *Collector) Collect(c *container.Container) chan interface{} { - s.m.Lock() - defer s.m.Unlock() - publisher, exists := s.publishers[c] - if !exists { - publisher = pubsub.NewPublisher(100*time.Millisecond, 1024) - s.publishers[c] = publisher - } - return publisher.Subscribe() -} - -// StopCollection closes the channels for all subscribers and removes -// the container from metrics collection. -func (s *Collector) StopCollection(c *container.Container) { - s.m.Lock() - if publisher, exists := s.publishers[c]; exists { - publisher.Close() - delete(s.publishers, c) - } - s.m.Unlock() -} - -// Unsubscribe removes a specific subscriber from receiving updates for a container's stats. -func (s *Collector) Unsubscribe(c *container.Container, ch chan interface{}) { - s.m.Lock() - publisher := s.publishers[c] - if publisher != nil { - publisher.Evict(ch) - if publisher.Len() == 0 { - delete(s.publishers, c) - } - } - s.m.Unlock() -} - -// Run starts the collectors and will indefinitely collect stats from the supervisor -func (s *Collector) Run() { - type publishersPair struct { - container *container.Container - publisher *pubsub.Publisher - } - // we cannot determine the capacity here. - // it will grow enough in first iteration - var pairs []publishersPair - - for { - // Put sleep at the start so that it will always be hit, - // preventing a tight loop if no stats are collected. 
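Collect, StopCollection, and Unsubscribe above all revolve around one publisher per container, created lazily on first subscribe and dropped when the last subscriber leaves. A toy version of that bookkeeping with plain buffered channels standing in for pkg/pubsub (hub, subscribe, and publish are illustrative names):

package main

import (
	"fmt"
	"sync"
)

type hub struct {
	mu   sync.Mutex
	subs map[string][]chan string // keyed by container ID
}

// subscribe lazily registers a channel for the given container,
// mirroring how Collect creates a publisher on first use.
func (h *hub) subscribe(id string) chan string {
	h.mu.Lock()
	defer h.mu.Unlock()
	ch := make(chan string, 1)
	h.subs[id] = append(h.subs[id], ch)
	return ch
}

// publish fans a sample out to all subscribers, dropping it for any
// subscriber whose buffer is full rather than blocking the loop.
func (h *hub) publish(id, msg string) {
	h.mu.Lock()
	defer h.mu.Unlock()
	for _, ch := range h.subs[id] {
		select {
		case ch <- msg:
		default:
		}
	}
}

func main() {
	h := &hub{subs: map[string][]chan string{}}
	ch := h.subscribe("c1")
	h.publish("c1", "stats-sample")
	fmt.Println(<-ch)
}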
- time.Sleep(s.interval) - - // it does not make sense in the first iteration, - // but saves allocations in further iterations - pairs = pairs[:0] - - s.m.Lock() - for container, publisher := range s.publishers { - // copy pointers here to release the lock ASAP - pairs = append(pairs, publishersPair{container, publisher}) - } - s.m.Unlock() - if len(pairs) == 0 { - continue - } - - onlineCPUs, err := s.getNumberOnlineCPUs() - if err != nil { - logrus.Errorf("collecting system online cpu count: %v", err) - continue - } - - for _, pair := range pairs { - stats, err := s.supervisor.GetContainerStats(pair.container) - - switch err.(type) { - case nil: - // Sample system CPU usage close to container usage to avoid - // noise in metric calculations. - systemUsage, err := s.getSystemCPUUsage() - if err != nil { - logrus.WithError(err).WithField("container_id", pair.container.ID).Errorf("collecting system cpu usage") - continue - } - - // FIXME: move to containerd on Linux (not Windows) - stats.CPUStats.SystemUsage = systemUsage - stats.CPUStats.OnlineCPUs = onlineCPUs - - pair.publisher.Publish(*stats) - - case notRunningErr, notFoundErr: - // publish empty stats containing only name and ID if not running or not found - pair.publisher.Publish(types.StatsJSON{ - Name: pair.container.Name, - ID: pair.container.ID, - }) - - default: - logrus.Errorf("collecting stats for %s: %v", pair.container.ID, err) - } - } - } -} - -type notRunningErr interface { - error - Conflict() -} - -type notFoundErr interface { - error - NotFound() -} diff --git a/vendor/github.com/docker/docker/daemon/stats/collector_unix.go b/vendor/github.com/docker/docker/daemon/stats/collector_unix.go deleted file mode 100644 index 2480aceb5..000000000 --- a/vendor/github.com/docker/docker/daemon/stats/collector_unix.go +++ /dev/null @@ -1,83 +0,0 @@ -// +build !windows - -package stats // import "github.com/docker/docker/daemon/stats" - -import ( - "fmt" - "os" - "strconv" - "strings" - - "github.com/opencontainers/runc/libcontainer/system" -) - -/* -#include <unistd.h> -*/ -import "C" - -// platformNewStatsCollector performs platform specific initialisation of the -// Collector structure. -func platformNewStatsCollector(s *Collector) { - s.clockTicksPerSecond = uint64(system.GetClockTicks()) -} - -const nanoSecondsPerSecond = 1e9 - -// getSystemCPUUsage returns the host system's cpu usage in -// nanoseconds. An error is returned if the format of the underlying -// file does not match. -// -// Uses /proc/stat defined by POSIX. Looks for the cpu -// statistics line and then sums up the first seven fields -// provided. See `man 5 proc` for details on specific field -// information. -func (s *Collector) getSystemCPUUsage() (uint64, error) { - var line string - f, err := os.Open("/proc/stat") - if err != nil { - return 0, err - } - defer func() { - s.bufReader.Reset(nil) - f.Close() - }() - s.bufReader.Reset(f) - err = nil - for err == nil { - line, err = s.bufReader.ReadString('\n') - if err != nil { - break - } - parts := strings.Fields(line) - switch parts[0] { - case "cpu": - if len(parts) < 8 { - return 0, fmt.Errorf("invalid number of cpu fields") - } - var totalClockTicks uint64 - for _, i := range parts[1:8] { - v, err := strconv.ParseUint(i, 10, 64) - if err != nil { - return 0, fmt.Errorf("Unable to convert value %s to int: %s", i, err) - } - totalClockTicks += v - } - return (totalClockTicks * nanoSecondsPerSecond) / - s.clockTicksPerSecond, nil - } - } - return 0, fmt.Errorf("invalid stat format. 
Error trying to parse the '/proc/stat' file") -} - -func (s *Collector) getNumberOnlineCPUs() (uint32, error) { - i, err := C.sysconf(C._SC_NPROCESSORS_ONLN) - // According to POSIX - errno is undefined after successful - // sysconf, and can be non-zero in several cases, so look for - // error in returned value not in errno. - // (https://sourceware.org/bugzilla/show_bug.cgi?id=21536) - if i == -1 { - return 0, err - } - return uint32(i), nil -} diff --git a/vendor/github.com/docker/docker/daemon/stats/collector_windows.go b/vendor/github.com/docker/docker/daemon/stats/collector_windows.go deleted file mode 100644 index 018e9065f..000000000 --- a/vendor/github.com/docker/docker/daemon/stats/collector_windows.go +++ /dev/null @@ -1,17 +0,0 @@ -package stats // import "github.com/docker/docker/daemon/stats" - -// platformNewStatsCollector performs platform specific initialisation of the -// Collector structure. This is a no-op on Windows. -func platformNewStatsCollector(s *Collector) { -} - -// getSystemCPUUsage returns the host system's cpu usage in -// nanoseconds. An error is returned if the format of the underlying -// file does not match. This is a no-op on Windows. -func (s *Collector) getSystemCPUUsage() (uint64, error) { - return 0, nil -} - -func (s *Collector) getNumberOnlineCPUs() (uint32, error) { - return 0, nil -} diff --git a/vendor/github.com/docker/docker/daemon/stats_collector.go b/vendor/github.com/docker/docker/daemon/stats_collector.go deleted file mode 100644 index 0490b2ea1..000000000 --- a/vendor/github.com/docker/docker/daemon/stats_collector.go +++ /dev/null @@ -1,26 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "runtime" - "time" - - "github.com/docker/docker/daemon/stats" - "github.com/docker/docker/pkg/system" -) - -// newStatsCollector returns a new statsCollector that collections -// stats for a registered container at the specified interval. -// The collector allows non-running containers to be added -// and will start processing stats when they are started. 
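getSystemCPUUsage above boils down to: find the aggregate "cpu" line in /proc/stat, sum its first seven numeric fields, and convert clock ticks to nanoseconds. The same arithmetic over a captured line, assuming the common 100 ticks per second (cpuUsageNanos is an illustrative name):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

const nsPerSecond = 1e9

// cpuUsageNanos sums the first seven fields of a /proc/stat "cpu" line
// (user, nice, system, idle, iowait, irq, softirq) and converts the
// tick count to nanoseconds.
func cpuUsageNanos(statLine string, ticksPerSecond uint64) (uint64, error) {
	fields := strings.Fields(statLine)
	if len(fields) < 8 || fields[0] != "cpu" {
		return 0, fmt.Errorf("invalid cpu line: %q", statLine)
	}
	var ticks uint64
	for _, f := range fields[1:8] {
		v, err := strconv.ParseUint(f, 10, 64)
		if err != nil {
			return 0, err
		}
		ticks += v
	}
	return ticks * nsPerSecond / ticksPerSecond, nil
}

func main() {
	n, err := cpuUsageNanos("cpu  4705 356 584 3699 23 23 0 0 0 0", 100)
	fmt.Println(n, err) // 93900000000 <nil>
}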
-func (daemon *Daemon) newStatsCollector(interval time.Duration) *stats.Collector { - // FIXME(vdemeester) move this elsewhere - if runtime.GOOS == "linux" { - meminfo, err := system.ReadMemInfo() - if err == nil && meminfo.MemTotal > 0 { - daemon.machineMemory = uint64(meminfo.MemTotal) - } - } - s := stats.NewCollector(daemon, interval) - go s.Run() - return s -} diff --git a/vendor/github.com/docker/docker/daemon/stats_unix.go b/vendor/github.com/docker/docker/daemon/stats_unix.go deleted file mode 100644 index ee78ca688..000000000 --- a/vendor/github.com/docker/docker/daemon/stats_unix.go +++ /dev/null @@ -1,57 +0,0 @@ -// +build !windows - -package daemon // import "github.com/docker/docker/daemon" - -import ( - "github.com/docker/docker/api/types" - "github.com/docker/docker/container" - "github.com/pkg/errors" -) - -// Resolve Network SandboxID in case the container reuse another container's network stack -func (daemon *Daemon) getNetworkSandboxID(c *container.Container) (string, error) { - curr := c - for curr.HostConfig.NetworkMode.IsContainer() { - containerID := curr.HostConfig.NetworkMode.ConnectedContainer() - connected, err := daemon.GetContainer(containerID) - if err != nil { - return "", errors.Wrapf(err, "Could not get container for %s", containerID) - } - curr = connected - } - return curr.NetworkSettings.SandboxID, nil -} - -func (daemon *Daemon) getNetworkStats(c *container.Container) (map[string]types.NetworkStats, error) { - sandboxID, err := daemon.getNetworkSandboxID(c) - if err != nil { - return nil, err - } - - sb, err := daemon.netController.SandboxByID(sandboxID) - if err != nil { - return nil, err - } - - lnstats, err := sb.Statistics() - if err != nil { - return nil, err - } - - stats := make(map[string]types.NetworkStats) - // Convert libnetwork nw stats into api stats - for ifName, ifStats := range lnstats { - stats[ifName] = types.NetworkStats{ - RxBytes: ifStats.RxBytes, - RxPackets: ifStats.RxPackets, - RxErrors: ifStats.RxErrors, - RxDropped: ifStats.RxDropped, - TxBytes: ifStats.TxBytes, - TxPackets: ifStats.TxPackets, - TxErrors: ifStats.TxErrors, - TxDropped: ifStats.TxDropped, - } - } - - return stats, nil -} diff --git a/vendor/github.com/docker/docker/daemon/stats_windows.go b/vendor/github.com/docker/docker/daemon/stats_windows.go deleted file mode 100644 index 0306332b4..000000000 --- a/vendor/github.com/docker/docker/daemon/stats_windows.go +++ /dev/null @@ -1,11 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "github.com/docker/docker/api/types" - "github.com/docker/docker/container" -) - -// Windows network stats are obtained directly through HCS, hence this is a no-op. -func (daemon *Daemon) getNetworkStats(c *container.Container) (map[string]types.NetworkStats, error) { - return make(map[string]types.NetworkStats), nil -} diff --git a/vendor/github.com/docker/docker/daemon/stop.go b/vendor/github.com/docker/docker/daemon/stop.go deleted file mode 100644 index c3ac09056..000000000 --- a/vendor/github.com/docker/docker/daemon/stop.go +++ /dev/null @@ -1,89 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "context" - "time" - - containerpkg "github.com/docker/docker/container" - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// ContainerStop looks for the given container and stops it. 
-// In case the container fails to stop gracefully within a time duration -// specified by the timeout argument, in seconds, it is forcefully -// terminated (killed). -// -// If the timeout is nil, the container's StopTimeout value is used, if set, -// otherwise the engine default. A negative timeout value can be specified, -// meaning no timeout, i.e. no forceful termination is performed. -func (daemon *Daemon) ContainerStop(name string, timeout *int) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - if !container.IsRunning() { - return containerNotModifiedError{running: false} - } - if timeout == nil { - stopTimeout := container.StopTimeout() - timeout = &stopTimeout - } - if err := daemon.containerStop(container, *timeout); err != nil { - return errdefs.System(errors.Wrapf(err, "cannot stop container: %s", name)) - } - return nil -} - -// containerStop sends a stop signal, waits, sends a kill signal. -func (daemon *Daemon) containerStop(container *containerpkg.Container, seconds int) error { - if !container.IsRunning() { - return nil - } - - stopSignal := container.StopSignal() - // 1. Send a stop signal - if err := daemon.killPossiblyDeadProcess(container, stopSignal); err != nil { - // While normally we might "return err" here we're not going to - // because if we can't stop the container by this point then - // it's probably because it's already stopped. Meaning, between - // the time of the IsRunning() call above and now it stopped. - // Also, since the err return will be environment specific we can't - // look for any particular (common) error that would indicate - // that the process is already dead vs something else going wrong. - // So, instead we'll give it up to 2 more seconds to complete and if - // by that time the container is still running, then the error - // we got is probably valid and so we force kill it. - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - - if status := <-container.Wait(ctx, containerpkg.WaitConditionNotRunning); status.Err() != nil { - logrus.Infof("Container failed to stop after sending signal %d to the process, force killing", stopSignal) - if err := daemon.killPossiblyDeadProcess(container, 9); err != nil { - return err - } - } - } - - // 2. Wait for the process to exit on its own - ctx := context.Background() - if seconds >= 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, time.Duration(seconds)*time.Second) - defer cancel() - } - - if status := <-container.Wait(ctx, containerpkg.WaitConditionNotRunning); status.Err() != nil { - logrus.Infof("Container %v failed to exit within %d seconds of signal %d - using the force", container.ID, seconds, stopSignal) - // 3. If it doesn't, then send SIGKILL - if err := daemon.Kill(container); err != nil { - // Wait without a timeout, ignore result. 
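The stop sequence above escalates in stages: send the stop signal, wait up to the timeout (or forever when the timeout is negative), then fall back to a hard kill. The same shape with a generic "stopped" channel standing in for container.Wait (stop and its parameters are illustrative, not the daemon's API):

package main

import (
	"context"
	"fmt"
	"time"
)

// stop signals, waits with an optional deadline, and reports whether
// escalation to a kill would be required.
func stop(ctx context.Context, signalStop func(), stopped <-chan struct{}, timeout time.Duration) {
	signalStop()
	waitCtx := ctx
	if timeout >= 0 { // negative timeout means wait indefinitely
		var cancel context.CancelFunc
		waitCtx, cancel = context.WithTimeout(ctx, timeout)
		defer cancel()
	}
	select {
	case <-stopped:
		fmt.Println("stopped gracefully")
	case <-waitCtx.Done():
		fmt.Println("timeout: escalating to kill")
	}
}

func main() {
	stopped := make(chan struct{})
	go func() { time.Sleep(10 * time.Millisecond); close(stopped) }()
	stop(context.Background(), func() { fmt.Println("sent stop signal") }, stopped, time.Second)
}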
- <-container.Wait(context.Background(), containerpkg.WaitConditionNotRunning) - logrus.Warn(err) // Don't return error because we only care that container is stopped, not what function stopped it - } - } - - daemon.LogContainerEvent(container, "stop") - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/top_unix.go b/vendor/github.com/docker/docker/daemon/top_unix.go deleted file mode 100644 index 99ca56f0f..000000000 --- a/vendor/github.com/docker/docker/daemon/top_unix.go +++ /dev/null @@ -1,189 +0,0 @@ -//+build !windows - -package daemon // import "github.com/docker/docker/daemon" - -import ( - "bytes" - "context" - "fmt" - "os/exec" - "regexp" - "strconv" - "strings" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" -) - -func validatePSArgs(psArgs string) error { - // NOTE: \\s does not detect unicode whitespaces. - // So we use fieldsASCII instead of strings.Fields in parsePSOutput. - // See https://github.com/docker/docker/pull/24358 - // nolint: gosimple - re := regexp.MustCompile("\\s+([^\\s]*)=\\s*(PID[^\\s]*)") - for _, group := range re.FindAllStringSubmatch(psArgs, -1) { - if len(group) >= 3 { - k := group[1] - v := group[2] - if k != "pid" { - return fmt.Errorf("specifying \"%s=%s\" is not allowed", k, v) - } - } - } - return nil -} - -// fieldsASCII is similar to strings.Fields but only allows ASCII whitespaces -func fieldsASCII(s string) []string { - fn := func(r rune) bool { - switch r { - case '\t', '\n', '\f', '\r', ' ': - return true - } - return false - } - return strings.FieldsFunc(s, fn) -} - -func appendProcess2ProcList(procList *container.ContainerTopOKBody, fields []string) { - // Make sure number of fields equals number of header titles - // merging "overhanging" fields - process := fields[:len(procList.Titles)-1] - process = append(process, strings.Join(fields[len(procList.Titles)-1:], " ")) - procList.Processes = append(procList.Processes, process) -} - -func hasPid(procs []uint32, pid int) bool { - for _, p := range procs { - if int(p) == pid { - return true - } - } - return false -} - -func parsePSOutput(output []byte, procs []uint32) (*container.ContainerTopOKBody, error) { - procList := &container.ContainerTopOKBody{} - - lines := strings.Split(string(output), "\n") - procList.Titles = fieldsASCII(lines[0]) - - pidIndex := -1 - for i, name := range procList.Titles { - if name == "PID" { - pidIndex = i - break - } - } - if pidIndex == -1 { - return nil, fmt.Errorf("Couldn't find PID field in ps output") - } - - // loop through the output and extract the PID from each line - // fixing #30580, be able to display thread line also when "m" option used - // in "docker top" client command - preContainedPidFlag := false - for _, line := range lines[1:] { - if len(line) == 0 { - continue - } - fields := fieldsASCII(line) - - var ( - p int - err error - ) - - if fields[pidIndex] == "-" { - if preContainedPidFlag { - appendProcess2ProcList(procList, fields) - } - continue - } - p, err = strconv.Atoi(fields[pidIndex]) - if err != nil { - return nil, fmt.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) - } - - if hasPid(procs, p) { - preContainedPidFlag = true - appendProcess2ProcList(procList, fields) - continue - } - preContainedPidFlag = false - } - return procList, nil -} - -// psPidsArg converts a slice of PIDs to a string consisting -// of comma-separated list of PIDs prepended by "-q". -// For example, psPidsArg([]uint32{1,2,3}) returns "-q1,2,3". 
-func psPidsArg(pids []uint32) string { - b := []byte{'-', 'q'} - for i, p := range pids { - b = strconv.AppendUint(b, uint64(p), 10) - if i < len(pids)-1 { - b = append(b, ',') - } - } - return string(b) -} - -// ContainerTop lists the processes running inside of the given -// container by calling ps with the given args, or with the flags -// "-ef" if no args are given. An error is returned if the container -// is not found, or is not running, or if there are any problems -// running ps, or parsing the output. -func (daemon *Daemon) ContainerTop(name string, psArgs string) (*container.ContainerTopOKBody, error) { - if psArgs == "" { - psArgs = "-ef" - } - - if err := validatePSArgs(psArgs); err != nil { - return nil, err - } - - container, err := daemon.GetContainer(name) - if err != nil { - return nil, err - } - - if !container.IsRunning() { - return nil, errNotRunning(container.ID) - } - - if container.IsRestarting() { - return nil, errContainerIsRestarting(container.ID) - } - - procs, err := daemon.containerd.ListPids(context.Background(), container.ID) - if err != nil { - return nil, err - } - - args := strings.Split(psArgs, " ") - pids := psPidsArg(procs) - output, err := exec.Command("ps", append(args, pids)...).Output() - if err != nil { - // some ps options (such as f) can't be used together with q, - // so retry without it - output, err = exec.Command("ps", args...).Output() - if err != nil { - if ee, ok := err.(*exec.ExitError); ok { - // first line of stderr shows why ps failed - line := bytes.SplitN(ee.Stderr, []byte{'\n'}, 2) - if len(line) > 0 && len(line[0]) > 0 { - err = errors.New(string(line[0])) - } - } - return nil, errdefs.System(errors.Wrap(err, "ps")) - } - } - procList, err := parsePSOutput(output, procs) - if err != nil { - return nil, err - } - daemon.LogContainerEvent(container, "top") - return procList, nil -} diff --git a/vendor/github.com/docker/docker/daemon/top_windows.go b/vendor/github.com/docker/docker/daemon/top_windows.go deleted file mode 100644 index 1b3f84396..000000000 --- a/vendor/github.com/docker/docker/daemon/top_windows.go +++ /dev/null @@ -1,63 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "context" - "errors" - "fmt" - "time" - - containertypes "github.com/docker/docker/api/types/container" - "github.com/docker/go-units" -) - -// ContainerTop handles `docker top` client requests. -// Future considerations: -// -- Windows users are far more familiar with CPU% total. -// Further, users on Windows rarely see user/kernel CPU stats split. -// The kernel returns everything in terms of 100ns. To obtain -// CPU%, we could do something like docker stats does which takes two -// samples, subtract the difference and do the maths. Unfortunately this -// would slow the stat call down and require two kernel calls. So instead, -// we do something similar to linux and display the CPU as combined HH:MM:SS.mmm. -// -- Perhaps we could add an argument to display "raw" stats -// -- "Memory" is an extremely overloaded term in Windows. Hence we do what -// task manager does and use the private working set as the memory counter. -// We could return more info for those who really understand how memory -// management works in Windows if we introduced a "raw" stats (above). 
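psPidsArg, shown at the top of this hunk, builds ps's -q argument by appending decimal PIDs to a byte slice rather than concatenating strings. A quick standalone check of its output shape (the function body is copied verbatim from the deleted file):

package main

import (
	"fmt"
	"strconv"
)

// psPidsArg converts a slice of PIDs to "-q" followed by a
// comma-separated list, e.g. []uint32{1,2,3} -> "-q1,2,3".
func psPidsArg(pids []uint32) string {
	b := []byte{'-', 'q'}
	for i, p := range pids {
		b = strconv.AppendUint(b, uint64(p), 10)
		if i < len(pids)-1 {
			b = append(b, ',')
		}
	}
	return string(b)
}

func main() {
	fmt.Println(psPidsArg([]uint32{1, 2, 3})) // -q1,2,3
}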
-func (daemon *Daemon) ContainerTop(name string, psArgs string) (*containertypes.ContainerTopOKBody, error) { - // It's not at all an equivalent to linux 'ps' on Windows - if psArgs != "" { - return nil, errors.New("Windows does not support arguments to top") - } - - container, err := daemon.GetContainer(name) - if err != nil { - return nil, err - } - - if !container.IsRunning() { - return nil, errNotRunning(container.ID) - } - - if container.IsRestarting() { - return nil, errContainerIsRestarting(container.ID) - } - - s, err := daemon.containerd.Summary(context.Background(), container.ID) - if err != nil { - return nil, err - } - procList := &containertypes.ContainerTopOKBody{} - procList.Titles = []string{"Name", "PID", "CPU", "Private Working Set"} - - for _, j := range s { - d := time.Duration((j.KernelTime100ns + j.UserTime100ns) * 100) // Combined time in nanoseconds - procList.Processes = append(procList.Processes, []string{ - j.ImageName, - fmt.Sprint(j.ProcessId), - fmt.Sprintf("%02d:%02d:%02d.%03d", int(d.Hours()), int(d.Minutes())%60, int(d.Seconds())%60, int(d.Nanoseconds()/1000000)%1000), - units.HumanSize(float64(j.MemoryWorkingSetPrivateBytes))}) - } - - return procList, nil -} diff --git a/vendor/github.com/docker/docker/daemon/trustkey.go b/vendor/github.com/docker/docker/daemon/trustkey.go deleted file mode 100644 index bf00b6a3a..000000000 --- a/vendor/github.com/docker/docker/daemon/trustkey.go +++ /dev/null @@ -1,57 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "encoding/json" - "encoding/pem" - "fmt" - "os" - "path/filepath" - - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/system" - "github.com/docker/libtrust" -) - -// LoadOrCreateTrustKey attempts to load the libtrust key at the given path, -// otherwise generates a new one -// TODO: this should use more of libtrust.LoadOrCreateTrustKey which may need -// a refactor or this function to be moved into libtrust -func loadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { - err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700, "") - if err != nil { - return nil, err - } - trustKey, err := libtrust.LoadKeyFile(trustKeyPath) - if err == libtrust.ErrKeyFileDoesNotExist { - trustKey, err = libtrust.GenerateECP256PrivateKey() - if err != nil { - return nil, fmt.Errorf("Error generating key: %s", err) - } - encodedKey, err := serializePrivateKey(trustKey, filepath.Ext(trustKeyPath)) - if err != nil { - return nil, fmt.Errorf("Error serializing key: %s", err) - } - if err := ioutils.AtomicWriteFile(trustKeyPath, encodedKey, os.FileMode(0600)); err != nil { - return nil, fmt.Errorf("Error saving key file: %s", err) - } - } else if err != nil { - return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err) - } - return trustKey, nil -} - -func serializePrivateKey(key libtrust.PrivateKey, ext string) (encoded []byte, err error) { - if ext == ".json" || ext == ".jwk" { - encoded, err = json.Marshal(key) - if err != nil { - return nil, fmt.Errorf("unable to encode private key JWK: %s", err) - } - } else { - pemBlock, err := key.PEMBlock() - if err != nil { - return nil, fmt.Errorf("unable to encode private key PEM: %s", err) - } - encoded = pem.EncodeToMemory(pemBlock) - } - return -} diff --git a/vendor/github.com/docker/docker/daemon/unpause.go b/vendor/github.com/docker/docker/daemon/unpause.go deleted file mode 100644 index 9061d50a1..000000000 --- a/vendor/github.com/docker/docker/daemon/unpause.go +++ /dev/null @@ -1,44 +0,0 @@ 
-package daemon // import "github.com/docker/docker/daemon" - -import ( - "context" - "fmt" - - "github.com/docker/docker/container" - "github.com/sirupsen/logrus" -) - -// ContainerUnpause unpauses a container -func (daemon *Daemon) ContainerUnpause(name string) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - return daemon.containerUnpause(container) -} - -// containerUnpause resumes the container execution after the container is paused. -func (daemon *Daemon) containerUnpause(container *container.Container) error { - container.Lock() - defer container.Unlock() - - // We cannot unpause the container which is not paused - if !container.Paused { - return fmt.Errorf("Container %s is not paused", container.ID) - } - - if err := daemon.containerd.Resume(context.Background(), container.ID); err != nil { - return fmt.Errorf("Cannot unpause container %s: %s", container.ID, err) - } - - container.Paused = false - daemon.setStateCounter(container) - daemon.updateHealthMonitor(container) - daemon.LogContainerEvent(container, "unpause") - - if err := container.CheckpointTo(daemon.containersReplica); err != nil { - logrus.WithError(err).Warnf("could not save container to disk") - } - - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/update.go b/vendor/github.com/docker/docker/daemon/update.go deleted file mode 100644 index 0ebb139d3..000000000 --- a/vendor/github.com/docker/docker/daemon/update.go +++ /dev/null @@ -1,95 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "context" - "fmt" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" -) - -// ContainerUpdate updates configuration of the container -func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) { - var warnings []string - - c, err := daemon.GetContainer(name) - if err != nil { - return container.ContainerUpdateOKBody{Warnings: warnings}, err - } - - warnings, err = daemon.verifyContainerSettings(c.OS, hostConfig, nil, true) - if err != nil { - return container.ContainerUpdateOKBody{Warnings: warnings}, errdefs.InvalidParameter(err) - } - - if err := daemon.update(name, hostConfig); err != nil { - return container.ContainerUpdateOKBody{Warnings: warnings}, err - } - - return container.ContainerUpdateOKBody{Warnings: warnings}, nil -} - -func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error { - if hostConfig == nil { - return nil - } - - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - - restoreConfig := false - backupHostConfig := *container.HostConfig - defer func() { - if restoreConfig { - container.Lock() - container.HostConfig = &backupHostConfig - container.CheckpointTo(daemon.containersReplica) - container.Unlock() - } - }() - - if container.RemovalInProgress || container.Dead { - return errCannotUpdate(container.ID, fmt.Errorf("container is marked for removal and cannot be \"update\"")) - } - - container.Lock() - if err := container.UpdateContainer(hostConfig); err != nil { - restoreConfig = true - container.Unlock() - return errCannotUpdate(container.ID, err) - } - if err := container.CheckpointTo(daemon.containersReplica); err != nil { - restoreConfig = true - container.Unlock() - return errCannotUpdate(container.ID, err) - } - container.Unlock() - - // if Restart Policy changed, we need to update container monitor - if hostConfig.RestartPolicy.Name != 
"" { - container.UpdateMonitor(hostConfig.RestartPolicy) - } - - // If container is not running, update hostConfig struct is enough, - // resources will be updated when the container is started again. - // If container is running (including paused), we need to update configs - // to the real world. - if container.IsRunning() && !container.IsRestarting() { - if err := daemon.containerd.UpdateResources(context.Background(), container.ID, toContainerdResources(hostConfig.Resources)); err != nil { - restoreConfig = true - // TODO: it would be nice if containerd responded with better errors here so we can classify this better. - return errCannotUpdate(container.ID, errdefs.System(err)) - } - } - - daemon.LogContainerEvent(container, "update") - - return nil -} - -func errCannotUpdate(containerID string, err error) error { - return errors.Wrap(err, "Cannot update container "+containerID) -} diff --git a/vendor/github.com/docker/docker/daemon/update_linux.go b/vendor/github.com/docker/docker/daemon/update_linux.go deleted file mode 100644 index 6a307eabc..000000000 --- a/vendor/github.com/docker/docker/daemon/update_linux.go +++ /dev/null @@ -1,54 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "time" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/libcontainerd" - "github.com/opencontainers/runtime-spec/specs-go" -) - -func toContainerdResources(resources container.Resources) *libcontainerd.Resources { - var r libcontainerd.Resources - - r.BlockIO = &specs.LinuxBlockIO{ - Weight: &resources.BlkioWeight, - } - - shares := uint64(resources.CPUShares) - r.CPU = &specs.LinuxCPU{ - Shares: &shares, - Cpus: resources.CpusetCpus, - Mems: resources.CpusetMems, - } - - var ( - period uint64 - quota int64 - ) - if resources.NanoCPUs != 0 { - period = uint64(100 * time.Millisecond / time.Microsecond) - quota = resources.NanoCPUs * int64(period) / 1e9 - } - if quota == 0 && resources.CPUQuota != 0 { - quota = resources.CPUQuota - } - if period == 0 && resources.CPUPeriod != 0 { - period = uint64(resources.CPUPeriod) - } - - r.CPU.Period = &period - r.CPU.Quota = "a - - r.Memory = &specs.LinuxMemory{ - Limit: &resources.Memory, - Reservation: &resources.MemoryReservation, - Kernel: &resources.KernelMemory, - } - - if resources.MemorySwap > 0 { - r.Memory.Swap = &resources.MemorySwap - } - - return &r -} diff --git a/vendor/github.com/docker/docker/daemon/update_windows.go b/vendor/github.com/docker/docker/daemon/update_windows.go deleted file mode 100644 index fada3c1c0..000000000 --- a/vendor/github.com/docker/docker/daemon/update_windows.go +++ /dev/null @@ -1,11 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/libcontainerd" -) - -func toContainerdResources(resources container.Resources) *libcontainerd.Resources { - // We don't support update, so do nothing - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/volumes.go b/vendor/github.com/docker/docker/daemon/volumes.go deleted file mode 100644 index a20ff1fbf..000000000 --- a/vendor/github.com/docker/docker/daemon/volumes.go +++ /dev/null @@ -1,417 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "context" - "os" - "path/filepath" - "reflect" - "strings" - "time" - - "github.com/docker/docker/api/types" - containertypes "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/mount" - mounttypes 
"github.com/docker/docker/api/types/mount" - "github.com/docker/docker/container" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/volume" - volumemounts "github.com/docker/docker/volume/mounts" - "github.com/docker/docker/volume/service" - volumeopts "github.com/docker/docker/volume/service/opts" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -var ( - // ErrVolumeReadonly is used to signal an error when trying to copy data into - // a volume mount that is not writable. - ErrVolumeReadonly = errors.New("mounted volume is marked read-only") -) - -type mounts []container.Mount - -// Len returns the number of mounts. Used in sorting. -func (m mounts) Len() int { - return len(m) -} - -// Less returns true if the number of parts (a/b/c would be 3 parts) in the -// mount indexed by parameter 1 is less than that of the mount indexed by -// parameter 2. Used in sorting. -func (m mounts) Less(i, j int) bool { - return m.parts(i) < m.parts(j) -} - -// Swap swaps two items in an array of mounts. Used in sorting -func (m mounts) Swap(i, j int) { - m[i], m[j] = m[j], m[i] -} - -// parts returns the number of parts in the destination of a mount. Used in sorting. -func (m mounts) parts(i int) int { - return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator)) -} - -// registerMountPoints initializes the container mount points with the configured volumes and bind mounts. -// It follows the next sequence to decide what to mount in each final destination: -// -// 1. Select the previously configured mount points for the containers, if any. -// 2. Select the volumes mounted from another containers. Overrides previously configured mount point destination. -// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations. -// 4. Cleanup old volumes that are about to be reassigned. -func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *containertypes.HostConfig) (retErr error) { - binds := map[string]bool{} - mountPoints := map[string]*volumemounts.MountPoint{} - parser := volumemounts.NewParser(container.OS) - - ctx := context.TODO() - defer func() { - // clean up the container mountpoints once return with error - if retErr != nil { - for _, m := range mountPoints { - if m.Volume == nil { - continue - } - daemon.volumes.Release(ctx, m.Volume.Name(), container.ID) - } - } - }() - - dereferenceIfExists := func(destination string) { - if v, ok := mountPoints[destination]; ok { - logrus.Debugf("Duplicate mount point '%s'", destination) - if v.Volume != nil { - daemon.volumes.Release(ctx, v.Volume.Name(), container.ID) - } - } - } - - // 1. Read already configured mount points. - for destination, point := range container.MountPoints { - mountPoints[destination] = point - } - - // 2. Read volumes from other containers. 
- for _, v := range hostConfig.VolumesFrom { - containerID, mode, err := parser.ParseVolumesFrom(v) - if err != nil { - return err - } - - c, err := daemon.GetContainer(containerID) - if err != nil { - return err - } - - for _, m := range c.MountPoints { - cp := &volumemounts.MountPoint{ - Type: m.Type, - Name: m.Name, - Source: m.Source, - RW: m.RW && parser.ReadWrite(mode), - Driver: m.Driver, - Destination: m.Destination, - Propagation: m.Propagation, - Spec: m.Spec, - CopyData: false, - } - - if len(cp.Source) == 0 { - v, err := daemon.volumes.Get(ctx, cp.Name, volumeopts.WithGetDriver(cp.Driver), volumeopts.WithGetReference(container.ID)) - if err != nil { - return err - } - cp.Volume = &volumeWrapper{v: v, s: daemon.volumes} - } - dereferenceIfExists(cp.Destination) - mountPoints[cp.Destination] = cp - } - } - - // 3. Read bind mounts - for _, b := range hostConfig.Binds { - bind, err := parser.ParseMountRaw(b, hostConfig.VolumeDriver) - if err != nil { - return err - } - needsSlavePropagation, err := daemon.validateBindDaemonRoot(bind.Spec) - if err != nil { - return err - } - if needsSlavePropagation { - bind.Propagation = mount.PropagationRSlave - } - - // #10618 - _, tmpfsExists := hostConfig.Tmpfs[bind.Destination] - if binds[bind.Destination] || tmpfsExists { - return duplicateMountPointError(bind.Destination) - } - - if bind.Type == mounttypes.TypeVolume { - // create the volume - v, err := daemon.volumes.Create(ctx, bind.Name, bind.Driver, volumeopts.WithCreateReference(container.ID)) - if err != nil { - return err - } - bind.Volume = &volumeWrapper{v: v, s: daemon.volumes} - bind.Source = v.Mountpoint - // bind.Name is an already existing volume, we need to use that here - bind.Driver = v.Driver - if bind.Driver == volume.DefaultDriverName { - setBindModeIfNull(bind) - } - } - - binds[bind.Destination] = true - dereferenceIfExists(bind.Destination) - mountPoints[bind.Destination] = bind - } - - for _, cfg := range hostConfig.Mounts { - mp, err := parser.ParseMountSpec(cfg) - if err != nil { - return errdefs.InvalidParameter(err) - } - needsSlavePropagation, err := daemon.validateBindDaemonRoot(mp.Spec) - if err != nil { - return err - } - if needsSlavePropagation { - mp.Propagation = mount.PropagationRSlave - } - - if binds[mp.Destination] { - return duplicateMountPointError(cfg.Target) - } - - if mp.Type == mounttypes.TypeVolume { - var v *types.Volume - if cfg.VolumeOptions != nil { - var driverOpts map[string]string - if cfg.VolumeOptions.DriverConfig != nil { - driverOpts = cfg.VolumeOptions.DriverConfig.Options - } - v, err = daemon.volumes.Create(ctx, - mp.Name, - mp.Driver, - volumeopts.WithCreateReference(container.ID), - volumeopts.WithCreateOptions(driverOpts), - volumeopts.WithCreateLabels(cfg.VolumeOptions.Labels), - ) - } else { - v, err = daemon.volumes.Create(ctx, mp.Name, mp.Driver, volumeopts.WithCreateReference(container.ID)) - } - if err != nil { - return err - } - - mp.Volume = &volumeWrapper{v: v, s: daemon.volumes} - mp.Name = v.Name - mp.Driver = v.Driver - - if mp.Driver == volume.DefaultDriverName { - setBindModeIfNull(mp) - } - } - - binds[mp.Destination] = true - dereferenceIfExists(mp.Destination) - mountPoints[mp.Destination] = mp - } - - container.Lock() - - // 4. Cleanup old volumes that are about to be reassigned. 
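Steps 1-3 above layer mount sources by destination: previously configured mount points first, then volumes from other containers, then binds and mount specs, with each later layer dereferencing and overriding the earlier one. The precedence rule in miniature (mountPoint and layer are illustrative names, not the vendored types):

package main

import "fmt"

type mountPoint struct{ Source, Destination string }

// layer applies successive mount layers into dst; for a shared
// destination, the entry from the latest layer wins.
func layer(dst map[string]mountPoint, layers ...[]mountPoint) {
	for _, l := range layers {
		for _, mp := range l {
			dst[mp.Destination] = mp
		}
	}
}

func main() {
	final := map[string]mountPoint{}
	layer(final,
		[]mountPoint{{"vol-old", "/data"}},    // 1. already configured
		[]mountPoint{{"vol-from", "/data"}},   // 2. volumes-from
		[]mountPoint{{"/host/data", "/data"}}, // 3. binds
	)
	fmt.Println(final["/data"].Source) // /host/data
}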
- for _, m := range mountPoints { - if parser.IsBackwardCompatible(m) { - if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil { - daemon.volumes.Release(ctx, mp.Volume.Name(), container.ID) - } - } - } - container.MountPoints = mountPoints - - container.Unlock() - - return nil -} - -// lazyInitializeVolume initializes a mountpoint's volume if needed. -// This happens after a daemon restart. -func (daemon *Daemon) lazyInitializeVolume(containerID string, m *volumemounts.MountPoint) error { - if len(m.Driver) > 0 && m.Volume == nil { - v, err := daemon.volumes.Get(context.TODO(), m.Name, volumeopts.WithGetDriver(m.Driver), volumeopts.WithGetReference(containerID)) - if err != nil { - return err - } - m.Volume = &volumeWrapper{v: v, s: daemon.volumes} - } - return nil -} - -// backportMountSpec resolves mount specs (introduced in 1.13) from pre-1.13 -// mount configurations -// The container lock should not be held when calling this function. -// Changes are only made in-memory and may make changes to containers referenced -// by `container.HostConfig.VolumesFrom` -func (daemon *Daemon) backportMountSpec(container *container.Container) { - container.Lock() - defer container.Unlock() - - parser := volumemounts.NewParser(container.OS) - - maybeUpdate := make(map[string]bool) - for _, mp := range container.MountPoints { - if mp.Spec.Source != "" && mp.Type != "" { - continue - } - maybeUpdate[mp.Destination] = true - } - if len(maybeUpdate) == 0 { - return - } - - mountSpecs := make(map[string]bool, len(container.HostConfig.Mounts)) - for _, m := range container.HostConfig.Mounts { - mountSpecs[m.Target] = true - } - - binds := make(map[string]*volumemounts.MountPoint, len(container.HostConfig.Binds)) - for _, rawSpec := range container.HostConfig.Binds { - mp, err := parser.ParseMountRaw(rawSpec, container.HostConfig.VolumeDriver) - if err != nil { - logrus.WithError(err).Error("Got unexpected error while re-parsing raw volume spec during spec backport") - continue - } - binds[mp.Destination] = mp - } - - volumesFrom := make(map[string]volumemounts.MountPoint) - for _, fromSpec := range container.HostConfig.VolumesFrom { - from, _, err := parser.ParseVolumesFrom(fromSpec) - if err != nil { - logrus.WithError(err).WithField("id", container.ID).Error("Error reading volumes-from spec during mount spec backport") - continue - } - fromC, err := daemon.GetContainer(from) - if err != nil { - logrus.WithError(err).WithField("from-container", from).Error("Error looking up volumes-from container") - continue - } - - // make sure from container's specs have been backported - daemon.backportMountSpec(fromC) - - fromC.Lock() - for t, mp := range fromC.MountPoints { - volumesFrom[t] = *mp - } - fromC.Unlock() - } - - needsUpdate := func(containerMount, other *volumemounts.MountPoint) bool { - if containerMount.Type != other.Type || !reflect.DeepEqual(containerMount.Spec, other.Spec) { - return true - } - return false - } - - // main - for _, cm := range container.MountPoints { - if !maybeUpdate[cm.Destination] { - continue - } - // nothing to backport if from hostconfig.Mounts - if mountSpecs[cm.Destination] { - continue - } - - if mp, exists := binds[cm.Destination]; exists { - if needsUpdate(cm, mp) { - cm.Spec = mp.Spec - cm.Type = mp.Type - } - continue - } - - if cm.Name != "" { - if mp, exists := volumesFrom[cm.Destination]; exists { - if needsUpdate(cm, &mp) { - cm.Spec = mp.Spec - cm.Type = mp.Type - } - continue - } - - if cm.Type != "" { - // probably specified via the 
hostconfig.Mounts - continue - } - - // anon volume - cm.Type = mounttypes.TypeVolume - cm.Spec.Type = mounttypes.TypeVolume - } else { - if cm.Type != "" { - // already updated - continue - } - - cm.Type = mounttypes.TypeBind - cm.Spec.Type = mounttypes.TypeBind - cm.Spec.Source = cm.Source - if cm.Propagation != "" { - cm.Spec.BindOptions = &mounttypes.BindOptions{ - Propagation: cm.Propagation, - } - } - } - - cm.Spec.Target = cm.Destination - cm.Spec.ReadOnly = !cm.RW - } -} - -// VolumesService is used to perform volume operations -func (daemon *Daemon) VolumesService() *service.VolumesService { - return daemon.volumes -} - -type volumeMounter interface { - Mount(ctx context.Context, v *types.Volume, ref string) (string, error) - Unmount(ctx context.Context, v *types.Volume, ref string) error -} - -type volumeWrapper struct { - v *types.Volume - s volumeMounter -} - -func (v *volumeWrapper) Name() string { - return v.v.Name -} - -func (v *volumeWrapper) DriverName() string { - return v.v.Driver -} - -func (v *volumeWrapper) Path() string { - return v.v.Mountpoint -} - -func (v *volumeWrapper) Mount(ref string) (string, error) { - return v.s.Mount(context.TODO(), v.v, ref) -} - -func (v *volumeWrapper) Unmount(ref string) error { - return v.s.Unmount(context.TODO(), v.v, ref) -} - -func (v *volumeWrapper) CreatedAt() (time.Time, error) { - return time.Time{}, errors.New("not implemented") -} - -func (v *volumeWrapper) Status() map[string]interface{} { - return v.v.Status -} diff --git a/vendor/github.com/docker/docker/daemon/volumes_linux.go b/vendor/github.com/docker/docker/daemon/volumes_linux.go deleted file mode 100644 index cf3d9ed15..000000000 --- a/vendor/github.com/docker/docker/daemon/volumes_linux.go +++ /dev/null @@ -1,36 +0,0 @@ -package daemon - -import ( - "strings" - - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" -) - -// validateBindDaemonRoot ensures that if a given mountpoint's source is within -// the daemon root path, that the propagation is setup to prevent a container -// from holding private refereneces to a mount within the daemon root, which -// can cause issues when the daemon attempts to remove the mountpoint. 
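The containment test described above reduces to two prefix checks: either the bind source lies inside the daemon root, or the daemon root lies inside the bind source. A standalone sketch (overlapsDaemonRoot is a hypothetical name; the real check that follows then restricts propagation to rslave or rshared for overlapping sources):

package main

import (
	"fmt"
	"strings"
)

// overlapsDaemonRoot reports whether a bind source and the daemon root
// contain one another in either direction.
func overlapsDaemonRoot(source, daemonRoot string) bool {
	return strings.HasPrefix(source, daemonRoot) || strings.HasPrefix(daemonRoot, source)
}

func main() {
	fmt.Println(overlapsDaemonRoot("/var/lib/docker/volumes", "/var/lib/docker")) // true
	fmt.Println(overlapsDaemonRoot("/home/user/src", "/var/lib/docker"))          // false
}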
-func (daemon *Daemon) validateBindDaemonRoot(m mount.Mount) (bool, error) { - if m.Type != mount.TypeBind { - return false, nil - } - - // check if the source is within the daemon root, or if the daemon root is within the source - if !strings.HasPrefix(m.Source, daemon.root) && !strings.HasPrefix(daemon.root, m.Source) { - return false, nil - } - - if m.BindOptions == nil { - return true, nil - } - - switch m.BindOptions.Propagation { - case mount.PropagationRSlave, mount.PropagationRShared, "": - return m.BindOptions.Propagation == "", nil - default: - } - - return false, errdefs.InvalidParameter(errors.Errorf(`invalid mount config: must use either propagation mode "rslave" or "rshared" when mount source is within the daemon root, daemon root: %q, bind mount source: %q, propagation: %q`, daemon.root, m.Source, m.BindOptions.Propagation)) -} diff --git a/vendor/github.com/docker/docker/daemon/volumes_unix.go b/vendor/github.com/docker/docker/daemon/volumes_unix.go deleted file mode 100644 index efffefa76..000000000 --- a/vendor/github.com/docker/docker/daemon/volumes_unix.go +++ /dev/null @@ -1,156 +0,0 @@ -// +build !windows - -package daemon // import "github.com/docker/docker/daemon" - -import ( - "fmt" - "os" - "sort" - "strconv" - "strings" - - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/mount" - volumemounts "github.com/docker/docker/volume/mounts" -) - -// setupMounts iterates through each of the mount points for a container and -// calls Setup() on each. It also looks to see if is a network mount such as -// /etc/resolv.conf, and if it is not, appends it to the array of mounts. -func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, error) { - var mounts []container.Mount - // TODO: tmpfs mounts should be part of Mountpoints - tmpfsMounts := make(map[string]bool) - tmpfsMountInfo, err := c.TmpfsMounts() - if err != nil { - return nil, err - } - for _, m := range tmpfsMountInfo { - tmpfsMounts[m.Destination] = true - } - for _, m := range c.MountPoints { - if tmpfsMounts[m.Destination] { - continue - } - if err := daemon.lazyInitializeVolume(c.ID, m); err != nil { - return nil, err - } - // If the daemon is being shutdown, we should not let a container start if it is trying to - // mount the socket the daemon is listening on. During daemon shutdown, the socket - // (/var/run/docker.sock by default) doesn't exist anymore causing the call to m.Setup to - // create at directory instead. This in turn will prevent the daemon to restart. 
- checkfunc := func(m *volumemounts.MountPoint) error { - if _, exist := daemon.hosts[m.Source]; exist && daemon.IsShuttingDown() { - return fmt.Errorf("Could not mount %q to container while the daemon is shutting down", m.Source) - } - return nil - } - - path, err := m.Setup(c.MountLabel, daemon.idMappings.RootPair(), checkfunc) - if err != nil { - return nil, err - } - if !c.TrySetNetworkMount(m.Destination, path) { - mnt := container.Mount{ - Source: path, - Destination: m.Destination, - Writable: m.RW, - Propagation: string(m.Propagation), - } - if m.Volume != nil { - attributes := map[string]string{ - "driver": m.Volume.DriverName(), - "container": c.ID, - "destination": m.Destination, - "read/write": strconv.FormatBool(m.RW), - "propagation": string(m.Propagation), - } - daemon.LogVolumeEvent(m.Volume.Name(), "mount", attributes) - } - mounts = append(mounts, mnt) - } - } - - mounts = sortMounts(mounts) - netMounts := c.NetworkMounts() - // if we are going to mount any of the network files from container - // metadata, the ownership must be set properly for potential container - // remapped root (user namespaces) - rootIDs := daemon.idMappings.RootPair() - for _, mount := range netMounts { - // we should only modify ownership of network files within our own container - // metadata repository. If the user specifies a mount path external, it is - // up to the user to make sure the file has proper ownership for userns - if strings.Index(mount.Source, daemon.repository) == 0 { - if err := os.Chown(mount.Source, rootIDs.UID, rootIDs.GID); err != nil { - return nil, err - } - } - } - return append(mounts, netMounts...), nil -} - -// sortMounts sorts an array of mounts in lexicographic order. This ensure that -// when mounting, the mounts don't shadow other mounts. For example, if mounting -// /etc and /etc/resolv.conf, /etc/resolv.conf must not be mounted first. -func sortMounts(m []container.Mount) []container.Mount { - sort.Sort(mounts(m)) - return m -} - -// setBindModeIfNull is platform specific processing to ensure the -// shared mode is set to 'z' if it is null. This is called in the case -// of processing a named volume and not a typical bind. -func setBindModeIfNull(bind *volumemounts.MountPoint) { - if bind.Mode == "" { - bind.Mode = "z" - } -} - -func (daemon *Daemon) mountVolumes(container *container.Container) error { - mounts, err := daemon.setupMounts(container) - if err != nil { - return err - } - - for _, m := range mounts { - dest, err := container.GetResourcePath(m.Destination) - if err != nil { - return err - } - - var stat os.FileInfo - stat, err = os.Stat(m.Source) - if err != nil { - return err - } - if err = fileutils.CreateIfNotExists(dest, stat.IsDir()); err != nil { - return err - } - - opts := "rbind,ro" - if m.Writable { - opts = "rbind,rw" - } - - if err := mount.Mount(m.Source, dest, bindMountType, opts); err != nil { - return err - } - - // mountVolumes() seems to be called for temporary mounts - // outside the container. Soon these will be unmounted with - // lazy unmount option and given we have mounted the rbind, - // all the submounts will propagate if these are shared. If - // daemon is running in host namespace and has / as shared - // then these unmounts will propagate and unmount original - // mount as well. So make all these mounts rprivate. - // Do not use propagation property of volume as that should - // apply only when mounting happen inside the container. 
- if err := mount.MakeRPrivate(dest); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/docker/docker/daemon/volumes_windows.go b/vendor/github.com/docker/docker/daemon/volumes_windows.go deleted file mode 100644 index a2fb5152d..000000000 --- a/vendor/github.com/docker/docker/daemon/volumes_windows.go +++ /dev/null @@ -1,51 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "sort" - - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/idtools" - volumemounts "github.com/docker/docker/volume/mounts" -) - -// setupMounts configures the mount points for a container by appending each -// of the configured mounts on the container to the OCI mount structure -// which will ultimately be passed into the oci runtime during container creation. -// It also ensures each of the mounts are lexicographically sorted. - -// BUGBUG TODO Windows containerd. This would be much better if it returned -// an array of runtime spec mounts, not container mounts. Then no need to -// do multiple transitions. - -func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, error) { - var mnts []container.Mount - for _, mount := range c.MountPoints { // type is volumemounts.MountPoint - if err := daemon.lazyInitializeVolume(c.ID, mount); err != nil { - return nil, err - } - s, err := mount.Setup(c.MountLabel, idtools.IDPair{UID: 0, GID: 0}, nil) - if err != nil { - return nil, err - } - - mnts = append(mnts, container.Mount{ - Source: s, - Destination: mount.Destination, - Writable: mount.RW, - }) - } - - sort.Sort(mounts(mnts)) - return mnts, nil -} - -// setBindModeIfNull is platform specific processing which is a no-op on -// Windows. -func setBindModeIfNull(bind *volumemounts.MountPoint) { - return -} - -func (daemon *Daemon) validateBindDaemonRoot(m mount.Mount) (bool, error) { - return false, nil -} diff --git a/vendor/github.com/docker/docker/daemon/wait.go b/vendor/github.com/docker/docker/daemon/wait.go deleted file mode 100644 index 545f24c7b..000000000 --- a/vendor/github.com/docker/docker/daemon/wait.go +++ /dev/null @@ -1,23 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -import ( - "context" - - "github.com/docker/docker/container" -) - -// ContainerWait waits until the given container is in a certain state -// indicated by the given condition. If the container is not found, a nil -// channel and non-nil error is returned immediately. If the container is -// found, a status result will be sent on the returned channel once the wait -// condition is met or if an error occurs waiting for the container (such as a -// context timeout or cancellation). On a successful wait, the exit code of the -// container is returned in the status with a non-nil Err() value. -func (daemon *Daemon) ContainerWait(ctx context.Context, name string, condition container.WaitCondition) (<-chan container.StateStatus, error) { - cntr, err := daemon.GetContainer(name) - if err != nil { - return nil, err - } - - return cntr.Wait(ctx, condition), nil -} diff --git a/vendor/github.com/docker/docker/daemon/workdir.go b/vendor/github.com/docker/docker/daemon/workdir.go deleted file mode 100644 index 90bba79b5..000000000 --- a/vendor/github.com/docker/docker/daemon/workdir.go +++ /dev/null @@ -1,20 +0,0 @@ -package daemon // import "github.com/docker/docker/daemon" - -// ContainerCreateWorkdir creates the working directory. 
This solves the -// issue arising from https://github.com/docker/docker/issues/27545, -// which was initially fixed by https://github.com/docker/docker/pull/27884. But that fix -// was too expensive in terms of performance on Windows. Instead, -// https://github.com/docker/docker/pull/28514 introduces this new functionality -// where the builder calls into the backend here to create the working directory. -func (daemon *Daemon) ContainerCreateWorkdir(cID string) error { - container, err := daemon.GetContainer(cID) - if err != nil { - return err - } - err = daemon.Mount(container) - if err != nil { - return err - } - defer daemon.Unmount(container) - return container.SetupWorkingDirectory(daemon.idMappings.RootPair()) -} diff --git a/vendor/github.com/docker/docker/distribution/config.go b/vendor/github.com/docker/docker/distribution/config.go deleted file mode 100644 index 55f1f8c2d..000000000 --- a/vendor/github.com/docker/docker/distribution/config.go +++ /dev/null @@ -1,267 +0,0 @@ -package distribution // import "github.com/docker/docker/distribution" - -import ( - "context" - "encoding/json" - "fmt" - "io" - "runtime" - - "github.com/docker/distribution" - "github.com/docker/distribution/manifest/schema2" - "github.com/docker/docker/api/types" - "github.com/docker/docker/distribution/metadata" - "github.com/docker/docker/distribution/xfer" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/system" - refstore "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/docker/libtrust" - "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// Config stores configuration for communicating -// with a registry. -type Config struct { - // MetaHeaders stores HTTP headers with metadata about the image - MetaHeaders map[string][]string - // AuthConfig holds authentication credentials for authenticating with - // the registry. - AuthConfig *types.AuthConfig - // ProgressOutput is the interface for showing the status of the pull - // operation. - ProgressOutput progress.Output - // RegistryService is the registry service to use for TLS configuration - // and endpoint lookup. - RegistryService registry.Service - // ImageEventLogger notifies events for a given image - ImageEventLogger func(id, name, action string) - // MetadataStore is the storage backend for distribution-specific - // metadata. - MetadataStore metadata.Store - // ImageStore manages images. - ImageStore ImageConfigStore - // ReferenceStore manages tags. This value is optional, when excluded - // content will not be tagged. - ReferenceStore refstore.Store - // RequireSchema2 ensures that only schema2 manifests are used. - RequireSchema2 bool -} - -// ImagePullConfig stores pull configuration. -type ImagePullConfig struct { - Config - - // DownloadManager manages concurrent pulls. - DownloadManager RootFSDownloadManager - // Schema2Types is the valid schema2 configuration types allowed - // by the pull operation. - Schema2Types []string - // OS is the requested operating system of the image being pulled to ensure it can be validated - // when the host OS supports multiple image operating systems. - OS string -} - -// ImagePushConfig stores push configuration. -type ImagePushConfig struct { - Config - - // ConfigMediaType is the configuration media type for - // schema2 manifests. 
- ConfigMediaType string - // LayerStores (indexed by operating system) manages layers. - LayerStores map[string]PushLayerProvider - // TrustKey is the private key for legacy signatures. This is typically - // an ephemeral key, since these signatures are no longer verified. - TrustKey libtrust.PrivateKey - // UploadManager dispatches uploads. - UploadManager *xfer.LayerUploadManager -} - -// ImageConfigStore handles storing and getting image configurations -// by digest. Allows getting an image configurations rootfs from the -// configuration. -type ImageConfigStore interface { - Put([]byte) (digest.Digest, error) - Get(digest.Digest) ([]byte, error) - RootFSFromConfig([]byte) (*image.RootFS, error) - PlatformFromConfig([]byte) (*specs.Platform, error) -} - -// PushLayerProvider provides layers to be pushed by ChainID. -type PushLayerProvider interface { - Get(layer.ChainID) (PushLayer, error) -} - -// PushLayer is a pushable layer with metadata about the layer -// and access to the content of the layer. -type PushLayer interface { - ChainID() layer.ChainID - DiffID() layer.DiffID - Parent() PushLayer - Open() (io.ReadCloser, error) - Size() (int64, error) - MediaType() string - Release() -} - -// RootFSDownloadManager handles downloading of the rootfs -type RootFSDownloadManager interface { - // Download downloads the layers into the given initial rootfs and - // returns the final rootfs. - // Given progress output to track download progress - // Returns function to release download resources - Download(ctx context.Context, initialRootFS image.RootFS, os string, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) -} - -type imageConfigStore struct { - image.Store -} - -// NewImageConfigStoreFromStore returns an ImageConfigStore backed -// by an image.Store for container images. -func NewImageConfigStoreFromStore(is image.Store) ImageConfigStore { - return &imageConfigStore{ - Store: is, - } -} - -func (s *imageConfigStore) Put(c []byte) (digest.Digest, error) { - id, err := s.Store.Create(c) - return digest.Digest(id), err -} - -func (s *imageConfigStore) Get(d digest.Digest) ([]byte, error) { - img, err := s.Store.Get(image.IDFromDigest(d)) - if err != nil { - return nil, err - } - return img.RawJSON(), nil -} - -func (s *imageConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) { - var unmarshalledConfig image.Image - if err := json.Unmarshal(c, &unmarshalledConfig); err != nil { - return nil, err - } - return unmarshalledConfig.RootFS, nil -} - -func (s *imageConfigStore) PlatformFromConfig(c []byte) (*specs.Platform, error) { - var unmarshalledConfig image.Image - if err := json.Unmarshal(c, &unmarshalledConfig); err != nil { - return nil, err - } - - // fail immediately on Windows when downloading a non-Windows image - // and vice versa. Exception on Windows if Linux Containers are enabled. 
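[Editor's note: the OS check that follows, reduced to a standalone sketch. The helper name validateImageOS is hypothetical, and the sketch is stricter than the vendored code: it rejects any cross-OS image and omits the LCOW exception entirely.]

package main

import (
	"fmt"
	"runtime"
)

// validateImageOS mirrors the shape of the check below: refuse images
// built for a different operating system, then default an empty OS
// field to the host OS. (The real code additionally allows linux
// images on Windows when LCOW is enabled; that branch is omitted.)
func validateImageOS(imageOS string) (string, error) {
	if imageOS != "" && imageOS != runtime.GOOS {
		return "", fmt.Errorf("image operating system %q cannot be used on this platform", imageOS)
	}
	if imageOS == "" {
		imageOS = runtime.GOOS
	}
	return imageOS, nil
}

func main() {
	os, err := validateImageOS("")
	fmt.Println(os, err) // prints the host OS and <nil>
}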
- if runtime.GOOS == "windows" && unmarshalledConfig.OS == "linux" && !system.LCOWSupported() { - return nil, fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS) - } else if runtime.GOOS != "windows" && unmarshalledConfig.OS == "windows" { - return nil, fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS) - } - - os := unmarshalledConfig.OS - if os == "" { - os = runtime.GOOS - } - if !system.IsOSSupported(os) { - return nil, system.ErrNotSupportedOperatingSystem - } - return &specs.Platform{OS: os, OSVersion: unmarshalledConfig.OSVersion}, nil -} - -type storeLayerProvider struct { - ls layer.Store -} - -// NewLayerProvidersFromStores returns layer providers backed by -// an instance of LayerStore. Only getting layers as gzipped -// tars is supported. -func NewLayerProvidersFromStores(lss map[string]layer.Store) map[string]PushLayerProvider { - plps := make(map[string]PushLayerProvider) - for os, ls := range lss { - plps[os] = &storeLayerProvider{ls: ls} - } - return plps -} - -func (p *storeLayerProvider) Get(lid layer.ChainID) (PushLayer, error) { - if lid == "" { - return &storeLayer{ - Layer: layer.EmptyLayer, - }, nil - } - l, err := p.ls.Get(lid) - if err != nil { - return nil, err - } - - sl := storeLayer{ - Layer: l, - ls: p.ls, - } - if d, ok := l.(distribution.Describable); ok { - return &describableStoreLayer{ - storeLayer: sl, - describable: d, - }, nil - } - - return &sl, nil -} - -type storeLayer struct { - layer.Layer - ls layer.Store -} - -func (l *storeLayer) Parent() PushLayer { - p := l.Layer.Parent() - if p == nil { - return nil - } - sl := storeLayer{ - Layer: p, - ls: l.ls, - } - if d, ok := p.(distribution.Describable); ok { - return &describableStoreLayer{ - storeLayer: sl, - describable: d, - } - } - - return &sl -} - -func (l *storeLayer) Open() (io.ReadCloser, error) { - return l.Layer.TarStream() -} - -func (l *storeLayer) Size() (int64, error) { - return l.Layer.DiffSize() -} - -func (l *storeLayer) MediaType() string { - // layer store always returns uncompressed tars - return schema2.MediaTypeUncompressedLayer -} - -func (l *storeLayer) Release() { - if l.ls != nil { - layer.ReleaseAndLog(l.ls, l.Layer) - } -} - -type describableStoreLayer struct { - storeLayer - describable distribution.Describable -} - -func (l *describableStoreLayer) Descriptor() distribution.Descriptor { - return l.describable.Descriptor() -} diff --git a/vendor/github.com/docker/docker/distribution/errors.go b/vendor/github.com/docker/docker/distribution/errors.go deleted file mode 100644 index e2913d45d..000000000 --- a/vendor/github.com/docker/docker/distribution/errors.go +++ /dev/null @@ -1,206 +0,0 @@ -package distribution // import "github.com/docker/docker/distribution" - -import ( - "fmt" - "net/url" - "strings" - "syscall" - - "github.com/docker/distribution" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/client" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/docker/distribution/xfer" - "github.com/docker/docker/errdefs" - "github.com/sirupsen/logrus" -) - -// ErrNoSupport is an error type used for errors indicating that an operation -// is not supported. It encapsulates a more specific error. 
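[Editor's note: the ErrNoSupport type declared just below wraps a more specific error while delegating its message to the cause. A minimal standalone sketch of the same pattern, using a hypothetical wrappedError name and adding an Unwrap method so modern errors.Is/errors.As callers can reach the cause:]

package main

import (
	"errors"
	"fmt"
)

// wrappedError carries a more specific underlying error, in the same
// spirit as ErrNoSupport: Error() delegates to the cause when set.
type wrappedError struct{ Err error }

func (e wrappedError) Error() string {
	if e.Err == nil {
		return "not supported"
	}
	return e.Err.Error()
}

// Unwrap makes the wrapper compatible with errors.Is and errors.As.
func (e wrappedError) Unwrap() error { return e.Err }

func main() {
	cause := errors.New("manifest schema v1 not supported")
	err := wrappedError{Err: cause}
	fmt.Println(err)                   // prints the cause's message
	fmt.Println(errors.Is(err, cause)) // true
}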
-type ErrNoSupport struct{ Err error } - -func (e ErrNoSupport) Error() string { - if e.Err == nil { - return "not supported" - } - return e.Err.Error() -} - -// fallbackError wraps an error that can possibly allow fallback to a different -// endpoint. -type fallbackError struct { - // err is the error being wrapped. - err error - // confirmedV2 is set to true if it was confirmed that the registry - // supports the v2 protocol. This is used to limit fallbacks to the v1 - // protocol. - confirmedV2 bool - // transportOK is set to true if we managed to speak HTTP with the - // registry. This confirms that we're using appropriate TLS settings - // (or lack of TLS). - transportOK bool -} - -// Error renders the FallbackError as a string. -func (f fallbackError) Error() string { - return f.Cause().Error() -} - -func (f fallbackError) Cause() error { - return f.err -} - -// shouldV2Fallback returns true if this error is a reason to fall back to v1. -func shouldV2Fallback(err errcode.Error) bool { - switch err.Code { - case errcode.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown: - return true - } - return false -} - -type notFoundError struct { - cause errcode.Error - ref reference.Named -} - -func (e notFoundError) Error() string { - switch e.cause.Code { - case errcode.ErrorCodeDenied: - // ErrorCodeDenied is used when access to the repository was denied - return fmt.Sprintf("pull access denied for %s, repository does not exist or may require 'docker login'", reference.FamiliarName(e.ref)) - case v2.ErrorCodeManifestUnknown: - return fmt.Sprintf("manifest for %s not found", reference.FamiliarString(e.ref)) - case v2.ErrorCodeNameUnknown: - return fmt.Sprintf("repository %s not found", reference.FamiliarName(e.ref)) - } - // Shouldn't get here, but this is better than returning an empty string - return e.cause.Message -} - -func (e notFoundError) NotFound() {} - -func (e notFoundError) Cause() error { - return e.cause -} - -// TranslatePullError is used to convert an error from a registry pull -// operation to an error representing the entire pull operation. Any error -// information which is not used by the returned error gets output to -// log at info level. -func TranslatePullError(err error, ref reference.Named) error { - switch v := err.(type) { - case errcode.Errors: - if len(v) != 0 { - for _, extra := range v[1:] { - logrus.Infof("Ignoring extra error returned from registry: %v", extra) - } - return TranslatePullError(v[0], ref) - } - case errcode.Error: - switch v.Code { - case errcode.ErrorCodeDenied, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown: - return notFoundError{v, ref} - } - case xfer.DoNotRetry: - return TranslatePullError(v.Err, ref) - } - - return errdefs.Unknown(err) -} - -// continueOnError returns true if we should fallback to the next endpoint -// as a result of this error. -func continueOnError(err error, mirrorEndpoint bool) bool { - switch v := err.(type) { - case errcode.Errors: - if len(v) == 0 { - return true - } - return continueOnError(v[0], mirrorEndpoint) - case ErrNoSupport: - return continueOnError(v.Err, mirrorEndpoint) - case errcode.Error: - return mirrorEndpoint || shouldV2Fallback(v) - case *client.UnexpectedHTTPResponseError: - return true - case ImageConfigPullError: - // ImageConfigPullError only happens with v2 images, v1 fallback is - // unnecessary. - // Failures from a mirror endpoint should result in fallback to the - // canonical repo. 
- return mirrorEndpoint - case error: - return !strings.Contains(err.Error(), strings.ToLower(syscall.ESRCH.Error())) - } - // let's be nice and fallback if the error is a completely - // unexpected one. - // If new errors have to be handled in some way, please - // add them to the switch above. - return true -} - -// retryOnError wraps the error in xfer.DoNotRetry if we should not retry the -// operation after this error. -func retryOnError(err error) error { - switch v := err.(type) { - case errcode.Errors: - if len(v) != 0 { - return retryOnError(v[0]) - } - case errcode.Error: - switch v.Code { - case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeUnsupported, errcode.ErrorCodeDenied, errcode.ErrorCodeTooManyRequests, v2.ErrorCodeNameUnknown: - return xfer.DoNotRetry{Err: err} - } - case *url.Error: - switch v.Err { - case auth.ErrNoBasicAuthCredentials, auth.ErrNoToken: - return xfer.DoNotRetry{Err: v.Err} - } - return retryOnError(v.Err) - case *client.UnexpectedHTTPResponseError: - return xfer.DoNotRetry{Err: err} - case error: - if err == distribution.ErrBlobUnknown { - return xfer.DoNotRetry{Err: err} - } - if strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) { - return xfer.DoNotRetry{Err: err} - } - } - // let's be nice and fallback if the error is a completely - // unexpected one. - // If new errors have to be handled in some way, please - // add them to the switch above. - return err -} - -type invalidManifestClassError struct { - mediaType string - class string -} - -func (e invalidManifestClassError) Error() string { - return fmt.Sprintf("Encountered remote %q(%s) when fetching", e.mediaType, e.class) -} - -func (e invalidManifestClassError) InvalidParameter() {} - -type invalidManifestFormatError struct{} - -func (invalidManifestFormatError) Error() string { - return "unsupported manifest format" -} - -func (invalidManifestFormatError) InvalidParameter() {} - -type reservedNameError string - -func (e reservedNameError) Error() string { - return "'" + string(e) + "' is a reserved name" -} - -func (e reservedNameError) Forbidden() {} diff --git a/vendor/github.com/docker/docker/distribution/metadata/metadata.go b/vendor/github.com/docker/docker/distribution/metadata/metadata.go deleted file mode 100644 index 4ae8223bd..000000000 --- a/vendor/github.com/docker/docker/distribution/metadata/metadata.go +++ /dev/null @@ -1,75 +0,0 @@ -package metadata // import "github.com/docker/docker/distribution/metadata" - -import ( - "io/ioutil" - "os" - "path/filepath" - "sync" - - "github.com/docker/docker/pkg/ioutils" -) - -// Store implements a K/V store for mapping distribution-related IDs -// to on-disk layer IDs and image IDs. The namespace identifies the type of -// mapping (i.e. "v1ids" or "artifacts"). MetadataStore is goroutine-safe. -type Store interface { - // Get retrieves data by namespace and key. - Get(namespace string, key string) ([]byte, error) - // Set writes data indexed by namespace and key. - Set(namespace, key string, value []byte) error - // Delete removes data indexed by namespace and key. - Delete(namespace, key string) error -} - -// FSMetadataStore uses the filesystem to associate metadata with layer and -// image IDs. -type FSMetadataStore struct { - sync.RWMutex - basePath string -} - -// NewFSMetadataStore creates a new filesystem-based metadata store. 
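[Editor's note: the FSMetadataStore defined below maps (namespace, key) pairs to one file per key, grouped in per-namespace directories. A compact stdlib-only sketch of that layout, with illustrative names (fsStore is not the vendored type) and modern os.ReadFile/os.WriteFile in place of ioutil:]

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"sync"
)

// fsStore maps (namespace, key) pairs to files on disk, one directory
// per namespace, mirroring the FSMetadataStore layout below.
type fsStore struct {
	mu   sync.RWMutex
	base string
}

func (s *fsStore) path(namespace, key string) string {
	return filepath.Join(s.base, namespace, key)
}

func (s *fsStore) Set(namespace, key string, value []byte) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	p := s.path(namespace, key)
	if err := os.MkdirAll(filepath.Dir(p), 0o755); err != nil {
		return err
	}
	return os.WriteFile(p, value, 0o644)
}

func (s *fsStore) Get(namespace, key string) ([]byte, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return os.ReadFile(s.path(namespace, key))
}

func main() {
	st := &fsStore{base: os.TempDir()}
	_ = st.Set("v1ids", "abc", []byte("layer-digest"))
	v, _ := st.Get("v1ids", "abc")
	fmt.Println(string(v))
}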
-func NewFSMetadataStore(basePath string) (*FSMetadataStore, error) { - if err := os.MkdirAll(basePath, 0700); err != nil { - return nil, err - } - return &FSMetadataStore{ - basePath: basePath, - }, nil -} - -func (store *FSMetadataStore) path(namespace, key string) string { - return filepath.Join(store.basePath, namespace, key) -} - -// Get retrieves data by namespace and key. The data is read from a file named -// after the key, stored in the namespace's directory. -func (store *FSMetadataStore) Get(namespace string, key string) ([]byte, error) { - store.RLock() - defer store.RUnlock() - - return ioutil.ReadFile(store.path(namespace, key)) -} - -// Set writes data indexed by namespace and key. The data is written to a file -// named after the key, stored in the namespace's directory. -func (store *FSMetadataStore) Set(namespace, key string, value []byte) error { - store.Lock() - defer store.Unlock() - - path := store.path(namespace, key) - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return err - } - return ioutils.AtomicWriteFile(path, value, 0644) -} - -// Delete removes data indexed by namespace and key. The data file named after -// the key, stored in the namespace's directory is deleted. -func (store *FSMetadataStore) Delete(namespace, key string) error { - store.Lock() - defer store.Unlock() - - path := store.path(namespace, key) - return os.Remove(path) -} diff --git a/vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go b/vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go deleted file mode 100644 index 5575c59b0..000000000 --- a/vendor/github.com/docker/docker/distribution/metadata/v1_id_service.go +++ /dev/null @@ -1,51 +0,0 @@ -package metadata // import "github.com/docker/docker/distribution/metadata" - -import ( - "github.com/docker/docker/image/v1" - "github.com/docker/docker/layer" - "github.com/pkg/errors" -) - -// V1IDService maps v1 IDs to layers on disk. -type V1IDService struct { - store Store -} - -// NewV1IDService creates a new V1 ID mapping service. -func NewV1IDService(store Store) *V1IDService { - return &V1IDService{ - store: store, - } -} - -// namespace returns the namespace used by this service. -func (idserv *V1IDService) namespace() string { - return "v1id" -} - -// Get finds a layer by its V1 ID. -func (idserv *V1IDService) Get(v1ID, registry string) (layer.DiffID, error) { - if idserv.store == nil { - return "", errors.New("no v1IDService storage") - } - if err := v1.ValidateID(v1ID); err != nil { - return layer.DiffID(""), err - } - - idBytes, err := idserv.store.Get(idserv.namespace(), registry+","+v1ID) - if err != nil { - return layer.DiffID(""), err - } - return layer.DiffID(idBytes), nil -} - -// Set associates an image with a V1 ID. 
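[Editor's note: FSMetadataStore.Set above persists through ioutils.AtomicWriteFile. The underlying write-to-temp-then-rename technique, sketched with the standard library only; atomicWriteFile here is an illustrative stand-in, not the actual ioutils implementation.]

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// atomicWriteFile writes data to a temporary file in the target
// directory and renames it into place, so readers never observe a
// partially written file.
func atomicWriteFile(filename string, data []byte, perm os.FileMode) error {
	dir := filepath.Dir(filename)
	tmp, err := os.CreateTemp(dir, ".tmp-*")
	if err != nil {
		return err
	}
	// Best-effort cleanup; fails harmlessly once the rename succeeds.
	defer os.Remove(tmp.Name())

	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Chmod(perm); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), filename)
}

func main() {
	path := filepath.Join(os.TempDir(), "meta.json")
	if err := atomicWriteFile(path, []byte(`{"ok":true}`), 0o644); err != nil {
		fmt.Println("write failed:", err)
	}
}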
-func (idserv *V1IDService) Set(v1ID, registry string, id layer.DiffID) error { - if idserv.store == nil { - return nil - } - if err := v1.ValidateID(v1ID); err != nil { - return err - } - return idserv.store.Set(idserv.namespace(), registry+","+v1ID, []byte(id)) -} diff --git a/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go b/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go deleted file mode 100644 index fe3349855..000000000 --- a/vendor/github.com/docker/docker/distribution/metadata/v2_metadata_service.go +++ /dev/null @@ -1,241 +0,0 @@ -package metadata // import "github.com/docker/docker/distribution/metadata" - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/hex" - "encoding/json" - "errors" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/layer" - "github.com/opencontainers/go-digest" -) - -// V2MetadataService maps layer IDs to a set of known metadata for -// the layer. -type V2MetadataService interface { - GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) - GetDiffID(dgst digest.Digest) (layer.DiffID, error) - Add(diffID layer.DiffID, metadata V2Metadata) error - TagAndAdd(diffID layer.DiffID, hmacKey []byte, metadata V2Metadata) error - Remove(metadata V2Metadata) error -} - -// v2MetadataService implements V2MetadataService -type v2MetadataService struct { - store Store -} - -var _ V2MetadataService = &v2MetadataService{} - -// V2Metadata contains the digest and source repository information for a layer. -type V2Metadata struct { - Digest digest.Digest - SourceRepository string - // HMAC hashes above attributes with recent authconfig digest used as a key in order to determine matching - // metadata entries accompanied by the same credentials without actually exposing them. - HMAC string -} - -// CheckV2MetadataHMAC returns true if the given "meta" is tagged with a hmac hashed by the given "key". -func CheckV2MetadataHMAC(meta *V2Metadata, key []byte) bool { - if len(meta.HMAC) == 0 || len(key) == 0 { - return len(meta.HMAC) == 0 && len(key) == 0 - } - mac := hmac.New(sha256.New, key) - mac.Write([]byte(meta.Digest)) - mac.Write([]byte(meta.SourceRepository)) - expectedMac := mac.Sum(nil) - - storedMac, err := hex.DecodeString(meta.HMAC) - if err != nil { - return false - } - - return hmac.Equal(storedMac, expectedMac) -} - -// ComputeV2MetadataHMAC returns a hmac for the given "meta" hash by the given key. -func ComputeV2MetadataHMAC(key []byte, meta *V2Metadata) string { - if len(key) == 0 || meta == nil { - return "" - } - mac := hmac.New(sha256.New, key) - mac.Write([]byte(meta.Digest)) - mac.Write([]byte(meta.SourceRepository)) - return hex.EncodeToString(mac.Sum(nil)) -} - -// ComputeV2MetadataHMACKey returns a key for the given "authConfig" that can be used to hash v2 metadata -// entries. -func ComputeV2MetadataHMACKey(authConfig *types.AuthConfig) ([]byte, error) { - if authConfig == nil { - return nil, nil - } - key := authConfigKeyInput{ - Username: authConfig.Username, - Password: authConfig.Password, - Auth: authConfig.Auth, - IdentityToken: authConfig.IdentityToken, - RegistryToken: authConfig.RegistryToken, - } - buf, err := json.Marshal(&key) - if err != nil { - return nil, err - } - return []byte(digest.FromBytes(buf)), nil -} - -// authConfigKeyInput is a reduced AuthConfig structure holding just relevant credential data eligible for -// hmac key creation. 
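[Editor's note: the HMAC tagging above (ComputeV2MetadataHMAC / CheckV2MetadataHMAC) is the standard compute-then-constant-time-compare pattern over the fields that identify a metadata entry. A self-contained sketch; tag and check are illustrative names:]

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// tag computes an HMAC-SHA256 over the identifying fields of a
// metadata entry, keyed by a secret derived from the credentials in use.
func tag(key []byte, digest, sourceRepo string) string {
	mac := hmac.New(sha256.New, key)
	mac.Write([]byte(digest))
	mac.Write([]byte(sourceRepo))
	return hex.EncodeToString(mac.Sum(nil))
}

// check recomputes the HMAC and compares in constant time, as
// CheckV2MetadataHMAC does with hmac.Equal.
func check(key []byte, digest, sourceRepo, storedHex string) bool {
	stored, err := hex.DecodeString(storedHex)
	if err != nil {
		return false
	}
	mac := hmac.New(sha256.New, key)
	mac.Write([]byte(digest))
	mac.Write([]byte(sourceRepo))
	return hmac.Equal(stored, mac.Sum(nil))
}

func main() {
	key := []byte("credential-derived-key")
	t := tag(key, "sha256:abc", "docker.io/library/busybox")
	fmt.Println(check(key, "sha256:abc", "docker.io/library/busybox", t)) // true
}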
-type authConfigKeyInput struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Auth string `json:"auth,omitempty"` - - IdentityToken string `json:"identitytoken,omitempty"` - RegistryToken string `json:"registrytoken,omitempty"` -} - -// maxMetadata is the number of metadata entries to keep per layer DiffID. -const maxMetadata = 50 - -// NewV2MetadataService creates a new diff ID to v2 metadata mapping service. -func NewV2MetadataService(store Store) V2MetadataService { - return &v2MetadataService{ - store: store, - } -} - -func (serv *v2MetadataService) diffIDNamespace() string { - return "v2metadata-by-diffid" -} - -func (serv *v2MetadataService) digestNamespace() string { - return "diffid-by-digest" -} - -func (serv *v2MetadataService) diffIDKey(diffID layer.DiffID) string { - return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Hex() -} - -func (serv *v2MetadataService) digestKey(dgst digest.Digest) string { - return string(dgst.Algorithm()) + "/" + dgst.Hex() -} - -// GetMetadata finds the metadata associated with a layer DiffID. -func (serv *v2MetadataService) GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) { - if serv.store == nil { - return nil, errors.New("no metadata storage") - } - jsonBytes, err := serv.store.Get(serv.diffIDNamespace(), serv.diffIDKey(diffID)) - if err != nil { - return nil, err - } - - var metadata []V2Metadata - if err := json.Unmarshal(jsonBytes, &metadata); err != nil { - return nil, err - } - - return metadata, nil -} - -// GetDiffID finds a layer DiffID from a digest. -func (serv *v2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) { - if serv.store == nil { - return layer.DiffID(""), errors.New("no metadata storage") - } - diffIDBytes, err := serv.store.Get(serv.digestNamespace(), serv.digestKey(dgst)) - if err != nil { - return layer.DiffID(""), err - } - - return layer.DiffID(diffIDBytes), nil -} - -// Add associates metadata with a layer DiffID. If too many metadata entries are -// present, the oldest one is dropped. -func (serv *v2MetadataService) Add(diffID layer.DiffID, metadata V2Metadata) error { - if serv.store == nil { - // Support a service which has no backend storage, in this case - // an add becomes a no-op. - // TODO: implement in memory storage - return nil - } - oldMetadata, err := serv.GetMetadata(diffID) - if err != nil { - oldMetadata = nil - } - newMetadata := make([]V2Metadata, 0, len(oldMetadata)+1) - - // Copy all other metadata to new slice - for _, oldMeta := range oldMetadata { - if oldMeta != metadata { - newMetadata = append(newMetadata, oldMeta) - } - } - - newMetadata = append(newMetadata, metadata) - - if len(newMetadata) > maxMetadata { - newMetadata = newMetadata[len(newMetadata)-maxMetadata:] - } - - jsonBytes, err := json.Marshal(newMetadata) - if err != nil { - return err - } - - err = serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes) - if err != nil { - return err - } - - return serv.store.Set(serv.digestNamespace(), serv.digestKey(metadata.Digest), []byte(diffID)) -} - -// TagAndAdd amends the given "meta" for hmac hashed by the given "hmacKey" and associates it with a layer -// DiffID. If too many metadata entries are present, the oldest one is dropped. 
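[Editor's note: the Add method above implements a small "dedupe, append, cap" policy on the per-layer metadata list. The core of that policy in isolation, over strings instead of V2Metadata for brevity; appendCapped is an illustrative name:]

package main

import "fmt"

const maxEntries = 50 // mirrors maxMetadata in the code above

// appendCapped removes any existing copy of entry, appends it as the
// newest element, and drops the oldest entries beyond the cap.
func appendCapped(entries []string, entry string) []string {
	out := make([]string, 0, len(entries)+1)
	for _, e := range entries {
		if e != entry {
			out = append(out, e)
		}
	}
	out = append(out, entry)
	if len(out) > maxEntries {
		out = out[len(out)-maxEntries:]
	}
	return out
}

func main() {
	l := []string{"a", "b"}
	fmt.Println(appendCapped(l, "a")) // [b a] — "a" moved to the newest slot
}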
-func (serv *v2MetadataService) TagAndAdd(diffID layer.DiffID, hmacKey []byte, meta V2Metadata) error { - meta.HMAC = ComputeV2MetadataHMAC(hmacKey, &meta) - return serv.Add(diffID, meta) -} - -// Remove disassociates a metadata entry from a layer DiffID. -func (serv *v2MetadataService) Remove(metadata V2Metadata) error { - if serv.store == nil { - // Support a service which has no backend storage, in this case - // a remove becomes a no-op. - // TODO: implement in memory storage - return nil - } - diffID, err := serv.GetDiffID(metadata.Digest) - if err != nil { - return err - } - oldMetadata, err := serv.GetMetadata(diffID) - if err != nil { - oldMetadata = nil - } - newMetadata := make([]V2Metadata, 0, len(oldMetadata)) - - // Copy all other metadata to new slice - for _, oldMeta := range oldMetadata { - if oldMeta != metadata { - newMetadata = append(newMetadata, oldMeta) - } - } - - if len(newMetadata) == 0 { - return serv.store.Delete(serv.diffIDNamespace(), serv.diffIDKey(diffID)) - } - - jsonBytes, err := json.Marshal(newMetadata) - if err != nil { - return err - } - - return serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes) -} diff --git a/vendor/github.com/docker/docker/distribution/pull.go b/vendor/github.com/docker/docker/distribution/pull.go deleted file mode 100644 index 0240eb05f..000000000 --- a/vendor/github.com/docker/docker/distribution/pull.go +++ /dev/null @@ -1,206 +0,0 @@ -package distribution // import "github.com/docker/docker/distribution" - -import ( - "context" - "fmt" - "runtime" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api" - "github.com/docker/docker/distribution/metadata" - "github.com/docker/docker/pkg/progress" - refstore "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// Puller is an interface that abstracts pulling for different API versions. -type Puller interface { - // Pull tries to pull the image referenced by `ref`. - // It returns an error, if any; a fallbackError result signals that the - // caller may retry the pull on the next configured endpoint. - Pull(ctx context.Context, ref reference.Named, os string) error -} - -// newPuller returns a Puller interface that will pull from either a v1 or v2 -// registry. The endpoint argument contains a Version field that determines -// whether a v1 or v2 puller will be created. The other parameters are passed -// through to the underlying puller implementation for use during the actual -// pull operation. -func newPuller(endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePullConfig *ImagePullConfig) (Puller, error) { - switch endpoint.Version { - case registry.APIVersion2: - return &v2Puller{ - V2MetadataService: metadata.NewV2MetadataService(imagePullConfig.MetadataStore), - endpoint: endpoint, - config: imagePullConfig, - repoInfo: repoInfo, - }, nil - case registry.APIVersion1: - return &v1Puller{ - v1IDService: metadata.NewV1IDService(imagePullConfig.MetadataStore), - endpoint: endpoint, - config: imagePullConfig, - repoInfo: repoInfo, - }, nil - } - return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) -} - -// Pull initiates a pull operation. ref is the reference to the image to pull; -// its tag may be either empty, or indicate a specific tag to pull.
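[Editor's note: Pull, defined just below, loops over candidate endpoints and distinguishes errors that permit falling back to the next endpoint from those that abort the pull, remembering the last relevant error. The control flow, reduced to a skeleton with hypothetical names (retryable, pullFrom):]

package main

import (
	"errors"
	"fmt"
)

// retryable marks an error as safe to fall back from, loosely
// analogous to the fallbackError wrapper in the code above.
type retryable struct{ err error }

func (r retryable) Error() string { return r.err.Error() }

// pullFrom is a placeholder; a real implementation would talk to the
// registry at the given endpoint.
func pullFrom(endpoint string) error {
	if endpoint == "https://mirror.invalid" {
		return retryable{errors.New("mirror unavailable")}
	}
	return nil
}

func pull(endpoints []string) error {
	var lastErr error
	for _, ep := range endpoints {
		err := pullFrom(ep)
		if err == nil {
			return nil // success: stop trying endpoints
		}
		var r retryable
		if errors.As(err, &r) {
			lastErr = err // remember, then try the next endpoint
			continue
		}
		return err // non-retryable: abort immediately
	}
	if lastErr == nil {
		lastErr = errors.New("no endpoints configured")
	}
	return lastErr
}

func main() {
	fmt.Println(pull([]string{"https://mirror.invalid", "https://registry.example"}))
}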
-func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullConfig) error { - // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := imagePullConfig.RegistryService.ResolveRepository(ref) - if err != nil { - return err - } - - // makes sure name is not `scratch` - if err := ValidateRepoName(repoInfo.Name); err != nil { - return err - } - - endpoints, err := imagePullConfig.RegistryService.LookupPullEndpoints(reference.Domain(repoInfo.Name)) - if err != nil { - return err - } - - var ( - lastErr error - - // discardNoSupportErrors is used to track whether an endpoint encountered an error of type registry.ErrNoSupport - // By default it is false, which means that if an ErrNoSupport error is encountered, it will be saved in lastErr. - // As soon as another kind of error is encountered, discardNoSupportErrors is set to true, avoiding the saving of - // any subsequent ErrNoSupport errors in lastErr. - // It's needed for pull-by-digest on v1 endpoints: if there are only v1 endpoints configured, the error should be - // returned and displayed, but if there was a v2 endpoint which supports pull-by-digest, then the last relevant - // error is the ones from v2 endpoints not v1. - discardNoSupportErrors bool - - // confirmedV2 is set to true if a pull attempt managed to - // confirm that it was talking to a v2 registry. This will - // prevent fallback to the v1 protocol. - confirmedV2 bool - - // confirmedTLSRegistries is a map indicating which registries - // are known to be using TLS. There should never be a plaintext - // retry for any of these. - confirmedTLSRegistries = make(map[string]struct{}) - ) - for _, endpoint := range endpoints { - if imagePullConfig.RequireSchema2 && endpoint.Version == registry.APIVersion1 { - continue - } - - if confirmedV2 && endpoint.Version == registry.APIVersion1 { - logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) - continue - } - - if endpoint.URL.Scheme != "https" { - if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { - logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) - continue - } - } - - logrus.Debugf("Trying to pull %s from %s %s", reference.FamiliarName(repoInfo.Name), endpoint.URL, endpoint.Version) - - puller, err := newPuller(endpoint, repoInfo, imagePullConfig) - if err != nil { - lastErr = err - continue - } - - // Make sure we default the OS if it hasn't been supplied - if imagePullConfig.OS == "" { - imagePullConfig.OS = runtime.GOOS - } - - if err := puller.Pull(ctx, ref, imagePullConfig.OS); err != nil { - // Was this pull cancelled? If so, don't try to fall - // back. - fallback := false - select { - case <-ctx.Done(): - default: - if fallbackErr, ok := err.(fallbackError); ok { - fallback = true - confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 - if fallbackErr.transportOK && endpoint.URL.Scheme == "https" { - confirmedTLSRegistries[endpoint.URL.Host] = struct{}{} - } - err = fallbackErr.err - } - } - if fallback { - if _, ok := err.(ErrNoSupport); !ok { - // Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors. - discardNoSupportErrors = true - // append subsequent errors - lastErr = err - } else if !discardNoSupportErrors { - // Save the ErrNoSupport error, because it's either the first error or all encountered errors - // were also ErrNoSupport errors. 
- // append subsequent errors - lastErr = err - } - logrus.Infof("Attempting next endpoint for pull after error: %v", err) - continue - } - logrus.Errorf("Not continuing with pull after error: %v", err) - return TranslatePullError(err, ref) - } - - imagePullConfig.ImageEventLogger(reference.FamiliarString(ref), reference.FamiliarName(repoInfo.Name), "pull") - return nil - } - - if lastErr == nil { - lastErr = fmt.Errorf("no endpoints found for %s", reference.FamiliarString(ref)) - } - - return TranslatePullError(lastErr, ref) -} - -// writeStatus writes a status message to out. If layersDownloaded is true, the -// status message indicates that a newer image was downloaded. Otherwise, it -// indicates that the image is up to date. requestedTag is the tag the message -// will refer to. -func writeStatus(requestedTag string, out progress.Output, layersDownloaded bool) { - if layersDownloaded { - progress.Message(out, "", "Status: Downloaded newer image for "+requestedTag) - } else { - progress.Message(out, "", "Status: Image is up to date for "+requestedTag) - } -} - -// ValidateRepoName validates the name of a repository. -func ValidateRepoName(name reference.Named) error { - if reference.FamiliarName(name) == api.NoBaseImageSpecifier { - return errors.WithStack(reservedNameError(api.NoBaseImageSpecifier)) - } - return nil -} - -func addDigestReference(store refstore.Store, ref reference.Named, dgst digest.Digest, id digest.Digest) error { - dgstRef, err := reference.WithDigest(reference.TrimNamed(ref), dgst) - if err != nil { - return err - } - - if oldTagID, err := store.Get(dgstRef); err == nil { - if oldTagID != id { - // Updating digests not supported by reference store - logrus.Errorf("Image ID for digest %s changed from %s to %s, cannot update", dgst.String(), oldTagID, id) - } - return nil - } else if err != refstore.ErrDoesNotExist { - return err - } - - return store.AddDigest(dgstRef, id, true) -} diff --git a/vendor/github.com/docker/docker/distribution/pull_v1.go b/vendor/github.com/docker/docker/distribution/pull_v1.go deleted file mode 100644 index c26d88122..000000000 --- a/vendor/github.com/docker/docker/distribution/pull_v1.go +++ /dev/null @@ -1,367 +0,0 @@ -package distribution // import "github.com/docker/docker/distribution" - -import ( - "context" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "net/url" - "os" - "strings" - "time" - - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/distribution/metadata" - "github.com/docker/docker/distribution/xfer" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/image" - "github.com/docker/docker/image/v1" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/registry" - "github.com/sirupsen/logrus" -) - -type v1Puller struct { - v1IDService *metadata.V1IDService - endpoint registry.APIEndpoint - config *ImagePullConfig - repoInfo *registry.RepositoryInfo - session *registry.Session -} - -func (p *v1Puller) Pull(ctx context.Context, ref reference.Named, os string) error { - if _, isCanonical := ref.(reference.Canonical); isCanonical { - // Allowing fallback, because HTTPS v1 is before HTTP v2 - return fallbackError{err: ErrNoSupport{Err: errors.New("Cannot pull by digest with v1 registry")}} - } - - tlsConfig, err := 
p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) - if err != nil { - return err - } - // Adds Docker-specific headers as well as user-specified headers (metaHeaders) - tr := transport.NewTransport( - // TODO(tiborvass): was ReceiveTimeout - registry.NewTransport(tlsConfig), - registry.Headers(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)..., - ) - client := registry.HTTPClient(tr) - v1Endpoint := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders) - p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) - if err != nil { - // TODO(dmcgowan): Check if should fallback - logrus.Debugf("Fallback from error: %s", err) - return fallbackError{err: err} - } - if err := p.pullRepository(ctx, ref); err != nil { - // TODO(dmcgowan): Check if should fallback - return err - } - progress.Message(p.config.ProgressOutput, "", p.repoInfo.Name.Name()+": this image was pulled from a legacy registry. Important: This registry version will not be supported in future versions of docker.") - - return nil -} - -// Note use auth.Scope rather than reference.Named due to this warning causing Jenkins CI to fail: -// warning: ref can be github.com/docker/docker/vendor/github.com/docker/distribution/registry/client/auth.Scope (interfacer) -func (p *v1Puller) pullRepository(ctx context.Context, ref auth.Scope) error { - progress.Message(p.config.ProgressOutput, "", "Pulling repository "+p.repoInfo.Name.Name()) - - tagged, isTagged := ref.(reference.NamedTagged) - - repoData, err := p.session.GetRepositoryData(p.repoInfo.Name) - if err != nil { - if strings.Contains(err.Error(), "HTTP code: 404") { - if isTagged { - return fmt.Errorf("Error: image %s:%s not found", reference.Path(p.repoInfo.Name), tagged.Tag()) - } - return fmt.Errorf("Error: image %s not found", reference.Path(p.repoInfo.Name)) - } - // Unexpected HTTP error - return err - } - - logrus.Debug("Retrieving the tag list") - var tagsList map[string]string - if !isTagged { - tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo.Name) - } else { - var tagID string - tagsList = make(map[string]string) - tagID, err = p.session.GetRemoteTag(repoData.Endpoints, p.repoInfo.Name, tagged.Tag()) - if err == registry.ErrRepoNotFound { - return fmt.Errorf("Tag %s not found in repository %s", tagged.Tag(), p.repoInfo.Name.Name()) - } - tagsList[tagged.Tag()] = tagID - } - if err != nil { - logrus.Errorf("unable to get remote tags: %s", err) - return err - } - - for tag, id := range tagsList { - repoData.ImgList[id] = ®istry.ImgData{ - ID: id, - Tag: tag, - Checksum: "", - } - } - - layersDownloaded := false - for _, imgData := range repoData.ImgList { - if isTagged && imgData.Tag != tagged.Tag() { - continue - } - - err := p.downloadImage(ctx, repoData, imgData, &layersDownloaded) - if err != nil { - return err - } - } - - writeStatus(reference.FamiliarString(ref), p.config.ProgressOutput, layersDownloaded) - return nil -} - -func (p *v1Puller) downloadImage(ctx context.Context, repoData *registry.RepositoryData, img *registry.ImgData, layersDownloaded *bool) error { - if img.Tag == "" { - logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID) - return nil - } - - localNameRef, err := reference.WithTag(p.repoInfo.Name, img.Tag) - if err != nil { - retErr := fmt.Errorf("Image (id: %s) has invalid tag: %s", img.ID, img.Tag) - logrus.Debug(retErr.Error()) - return retErr - } - - if err := v1.ValidateID(img.ID); err != nil { - return err - } - - 
progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s", img.Tag, p.repoInfo.Name.Name()) - success := false - var lastErr error - for _, ep := range p.repoInfo.Index.Mirrors { - ep += "v1/" - progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.Name.Name(), ep)) - if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil { - // Don't report errors when pulling from mirrors. - logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.Name.Name(), ep, err) - continue - } - success = true - break - } - if !success { - for _, ep := range repoData.Endpoints { - progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.Name.Name(), ep) - if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil { - // It's not ideal that only the last error is returned, it would be better to concatenate the errors. - // As the error is also given to the output stream the user will see the error. - lastErr = err - progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.Name.Name(), ep, err) - continue - } - success = true - break - } - } - if !success { - err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.Name.Name(), lastErr) - progress.Update(p.config.ProgressOutput, stringid.TruncateID(img.ID), err.Error()) - return err - } - return nil -} - -func (p *v1Puller) pullImage(ctx context.Context, v1ID, endpoint string, localNameRef reference.Named, layersDownloaded *bool) (err error) { - var history []string - history, err = p.session.GetRemoteHistory(v1ID, endpoint) - if err != nil { - return err - } - if len(history) < 1 { - return fmt.Errorf("empty history for image %s", v1ID) - } - progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1ID), "Pulling dependent layers") - - var ( - descriptors []xfer.DownloadDescriptor - newHistory []image.History - imgJSON []byte - imgSize int64 - ) - - // Iterate over layers, in order from bottom-most to top-most. Download - // config for all layers and create descriptors. 
- for i := len(history) - 1; i >= 0; i-- { - v1LayerID := history[i] - imgJSON, imgSize, err = p.downloadLayerConfig(v1LayerID, endpoint) - if err != nil { - return err - } - - // Create a new-style config from the legacy configs - h, err := v1.HistoryFromConfig(imgJSON, false) - if err != nil { - return err - } - newHistory = append(newHistory, h) - - layerDescriptor := &v1LayerDescriptor{ - v1LayerID: v1LayerID, - indexName: p.repoInfo.Index.Name, - endpoint: endpoint, - v1IDService: p.v1IDService, - layersDownloaded: layersDownloaded, - layerSize: imgSize, - session: p.session, - } - - descriptors = append(descriptors, layerDescriptor) - } - - rootFS := image.NewRootFS() - resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, "", descriptors, p.config.ProgressOutput) - if err != nil { - return err - } - defer release() - - config, err := v1.MakeConfigFromV1Config(imgJSON, &resultRootFS, newHistory) - if err != nil { - return err - } - - imageID, err := p.config.ImageStore.Put(config) - if err != nil { - return err - } - - if p.config.ReferenceStore != nil { - if err := p.config.ReferenceStore.AddTag(localNameRef, imageID, true); err != nil { - return err - } - } - - return nil -} - -func (p *v1Puller) downloadLayerConfig(v1LayerID, endpoint string) (imgJSON []byte, imgSize int64, err error) { - progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Pulling metadata") - - retries := 5 - for j := 1; j <= retries; j++ { - imgJSON, imgSize, err := p.session.GetRemoteImageJSON(v1LayerID, endpoint) - if err != nil && j == retries { - progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Error pulling layer metadata") - return nil, 0, err - } else if err != nil { - time.Sleep(time.Duration(j) * 500 * time.Millisecond) - continue - } - - return imgJSON, imgSize, nil - } - - // not reached - return nil, 0, nil -} - -type v1LayerDescriptor struct { - v1LayerID string - indexName string - endpoint string - v1IDService *metadata.V1IDService - layersDownloaded *bool - layerSize int64 - session *registry.Session - tmpFile *os.File -} - -func (ld *v1LayerDescriptor) Key() string { - return "v1:" + ld.v1LayerID -} - -func (ld *v1LayerDescriptor) ID() string { - return stringid.TruncateID(ld.v1LayerID) -} - -func (ld *v1LayerDescriptor) DiffID() (layer.DiffID, error) { - return ld.v1IDService.Get(ld.v1LayerID, ld.indexName) -} - -func (ld *v1LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { - progress.Update(progressOutput, ld.ID(), "Pulling fs layer") - layerReader, err := ld.session.GetRemoteImageLayer(ld.v1LayerID, ld.endpoint, ld.layerSize) - if err != nil { - progress.Update(progressOutput, ld.ID(), "Error pulling dependent layers") - if uerr, ok := err.(*url.Error); ok { - err = uerr.Err - } - if terr, ok := err.(net.Error); ok && terr.Timeout() { - return nil, 0, err - } - return nil, 0, xfer.DoNotRetry{Err: err} - } - *ld.layersDownloaded = true - - ld.tmpFile, err = ioutil.TempFile("", "GetImageBlob") - if err != nil { - layerReader.Close() - return nil, 0, err - } - - reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerReader), progressOutput, ld.layerSize, ld.ID(), "Downloading") - defer reader.Close() - - _, err = io.Copy(ld.tmpFile, reader) - if err != nil { - ld.Close() - return nil, 0, err - } - - progress.Update(progressOutput, ld.ID(), "Download complete") - - logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), ld.tmpFile.Name()) - - 
ld.tmpFile.Seek(0, 0) - - // hand off the temporary file to the download manager, so it will only - // be closed once - tmpFile := ld.tmpFile - ld.tmpFile = nil - - return ioutils.NewReadCloserWrapper(tmpFile, func() error { - tmpFile.Close() - err := os.RemoveAll(tmpFile.Name()) - if err != nil { - logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) - } - return err - }), ld.layerSize, nil -} - -func (ld *v1LayerDescriptor) Close() { - if ld.tmpFile != nil { - ld.tmpFile.Close() - if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { - logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) - } - ld.tmpFile = nil - } -} - -func (ld *v1LayerDescriptor) Registered(diffID layer.DiffID) { - // Cache mapping from this layer's DiffID to the blobsum - ld.v1IDService.Set(ld.v1LayerID, ld.indexName, diffID) -} diff --git a/vendor/github.com/docker/docker/distribution/pull_v2.go b/vendor/github.com/docker/docker/distribution/pull_v2.go deleted file mode 100644 index 60a894b1c..000000000 --- a/vendor/github.com/docker/docker/distribution/pull_v2.go +++ /dev/null @@ -1,941 +0,0 @@ -package distribution // import "github.com/docker/docker/distribution" - -import ( - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/url" - "os" - "runtime" - "strings" - - "github.com/docker/distribution" - "github.com/docker/distribution/manifest/manifestlist" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/manifest/schema2" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/distribution/metadata" - "github.com/docker/docker/distribution/xfer" - "github.com/docker/docker/image" - "github.com/docker/docker/image/v1" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/system" - refstore "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -var ( - errRootFSMismatch = errors.New("layers from manifest don't match image configuration") - errRootFSInvalid = errors.New("invalid rootfs in image configuration") -) - -// ImageConfigPullError is an error pulling the image config blob -// (only applies to schema2). -type ImageConfigPullError struct { - Err error -} - -// Error returns the error string for ImageConfigPullError. -func (e ImageConfigPullError) Error() string { - return "error pulling image configuration: " + e.Err.Error() -} - -type v2Puller struct { - V2MetadataService metadata.V2MetadataService - endpoint registry.APIEndpoint - config *ImagePullConfig - repoInfo *registry.RepositoryInfo - repo distribution.Repository - // confirmedV2 is set to true if we confirm we're talking to a v2 - // registry. This is used to limit fallbacks to the v1 protocol. 
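[Editor's note: the temp-file handoff in pull_v1.go above uses ioutils.NewReadCloserWrapper so the file is closed and removed exactly once, by whoever ends up owning the reader. The same idea with only the standard library; readCloserWithCleanup and handoff are illustrative names:]

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// readCloserWithCleanup runs a cleanup function when Close is called,
// mirroring what ioutils.NewReadCloserWrapper is used for above.
type readCloserWithCleanup struct {
	io.Reader
	cleanup func() error
}

func (r *readCloserWithCleanup) Close() error { return r.cleanup() }

// handoff transfers ownership of the temp file to the returned reader:
// closing the reader closes and removes the file.
func handoff(tmp *os.File) io.ReadCloser {
	return &readCloserWithCleanup{
		Reader: tmp,
		cleanup: func() error {
			tmp.Close()
			return os.Remove(tmp.Name()) // best-effort temp cleanup
		},
	}
}

func main() {
	tmp, _ := os.CreateTemp("", "layer-*")
	io.Copy(tmp, strings.NewReader("layer bytes"))
	tmp.Seek(0, io.SeekStart)

	rc := handoff(tmp) // caller now owns close + cleanup
	b, _ := io.ReadAll(rc)
	rc.Close()
	fmt.Println(string(b))
}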
- confirmedV2 bool -} - -func (p *v2Puller) Pull(ctx context.Context, ref reference.Named, os string) (err error) { - // TODO(tiborvass): was ReceiveTimeout - p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") - if err != nil { - logrus.Warnf("Error getting v2 registry: %v", err) - return err - } - - if err = p.pullV2Repository(ctx, ref, os); err != nil { - if _, ok := err.(fallbackError); ok { - return err - } - if continueOnError(err, p.endpoint.Mirror) { - return fallbackError{ - err: err, - confirmedV2: p.confirmedV2, - transportOK: true, - } - } - } - return err -} - -func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named, os string) (err error) { - var layersDownloaded bool - if !reference.IsNameOnly(ref) { - layersDownloaded, err = p.pullV2Tag(ctx, ref, os) - if err != nil { - return err - } - } else { - tags, err := p.repo.Tags(ctx).All(ctx) - if err != nil { - // If this repository doesn't exist on V2, we should - // permit a fallback to V1. - return allowV1Fallback(err) - } - - // The v2 registry knows about this repository, so we will not - // allow fallback to the v1 protocol even if we encounter an - // error later on. - p.confirmedV2 = true - - for _, tag := range tags { - tagRef, err := reference.WithTag(ref, tag) - if err != nil { - return err - } - pulledNew, err := p.pullV2Tag(ctx, tagRef, os) - if err != nil { - // Since this is the pull-all-tags case, don't - // allow an error pulling a particular tag to - // make the whole pull fall back to v1. - if fallbackErr, ok := err.(fallbackError); ok { - return fallbackErr.err - } - return err - } - // pulledNew is true if either new layers were downloaded OR if existing images were newly tagged - // TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus? 
- layersDownloaded = layersDownloaded || pulledNew - } - } - - writeStatus(reference.FamiliarString(ref), p.config.ProgressOutput, layersDownloaded) - - return nil -} - -type v2LayerDescriptor struct { - digest digest.Digest - diffID layer.DiffID - repoInfo *registry.RepositoryInfo - repo distribution.Repository - V2MetadataService metadata.V2MetadataService - tmpFile *os.File - verifier digest.Verifier - src distribution.Descriptor -} - -func (ld *v2LayerDescriptor) Key() string { - return "v2:" + ld.digest.String() -} - -func (ld *v2LayerDescriptor) ID() string { - return stringid.TruncateID(ld.digest.String()) -} - -func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) { - if ld.diffID != "" { - return ld.diffID, nil - } - return ld.V2MetadataService.GetDiffID(ld.digest) -} - -func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { - logrus.Debugf("pulling blob %q", ld.digest) - - var ( - err error - offset int64 - ) - - if ld.tmpFile == nil { - ld.tmpFile, err = createDownloadFile() - if err != nil { - return nil, 0, xfer.DoNotRetry{Err: err} - } - } else { - offset, err = ld.tmpFile.Seek(0, os.SEEK_END) - if err != nil { - logrus.Debugf("error seeking to end of download file: %v", err) - offset = 0 - - ld.tmpFile.Close() - if err := os.Remove(ld.tmpFile.Name()); err != nil { - logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) - } - ld.tmpFile, err = createDownloadFile() - if err != nil { - return nil, 0, xfer.DoNotRetry{Err: err} - } - } else if offset != 0 { - logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset) - } - } - - tmpFile := ld.tmpFile - - layerDownload, err := ld.open(ctx) - if err != nil { - logrus.Errorf("Error initiating layer download: %v", err) - return nil, 0, retryOnError(err) - } - - if offset != 0 { - _, err := layerDownload.Seek(offset, os.SEEK_SET) - if err != nil { - if err := ld.truncateDownloadFile(); err != nil { - return nil, 0, xfer.DoNotRetry{Err: err} - } - return nil, 0, err - } - } - size, err := layerDownload.Seek(0, os.SEEK_END) - if err != nil { - // Seek failed, perhaps because there was no Content-Length - // header. This shouldn't fail the download, because we can - // still continue without a progress bar. - size = 0 - } else { - if size != 0 && offset > size { - logrus.Debug("Partial download is larger than full blob. Starting over") - offset = 0 - if err := ld.truncateDownloadFile(); err != nil { - return nil, 0, xfer.DoNotRetry{Err: err} - } - } - - // Restore the seek offset either at the beginning of the - // stream, or just after the last byte we have from previous - // attempts. 
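// The v2 Download above resumes interrupted transfers: it seeks the partial
// temp file to its end to learn how many bytes already arrived, seeks the
// remote reader to the same offset, and copies only the remainder. A
// self-contained sketch of that arithmetic; resumeCopy is an illustrative
// helper name, and the sample data is invented.
package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// resumeCopy copies src into dst starting from the number of bytes dst
// already holds, and reports how many new bytes were written.
func resumeCopy(dst io.ReadWriteSeeker, src io.ReadSeeker) (int64, error) {
	offset, err := dst.Seek(0, io.SeekEnd) // bytes already downloaded
	if err != nil {
		return 0, err
	}
	if _, err := src.Seek(offset, io.SeekStart); err != nil {
		return 0, err
	}
	return io.Copy(dst, src)
}

func main() {
	src := strings.NewReader("hello, layer data")
	dst, err := os.CreateTemp("", "resume")
	if err != nil {
		panic(err)
	}
	defer os.Remove(dst.Name())
	defer dst.Close()
	dst.WriteString("hello, ") // pretend a previous attempt got this far
	n, err := resumeCopy(dst, src)
	fmt.Println(n, err) // 10 <nil>
}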
- _, err = layerDownload.Seek(offset, os.SEEK_SET) - if err != nil { - return nil, 0, err - } - } - - reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading") - defer reader.Close() - - if ld.verifier == nil { - ld.verifier = ld.digest.Verifier() - } - - _, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier)) - if err != nil { - if err == transport.ErrWrongCodeForByteRange { - if err := ld.truncateDownloadFile(); err != nil { - return nil, 0, xfer.DoNotRetry{Err: err} - } - return nil, 0, err - } - return nil, 0, retryOnError(err) - } - - progress.Update(progressOutput, ld.ID(), "Verifying Checksum") - - if !ld.verifier.Verified() { - err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest) - logrus.Error(err) - - // Allow a retry if this digest verification error happened - // after a resumed download. - if offset != 0 { - if err := ld.truncateDownloadFile(); err != nil { - return nil, 0, xfer.DoNotRetry{Err: err} - } - - return nil, 0, err - } - return nil, 0, xfer.DoNotRetry{Err: err} - } - - progress.Update(progressOutput, ld.ID(), "Download complete") - - logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name()) - - _, err = tmpFile.Seek(0, os.SEEK_SET) - if err != nil { - tmpFile.Close() - if err := os.Remove(tmpFile.Name()); err != nil { - logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) - } - ld.tmpFile = nil - ld.verifier = nil - return nil, 0, xfer.DoNotRetry{Err: err} - } - - // hand off the temporary file to the download manager, so it will only - // be closed once - ld.tmpFile = nil - - return ioutils.NewReadCloserWrapper(tmpFile, func() error { - tmpFile.Close() - err := os.RemoveAll(tmpFile.Name()) - if err != nil { - logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) - } - return err - }), size, nil -} - -func (ld *v2LayerDescriptor) Close() { - if ld.tmpFile != nil { - ld.tmpFile.Close() - if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { - logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) - } - } -} - -func (ld *v2LayerDescriptor) truncateDownloadFile() error { - // Need a new hash context since we will be redoing the download - ld.verifier = nil - - if _, err := ld.tmpFile.Seek(0, os.SEEK_SET); err != nil { - logrus.Errorf("error seeking to beginning of download file: %v", err) - return err - } - - if err := ld.tmpFile.Truncate(0); err != nil { - logrus.Errorf("error truncating download file: %v", err) - return err - } - - return nil -} - -func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) { - // Cache mapping from this layer's DiffID to the blobsum - ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.Name.Name()}) -} - -func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named, os string) (tagUpdated bool, err error) { - manSvc, err := p.repo.Manifests(ctx) - if err != nil { - return false, err - } - - var ( - manifest distribution.Manifest - tagOrDigest string // Used for logging/progress only - ) - if digested, isDigested := ref.(reference.Canonical); isDigested { - manifest, err = manSvc.Get(ctx, digested.Digest()) - if err != nil { - return false, err - } - tagOrDigest = digested.Digest().String() - } else if tagged, isTagged := ref.(reference.NamedTagged); isTagged { - manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag())) - if err != nil { - return false, allowV1Fallback(err) - } - tagOrDigest = 
tagged.Tag() - } else { - return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", reference.FamiliarString(ref)) - } - - if manifest == nil { - return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest) - } - - if m, ok := manifest.(*schema2.DeserializedManifest); ok { - var allowedMediatype bool - for _, t := range p.config.Schema2Types { - if m.Manifest.Config.MediaType == t { - allowedMediatype = true - break - } - } - if !allowedMediatype { - configClass := mediaTypeClasses[m.Manifest.Config.MediaType] - if configClass == "" { - configClass = "unknown" - } - return false, invalidManifestClassError{m.Manifest.Config.MediaType, configClass} - } - } - - // If manSvc.Get succeeded, we can be confident that the registry on - // the other side speaks the v2 protocol. - p.confirmedV2 = true - - logrus.Debugf("Pulling ref from V2 registry: %s", reference.FamiliarString(ref)) - progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+reference.FamiliarName(p.repo.Named())) - - var ( - id digest.Digest - manifestDigest digest.Digest - ) - - switch v := manifest.(type) { - case *schema1.SignedManifest: - if p.config.RequireSchema2 { - return false, fmt.Errorf("invalid manifest: not schema2") - } - id, manifestDigest, err = p.pullSchema1(ctx, ref, v, os) - if err != nil { - return false, err - } - case *schema2.DeserializedManifest: - id, manifestDigest, err = p.pullSchema2(ctx, ref, v, os) - if err != nil { - return false, err - } - case *manifestlist.DeserializedManifestList: - id, manifestDigest, err = p.pullManifestList(ctx, ref, v, os) - if err != nil { - return false, err - } - default: - return false, invalidManifestFormatError{} - } - - progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String()) - - if p.config.ReferenceStore != nil { - oldTagID, err := p.config.ReferenceStore.Get(ref) - if err == nil { - if oldTagID == id { - return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id) - } - } else if err != refstore.ErrDoesNotExist { - return false, err - } - - if canonical, ok := ref.(reference.Canonical); ok { - if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil { - return false, err - } - } else { - if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil { - return false, err - } - if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil { - return false, err - } - } - } - return true, nil -} - -func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Reference, unverifiedManifest *schema1.SignedManifest, requestedOS string) (id digest.Digest, manifestDigest digest.Digest, err error) { - var verifiedManifest *schema1.Manifest - verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref) - if err != nil { - return "", "", err - } - - rootFS := image.NewRootFS() - - // remove duplicate layers and check parent chain validity - err = fixManifestLayers(verifiedManifest) - if err != nil { - return "", "", err - } - - var descriptors []xfer.DownloadDescriptor - - // Image history converted to the new format - var history []image.History - - // Note that the order of this loop is in the direction of bottom-most - // to top-most, so that the downloads slice gets ordered correctly. 
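// The Download method above verifies layer content as it streams: bytes are
// copied through io.TeeReader into a digest.Verifier, and Verified() is
// checked once the copy completes, so corrupt blobs are rejected without a
// second read. A compact, runnable sketch of that flow using the same
// go-digest API; the payload is invented.
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/opencontainers/go-digest"
)

func main() {
	payload := []byte("layer bytes")
	expected := digest.FromBytes(payload) // digest recorded in the manifest

	var sink bytes.Buffer // stands in for the temp file
	verifier := expected.Verifier()
	if _, err := io.Copy(&sink, io.TeeReader(bytes.NewReader(payload), verifier)); err != nil {
		panic(err)
	}
	fmt.Println(verifier.Verified()) // true; false would mean a corrupt blob
}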
- for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- { - blobSum := verifiedManifest.FSLayers[i].BlobSum - - var throwAway struct { - ThrowAway bool `json:"throwaway,omitempty"` - } - if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil { - return "", "", err - } - - h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway) - if err != nil { - return "", "", err - } - history = append(history, h) - - if throwAway.ThrowAway { - continue - } - - layerDescriptor := &v2LayerDescriptor{ - digest: blobSum, - repoInfo: p.repoInfo, - repo: p.repo, - V2MetadataService: p.V2MetadataService, - } - - descriptors = append(descriptors, layerDescriptor) - } - - // The v1 manifest itself doesn't directly contain an OS. However, - // the history does, but unfortunately that's a string, so search through - // all the history until hopefully we find one which indicates the OS. - // supertest2014/nyan is an example of a registry image with schemav1. - configOS := runtime.GOOS - if system.LCOWSupported() { - type config struct { - Os string `json:"os,omitempty"` - } - for _, v := range verifiedManifest.History { - var c config - if err := json.Unmarshal([]byte(v.V1Compatibility), &c); err == nil { - if c.Os != "" { - configOS = c.Os - break - } - } - } - } - - // Early bath if the requested OS doesn't match that of the configuration. - // This avoids doing the download, only to potentially fail later. - if !strings.EqualFold(configOS, requestedOS) { - return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configOS, requestedOS) - } - - resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, configOS, descriptors, p.config.ProgressOutput) - if err != nil { - return "", "", err - } - defer release() - - config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history) - if err != nil { - return "", "", err - } - - imageID, err := p.config.ImageStore.Put(config) - if err != nil { - return "", "", err - } - - manifestDigest = digest.FromBytes(unverifiedManifest.Canonical) - - return imageID, manifestDigest, nil -} - -func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest, requestedOS string) (id digest.Digest, manifestDigest digest.Digest, err error) { - manifestDigest, err = schema2ManifestDigest(ref, mfst) - if err != nil { - return "", "", err - } - - target := mfst.Target() - if _, err := p.config.ImageStore.Get(target.Digest); err == nil { - // If the image already exists locally, no need to pull - // anything. - return target.Digest, manifestDigest, nil - } - - var descriptors []xfer.DownloadDescriptor - - // Note that the order of this loop is in the direction of bottom-most - // to top-most, so that the downloads slice gets ordered correctly. 
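// pullSchema1 above peeks into each history entry's V1Compatibility JSON for
// the "throwaway" flag that marks empty layers and, under LCOW, an "os"
// field; decoding into a tiny purpose-built struct keeps that cheap. A
// self-contained sketch of the technique; the sample JSON is invented for
// illustration.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	v1Compat := `{"id":"abc","os":"linux","throwaway":true}`

	var meta struct {
		ThrowAway bool   `json:"throwaway,omitempty"`
		Os        string `json:"os,omitempty"`
	}
	if err := json.Unmarshal([]byte(v1Compat), &meta); err != nil {
		panic(err)
	}
	fmt.Println(meta.ThrowAway, meta.Os) // true linux
}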
- for _, d := range mfst.Layers { - layerDescriptor := &v2LayerDescriptor{ - digest: d.Digest, - repo: p.repo, - repoInfo: p.repoInfo, - V2MetadataService: p.V2MetadataService, - src: d, - } - - descriptors = append(descriptors, layerDescriptor) - } - - configChan := make(chan []byte, 1) - configErrChan := make(chan error, 1) - layerErrChan := make(chan error, 1) - downloadsDone := make(chan struct{}) - var cancel func() - ctx, cancel = context.WithCancel(ctx) - defer cancel() - - // Pull the image config - go func() { - configJSON, err := p.pullSchema2Config(ctx, target.Digest) - if err != nil { - configErrChan <- ImageConfigPullError{Err: err} - cancel() - return - } - configChan <- configJSON - }() - - var ( - configJSON []byte // raw serialized image config - downloadedRootFS *image.RootFS // rootFS from registered layers - configRootFS *image.RootFS // rootFS from configuration - release func() // release resources from rootFS download - configPlatform *specs.Platform // for LCOW when registering downloaded layers - ) - - // https://github.com/docker/docker/issues/24766 - Err on the side of caution, - // explicitly blocking images intended for linux from the Windows daemon. On - // Windows, we do this before the attempt to download, effectively serialising - // the download slightly slowing it down. We have to do it this way, as - // chances are the download of layers itself would fail due to file names - // which aren't suitable for NTFS. At some point in the future, if a similar - // check to block Windows images being pulled on Linux is implemented, it - // may be necessary to perform the same type of serialisation. - if runtime.GOOS == "windows" { - configJSON, configRootFS, configPlatform, err = receiveConfig(p.config.ImageStore, configChan, configErrChan) - if err != nil { - return "", "", err - } - if configRootFS == nil { - return "", "", errRootFSInvalid - } - if err := checkImageCompatibility(configPlatform.OS, configPlatform.OSVersion); err != nil { - return "", "", err - } - - if len(descriptors) != len(configRootFS.DiffIDs) { - return "", "", errRootFSMismatch - } - - // Early bath if the requested OS doesn't match that of the configuration. - // This avoids doing the download, only to potentially fail later. 
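// pullSchema2 above fetches the image config concurrently with the layers: a
// goroutine sends the config (or an error) on buffered channels and cancels
// the shared context on failure so the layer downloads stop too. A minimal
// sketch of that launch pattern; fetchConfig is an illustrative stand-in for
// pullSchema2Config.
package main

import (
	"context"
	"errors"
	"fmt"
)

func fetchConfig(ctx context.Context) ([]byte, error) {
	return nil, errors.New("registry unreachable") // simulate a failure
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	configChan := make(chan []byte, 1) // buffered so the sender never blocks
	configErrChan := make(chan error, 1)

	go func() {
		configJSON, err := fetchConfig(ctx)
		if err != nil {
			configErrChan <- err
			cancel() // abort the layer downloads sharing this context
			return
		}
		configChan <- configJSON
	}()

	select {
	case cfg := <-configChan:
		fmt.Println("config:", len(cfg), "bytes")
	case err := <-configErrChan:
		fmt.Println("config pull failed:", err)
	}
}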
- if !strings.EqualFold(configPlatform.OS, requestedOS) { - return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configPlatform.OS, requestedOS) - } - - // Populate diff ids in descriptors to avoid downloading foreign layers - // which have been side loaded - for i := range descriptors { - descriptors[i].(*v2LayerDescriptor).diffID = configRootFS.DiffIDs[i] - } - } - - if p.config.DownloadManager != nil { - go func() { - var ( - err error - rootFS image.RootFS - ) - downloadRootFS := *image.NewRootFS() - rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, requestedOS, descriptors, p.config.ProgressOutput) - if err != nil { - // Intentionally do not cancel the config download here - // as the error from config download (if there is one) - // is more interesting than the layer download error - layerErrChan <- err - return - } - - downloadedRootFS = &rootFS - close(downloadsDone) - }() - } else { - // We have nothing to download - close(downloadsDone) - } - - if configJSON == nil { - configJSON, configRootFS, _, err = receiveConfig(p.config.ImageStore, configChan, configErrChan) - if err == nil && configRootFS == nil { - err = errRootFSInvalid - } - if err != nil { - cancel() - select { - case <-downloadsDone: - case <-layerErrChan: - } - return "", "", err - } - } - - select { - case <-downloadsDone: - case err = <-layerErrChan: - return "", "", err - } - - if release != nil { - defer release() - } - - if downloadedRootFS != nil { - // The DiffIDs returned in rootFS MUST match those in the config. - // Otherwise the image config could be referencing layers that aren't - // included in the manifest. - if len(downloadedRootFS.DiffIDs) != len(configRootFS.DiffIDs) { - return "", "", errRootFSMismatch - } - - for i := range downloadedRootFS.DiffIDs { - if downloadedRootFS.DiffIDs[i] != configRootFS.DiffIDs[i] { - return "", "", errRootFSMismatch - } - } - } - - imageID, err := p.config.ImageStore.Put(configJSON) - if err != nil { - return "", "", err - } - - return imageID, manifestDigest, nil -} - -func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, *specs.Platform, error) { - select { - case configJSON := <-configChan: - rootfs, err := s.RootFSFromConfig(configJSON) - if err != nil { - return nil, nil, nil, err - } - platform, err := s.PlatformFromConfig(configJSON) - if err != nil { - return nil, nil, nil, err - } - return configJSON, rootfs, platform, nil - case err := <-errChan: - return nil, nil, nil, err - // Don't need a case for ctx.Done in the select because cancellation - // will trigger an error in p.pullSchema2ImageConfig. - } -} - -// pullManifestList handles "manifest lists" which point to various -// platform-specific manifests. 
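// Before accepting a pulled image, pullSchema2 above cross-checks the
// DiffIDs produced by the layer downloads against those declared in the
// image config; any mismatch rejects the image rather than trusting a config
// that references layers the manifest never delivered. A sketch of that
// comparison; sameDiffIDs is an illustrative helper name.
package main

import "fmt"

func sameDiffIDs(downloaded, config []string) bool {
	if len(downloaded) != len(config) {
		return false
	}
	for i := range downloaded {
		if downloaded[i] != config[i] {
			return false
		}
	}
	return true
}

func main() {
	downloaded := []string{"sha256:aaa", "sha256:bbb"}
	config := []string{"sha256:aaa", "sha256:ccc"}
	fmt.Println(sameDiffIDs(downloaded, config)) // false -> errRootFSMismatch
}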
-func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList, os string) (id digest.Digest, manifestListDigest digest.Digest, err error) { - manifestListDigest, err = schema2ManifestDigest(ref, mfstList) - if err != nil { - return "", "", err - } - - logrus.Debugf("%s resolved to a manifestList object with %d entries; looking for a %s/%s match", ref, len(mfstList.Manifests), os, runtime.GOARCH) - - manifestMatches := filterManifests(mfstList.Manifests, os) - - if len(manifestMatches) == 0 { - errMsg := fmt.Sprintf("no matching manifest for %s/%s in the manifest list entries", os, runtime.GOARCH) - logrus.Debugf(errMsg) - return "", "", errors.New(errMsg) - } - - if len(manifestMatches) > 1 { - logrus.Debugf("found multiple matches in manifest list, choosing best match %s", manifestMatches[0].Digest.String()) - } - manifestDigest := manifestMatches[0].Digest - - if err := checkImageCompatibility(manifestMatches[0].Platform.OS, manifestMatches[0].Platform.OSVersion); err != nil { - return "", "", err - } - - manSvc, err := p.repo.Manifests(ctx) - if err != nil { - return "", "", err - } - - manifest, err := manSvc.Get(ctx, manifestDigest) - if err != nil { - return "", "", err - } - - manifestRef, err := reference.WithDigest(reference.TrimNamed(ref), manifestDigest) - if err != nil { - return "", "", err - } - - switch v := manifest.(type) { - case *schema1.SignedManifest: - id, _, err = p.pullSchema1(ctx, manifestRef, v, os) - if err != nil { - return "", "", err - } - case *schema2.DeserializedManifest: - id, _, err = p.pullSchema2(ctx, manifestRef, v, os) - if err != nil { - return "", "", err - } - default: - return "", "", errors.New("unsupported manifest format") - } - - return id, manifestListDigest, err -} - -func (p *v2Puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) { - blobs := p.repo.Blobs(ctx) - configJSON, err = blobs.Get(ctx, dgst) - if err != nil { - return nil, err - } - - // Verify image config digest - verifier := dgst.Verifier() - if _, err := verifier.Write(configJSON); err != nil { - return nil, err - } - if !verifier.Verified() { - err := fmt.Errorf("image config verification failed for digest %s", dgst) - logrus.Error(err) - return nil, err - } - - return configJSON, nil -} - -// schema2ManifestDigest computes the manifest digest, and, if pulling by -// digest, ensures that it matches the requested digest. -func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) { - _, canonical, err := mfst.Payload() - if err != nil { - return "", err - } - - // If pull by digest, then verify the manifest digest. - if digested, isDigested := ref.(reference.Canonical); isDigested { - verifier := digested.Digest().Verifier() - if _, err := verifier.Write(canonical); err != nil { - return "", err - } - if !verifier.Verified() { - err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest()) - logrus.Error(err) - return "", err - } - return digested.Digest(), nil - } - - return digest.FromBytes(canonical), nil -} - -// allowV1Fallback checks if the error is a possible reason to fallback to v1 -// (even if confirmedV2 has been set already), and if so, wraps the error in -// a fallbackError with confirmedV2 set to false. Otherwise, it returns the -// error unmodified. 
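// pullManifestList above narrows a multi-platform manifest list to entries
// matching the requested OS and the host architecture, then pulls the first
// (best) match. A dependency-free sketch of that filter; platformDesc is an
// invented stand-in for manifestlist.ManifestDescriptor, and the entries are
// made up.
package main

import "fmt"

type platformDesc struct {
	OS, Architecture, Digest string
}

func filterByPlatform(entries []platformDesc, os, arch string) []platformDesc {
	var matches []platformDesc
	for _, e := range entries {
		if e.OS == os && e.Architecture == arch {
			matches = append(matches, e)
		}
	}
	return matches
}

func main() {
	entries := []platformDesc{
		{OS: "linux", Architecture: "amd64", Digest: "sha256:aaa"},
		{OS: "windows", Architecture: "amd64", Digest: "sha256:bbb"},
	}
	// The real code passes runtime.GOARCH for arch.
	matches := filterByPlatform(entries, "linux", "amd64")
	if len(matches) == 0 {
		fmt.Println("no matching manifest in the list")
		return
	}
	fmt.Println("pulling", matches[0].Digest) // first match wins
}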
-func allowV1Fallback(err error) error { - switch v := err.(type) { - case errcode.Errors: - if len(v) != 0 { - if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) { - return fallbackError{ - err: err, - confirmedV2: false, - transportOK: true, - } - } - } - case errcode.Error: - if shouldV2Fallback(v) { - return fallbackError{ - err: err, - confirmedV2: false, - transportOK: true, - } - } - case *url.Error: - if v.Err == auth.ErrNoBasicAuthCredentials { - return fallbackError{err: err, confirmedV2: false} - } - } - - return err -} - -func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Reference) (m *schema1.Manifest, err error) { - // If pull by digest, then verify the manifest digest. NOTE: It is - // important to do this first, before any other content validation. If the - // digest cannot be verified, don't even bother with those other things. - if digested, isCanonical := ref.(reference.Canonical); isCanonical { - verifier := digested.Digest().Verifier() - if _, err := verifier.Write(signedManifest.Canonical); err != nil { - return nil, err - } - if !verifier.Verified() { - err := fmt.Errorf("image verification failed for digest %s", digested.Digest()) - logrus.Error(err) - return nil, err - } - } - m = &signedManifest.Manifest - - if m.SchemaVersion != 1 { - return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, reference.FamiliarString(ref)) - } - if len(m.FSLayers) != len(m.History) { - return nil, fmt.Errorf("length of history not equal to number of layers for %q", reference.FamiliarString(ref)) - } - if len(m.FSLayers) == 0 { - return nil, fmt.Errorf("no FSLayers in manifest for %q", reference.FamiliarString(ref)) - } - return m, nil -} - -// fixManifestLayers removes repeated layers from the manifest and checks the -// correctness of the parent chain. -func fixManifestLayers(m *schema1.Manifest) error { - imgs := make([]*image.V1Image, len(m.FSLayers)) - for i := range m.FSLayers { - img := &image.V1Image{} - - if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil { - return err - } - - imgs[i] = img - if err := v1.ValidateID(img.ID); err != nil { - return err - } - } - - if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" { - // Windows base layer can point to a base layer parent that is not in manifest. - return errors.New("invalid parent ID in the base layer of the image") - } - - // check general duplicates to error instead of a deadlock - idmap := make(map[string]struct{}) - - var lastID string - for _, img := range imgs { - // skip IDs that appear after each other, we handle those later - if _, exists := idmap[img.ID]; img.ID != lastID && exists { - return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) - } - lastID = img.ID - idmap[lastID] = struct{}{} - } - - // backwards loop so that we keep the remaining indexes after removing items - for i := len(imgs) - 2; i >= 0; i-- { - if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue - m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) - m.History = append(m.History[:i], m.History[i+1:]...) - } else if imgs[i].Parent != imgs[i+1].ID { - return fmt.Errorf("invalid parent ID. 
Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent) - } - } - - return nil -} - -func createDownloadFile() (*os.File, error) { - return ioutil.TempFile("", "GetImageBlob") -} diff --git a/vendor/github.com/docker/docker/distribution/pull_v2_unix.go b/vendor/github.com/docker/docker/distribution/pull_v2_unix.go deleted file mode 100644 index 0be8a0324..000000000 --- a/vendor/github.com/docker/docker/distribution/pull_v2_unix.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build !windows - -package distribution // import "github.com/docker/docker/distribution" - -import ( - "context" - "runtime" - - "github.com/docker/distribution" - "github.com/docker/distribution/manifest/manifestlist" - "github.com/sirupsen/logrus" -) - -func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekCloser, error) { - blobs := ld.repo.Blobs(ctx) - return blobs.Open(ctx, ld.digest) -} - -func filterManifests(manifests []manifestlist.ManifestDescriptor, os string) []manifestlist.ManifestDescriptor { - var matches []manifestlist.ManifestDescriptor - for _, manifestDescriptor := range manifests { - if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == os { - matches = append(matches, manifestDescriptor) - - logrus.Debugf("found match for %s/%s with media type %s, digest %s", os, runtime.GOARCH, manifestDescriptor.MediaType, manifestDescriptor.Digest.String()) - } - } - return matches -} - -// checkImageCompatibility is a Windows-specific function. No-op on Linux -func checkImageCompatibility(imageOS, imageOSVersion string) error { - return nil -} diff --git a/vendor/github.com/docker/docker/distribution/pull_v2_windows.go b/vendor/github.com/docker/docker/distribution/pull_v2_windows.go deleted file mode 100644 index 432a36119..000000000 --- a/vendor/github.com/docker/docker/distribution/pull_v2_windows.go +++ /dev/null @@ -1,130 +0,0 @@ -package distribution // import "github.com/docker/docker/distribution" - -import ( - "context" - "errors" - "fmt" - "net/http" - "os" - "runtime" - "sort" - "strconv" - "strings" - - "github.com/docker/distribution" - "github.com/docker/distribution/manifest/manifestlist" - "github.com/docker/distribution/manifest/schema2" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" -) - -var _ distribution.Describable = &v2LayerDescriptor{} - -func (ld *v2LayerDescriptor) Descriptor() distribution.Descriptor { - if ld.src.MediaType == schema2.MediaTypeForeignLayer && len(ld.src.URLs) > 0 { - return ld.src - } - return distribution.Descriptor{} -} - -func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekCloser, error) { - blobs := ld.repo.Blobs(ctx) - rsc, err := blobs.Open(ctx, ld.digest) - - if len(ld.src.URLs) == 0 { - return rsc, err - } - - // We're done if the registry has this blob. - if err == nil { - // Seek does an HTTP GET. If it succeeds, the blob really is accessible. - if _, err = rsc.Seek(0, os.SEEK_SET); err == nil { - return rsc, nil - } - rsc.Close() - } - - // Find the first URL that results in a 200 result code. - for _, url := range ld.src.URLs { - logrus.Debugf("Pulling %v from foreign URL %v", ld.digest, url) - rsc = transport.NewHTTPReadSeeker(http.DefaultClient, url, nil) - - // Seek does an HTTP GET. If it succeeds, the blob really is accessible. 
- _, err = rsc.Seek(0, os.SEEK_SET) - if err == nil { - break - } - logrus.Debugf("Download for %v failed: %v", ld.digest, err) - rsc.Close() - rsc = nil - } - return rsc, err -} - -func filterManifests(manifests []manifestlist.ManifestDescriptor, os string) []manifestlist.ManifestDescriptor { - osVersion := "" - if os == "windows" { - version := system.GetOSVersion() - osVersion = fmt.Sprintf("%d.%d.%d", version.MajorVersion, version.MinorVersion, version.Build) - logrus.Debugf("will prefer entries with version %s", osVersion) - } - - var matches []manifestlist.ManifestDescriptor - for _, manifestDescriptor := range manifests { - if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == os { - matches = append(matches, manifestDescriptor) - logrus.Debugf("found match for %s/%s %s with media type %s, digest %s", os, runtime.GOARCH, manifestDescriptor.Platform.OSVersion, manifestDescriptor.MediaType, manifestDescriptor.Digest.String()) - } else { - logrus.Debugf("ignoring %s/%s %s with media type %s, digest %s", os, runtime.GOARCH, manifestDescriptor.Platform.OSVersion, manifestDescriptor.MediaType, manifestDescriptor.Digest.String()) - } - } - if os == "windows" { - sort.Stable(manifestsByVersion{osVersion, matches}) - } - return matches -} - -func versionMatch(actual, expected string) bool { - // Check whether the version matches up to the build, ignoring UBR - return strings.HasPrefix(actual, expected+".") -} - -type manifestsByVersion struct { - version string - list []manifestlist.ManifestDescriptor -} - -func (mbv manifestsByVersion) Less(i, j int) bool { - // TODO: Split version by parts and compare - // TODO: Prefer versions which have a greater version number - // Move compatible versions to the top, with no other ordering changes - return versionMatch(mbv.list[i].Platform.OSVersion, mbv.version) && !versionMatch(mbv.list[j].Platform.OSVersion, mbv.version) -} - -func (mbv manifestsByVersion) Len() int { - return len(mbv.list) -} - -func (mbv manifestsByVersion) Swap(i, j int) { - mbv.list[i], mbv.list[j] = mbv.list[j], mbv.list[i] -} - -// checkImageCompatibility blocks pulling incompatible images based on a later OS build -// Fixes https://github.com/moby/moby/issues/36184. 
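// filterManifests on Windows above stable-sorts the matches so entries whose
// OSVersion shares the host's major.minor.build prefix float to the top
// without otherwise disturbing the registry's original order. A compact
// sketch of that sort.Interface trick; byVersionMatch is an invented name
// mirroring manifestsByVersion.
package main

import (
	"fmt"
	"sort"
	"strings"
)

type byVersionMatch struct {
	version string // host version, e.g. "10.0.17763"
	list    []string
}

func (b byVersionMatch) Len() int      { return len(b.list) }
func (b byVersionMatch) Swap(i, j int) { b.list[i], b.list[j] = b.list[j], b.list[i] }
func (b byVersionMatch) Less(i, j int) bool {
	// Matching versions sort before non-matching ones; ties keep their order.
	match := func(v string) bool { return strings.HasPrefix(v, b.version+".") }
	return match(b.list[i]) && !match(b.list[j])
}

func main() {
	versions := []string{"10.0.14393.2551", "10.0.17763.1040", "10.0.17763.437"}
	sort.Stable(byVersionMatch{version: "10.0.17763", list: versions})
	fmt.Println(versions) // matching builds first, original order preserved
}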
-func checkImageCompatibility(imageOS, imageOSVersion string) error { - if imageOS == "windows" { - hostOSV := system.GetOSVersion() - splitImageOSVersion := strings.Split(imageOSVersion, ".") // eg 10.0.16299.nnnn - if len(splitImageOSVersion) >= 3 { - if imageOSBuild, err := strconv.Atoi(splitImageOSVersion[2]); err == nil { - if imageOSBuild > int(hostOSV.Build) { - errMsg := fmt.Sprintf("a Windows version %s.%s.%s-based image is incompatible with a %s host", splitImageOSVersion[0], splitImageOSVersion[1], splitImageOSVersion[2], hostOSV.ToString()) - logrus.Debugf(errMsg) - return errors.New(errMsg) - } - } - } - } - return nil -} diff --git a/vendor/github.com/docker/docker/distribution/push.go b/vendor/github.com/docker/docker/distribution/push.go deleted file mode 100644 index eb3bc5597..000000000 --- a/vendor/github.com/docker/docker/distribution/push.go +++ /dev/null @@ -1,186 +0,0 @@ -package distribution // import "github.com/docker/docker/distribution" - -import ( - "bufio" - "compress/gzip" - "context" - "fmt" - "io" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/distribution/metadata" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/registry" - "github.com/sirupsen/logrus" -) - -// Pusher is an interface that abstracts pushing for different API versions. -type Pusher interface { - // Push tries to push the image configured at the creation of Pusher. - // Push returns an error if any, as well as a boolean that determines whether to retry Push on the next configured endpoint. - // - // TODO(tiborvass): have Push() take a reference to repository + tag, so that the pusher itself is repository-agnostic. - Push(ctx context.Context) error -} - -const compressionBufSize = 32768 - -// NewPusher creates a new Pusher interface that will push to either a v1 or v2 -// registry. The endpoint argument contains a Version field that determines -// whether a v1 or v2 pusher will be created. The other parameters are passed -// through to the underlying pusher implementation for use during the actual -// push operation. -func NewPusher(ref reference.Named, endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePushConfig *ImagePushConfig) (Pusher, error) { - switch endpoint.Version { - case registry.APIVersion2: - return &v2Pusher{ - v2MetadataService: metadata.NewV2MetadataService(imagePushConfig.MetadataStore), - ref: ref, - endpoint: endpoint, - repoInfo: repoInfo, - config: imagePushConfig, - }, nil - case registry.APIVersion1: - return &v1Pusher{ - v1IDService: metadata.NewV1IDService(imagePushConfig.MetadataStore), - ref: ref, - endpoint: endpoint, - repoInfo: repoInfo, - config: imagePushConfig, - }, nil - } - return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) -} - -// Push initiates a push operation on ref. -// ref is the specific variant of the image to be pushed. -// If no tag is provided, all tags will be pushed. -func Push(ctx context.Context, ref reference.Named, imagePushConfig *ImagePushConfig) error { - // FIXME: Allow to interrupt current push when new push of same image is done. 
- - // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := imagePushConfig.RegistryService.ResolveRepository(ref) - if err != nil { - return err - } - - endpoints, err := imagePushConfig.RegistryService.LookupPushEndpoints(reference.Domain(repoInfo.Name)) - if err != nil { - return err - } - - progress.Messagef(imagePushConfig.ProgressOutput, "", "The push refers to repository [%s]", repoInfo.Name.Name()) - - associations := imagePushConfig.ReferenceStore.ReferencesByName(repoInfo.Name) - if len(associations) == 0 { - return fmt.Errorf("An image does not exist locally with the tag: %s", reference.FamiliarName(repoInfo.Name)) - } - - var ( - lastErr error - - // confirmedV2 is set to true if a push attempt managed to - // confirm that it was talking to a v2 registry. This will - // prevent fallback to the v1 protocol. - confirmedV2 bool - - // confirmedTLSRegistries is a map indicating which registries - // are known to be using TLS. There should never be a plaintext - // retry for any of these. - confirmedTLSRegistries = make(map[string]struct{}) - ) - - for _, endpoint := range endpoints { - if imagePushConfig.RequireSchema2 && endpoint.Version == registry.APIVersion1 { - continue - } - if confirmedV2 && endpoint.Version == registry.APIVersion1 { - logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) - continue - } - - if endpoint.URL.Scheme != "https" { - if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { - logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) - continue - } - } - - logrus.Debugf("Trying to push %s to %s %s", repoInfo.Name.Name(), endpoint.URL, endpoint.Version) - - pusher, err := NewPusher(ref, endpoint, repoInfo, imagePushConfig) - if err != nil { - lastErr = err - continue - } - if err := pusher.Push(ctx); err != nil { - // Was this push cancelled? If so, don't try to fall - // back. - select { - case <-ctx.Done(): - default: - if fallbackErr, ok := err.(fallbackError); ok { - confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 - if fallbackErr.transportOK && endpoint.URL.Scheme == "https" { - confirmedTLSRegistries[endpoint.URL.Host] = struct{}{} - } - err = fallbackErr.err - lastErr = err - logrus.Infof("Attempting next endpoint for push after error: %v", err) - continue - } - } - - logrus.Errorf("Not continuing with push after error: %v", err) - return err - } - - imagePushConfig.ImageEventLogger(reference.FamiliarString(ref), reference.FamiliarName(repoInfo.Name), "push") - return nil - } - - if lastErr == nil { - lastErr = fmt.Errorf("no endpoints found for %s", repoInfo.Name.Name()) - } - return lastErr -} - -// compress returns an io.ReadCloser which will supply a compressed version of -// the provided Reader. The caller must close the ReadCloser after reading the -// compressed data. -// -// Note that this function returns a reader instead of taking a writer as an -// argument so that it can be used with httpBlobWriter's ReadFrom method. -// Using httpBlobWriter's Write method would send a PATCH request for every -// Write call. -// -// The second return value is a channel that gets closed when the goroutine -// is finished. This allows the caller to make sure the goroutine finishes -// before it releases any resources connected with the reader that was -// passed in. 
-func compress(in io.Reader) (io.ReadCloser, chan struct{}) { - compressionDone := make(chan struct{}) - - pipeReader, pipeWriter := io.Pipe() - // Use a bufio.Writer to avoid excessive chunking in HTTP request. - bufWriter := bufio.NewWriterSize(pipeWriter, compressionBufSize) - compressor := gzip.NewWriter(bufWriter) - - go func() { - _, err := io.Copy(compressor, in) - if err == nil { - err = compressor.Close() - } - if err == nil { - err = bufWriter.Flush() - } - if err != nil { - pipeWriter.CloseWithError(err) - } else { - pipeWriter.Close() - } - close(compressionDone) - }() - - return pipeReader, compressionDone -} diff --git a/vendor/github.com/docker/docker/distribution/push_v1.go b/vendor/github.com/docker/docker/distribution/push_v1.go deleted file mode 100644 index 7bd75e9fe..000000000 --- a/vendor/github.com/docker/docker/distribution/push_v1.go +++ /dev/null @@ -1,457 +0,0 @@ -package distribution // import "github.com/docker/docker/distribution" - -import ( - "context" - "fmt" - "sync" - - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/distribution/metadata" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/image" - "github.com/docker/docker/image/v1" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/registry" - "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" -) - -type v1Pusher struct { - v1IDService *metadata.V1IDService - endpoint registry.APIEndpoint - ref reference.Named - repoInfo *registry.RepositoryInfo - config *ImagePushConfig - session *registry.Session -} - -func (p *v1Pusher) Push(ctx context.Context) error { - tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) - if err != nil { - return err - } - // Adds Docker-specific headers as well as user-specified headers (metaHeaders) - tr := transport.NewTransport( - // TODO(tiborvass): was NoTimeout - registry.NewTransport(tlsConfig), - registry.Headers(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)..., - ) - client := registry.HTTPClient(tr) - v1Endpoint := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders) - p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) - if err != nil { - // TODO(dmcgowan): Check if should fallback - return fallbackError{err: err} - } - if err := p.pushRepository(ctx); err != nil { - // TODO(dmcgowan): Check if should fallback - return err - } - return nil -} - -// v1Image exposes the configuration, filesystem layer ID, and a v1 ID for an -// image being pushed to a v1 registry. -type v1Image interface { - Config() []byte - Layer() layer.Layer - V1ID() string -} - -type v1ImageCommon struct { - layer layer.Layer - config []byte - v1ID string -} - -func (common *v1ImageCommon) Config() []byte { - return common.config -} - -func (common *v1ImageCommon) V1ID() string { - return common.v1ID -} - -func (common *v1ImageCommon) Layer() layer.Layer { - return common.layer -} - -// v1TopImage defines a runnable (top layer) image being pushed to a v1 -// registry. 
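// The compress helper above turns a Reader into a gzip-compressed Reader by
// writing through an io.Pipe in a goroutine, with a done channel so the
// caller can wait for the writer to finish before releasing resources. A
// runnable, self-contained version of the same shape, with invented sample
// input.
package main

import (
	"bufio"
	"compress/gzip"
	"fmt"
	"io"
	"strings"
)

func compress(in io.Reader) (io.ReadCloser, chan struct{}) {
	done := make(chan struct{})
	pr, pw := io.Pipe()
	bw := bufio.NewWriterSize(pw, 32*1024) // avoid tiny chunked writes
	gz := gzip.NewWriter(bw)

	go func() {
		_, err := io.Copy(gz, in)
		if err == nil {
			err = gz.Close()
		}
		if err == nil {
			err = bw.Flush()
		}
		if err != nil {
			pw.CloseWithError(err)
		} else {
			pw.Close()
		}
		close(done)
	}()
	return pr, done
}

func main() {
	rc, done := compress(strings.NewReader(strings.Repeat("layer data ", 100)))
	gzipped, err := io.ReadAll(rc)
	rc.Close()
	<-done // writer goroutine has fully finished
	fmt.Println(len(gzipped), err)
}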
-type v1TopImage struct { - v1ImageCommon - imageID image.ID -} - -func newV1TopImage(imageID image.ID, img *image.Image, l layer.Layer, parent *v1DependencyImage) (*v1TopImage, error) { - v1ID := imageID.Digest().Hex() - parentV1ID := "" - if parent != nil { - parentV1ID = parent.V1ID() - } - - config, err := v1.MakeV1ConfigFromConfig(img, v1ID, parentV1ID, false) - if err != nil { - return nil, err - } - - return &v1TopImage{ - v1ImageCommon: v1ImageCommon{ - v1ID: v1ID, - config: config, - layer: l, - }, - imageID: imageID, - }, nil -} - -// v1DependencyImage defines a dependency layer being pushed to a v1 registry. -type v1DependencyImage struct { - v1ImageCommon -} - -func newV1DependencyImage(l layer.Layer, parent *v1DependencyImage) *v1DependencyImage { - v1ID := digest.Digest(l.ChainID()).Hex() - - var config string - if parent != nil { - config = fmt.Sprintf(`{"id":"%s","parent":"%s"}`, v1ID, parent.V1ID()) - } else { - config = fmt.Sprintf(`{"id":"%s"}`, v1ID) - } - return &v1DependencyImage{ - v1ImageCommon: v1ImageCommon{ - v1ID: v1ID, - config: []byte(config), - layer: l, - }, - } -} - -// Retrieve the all the images to be uploaded in the correct order -func (p *v1Pusher) getImageList() (imageList []v1Image, tagsByImage map[image.ID][]string, referencedLayers []PushLayer, err error) { - tagsByImage = make(map[image.ID][]string) - - // Ignore digest references - if _, isCanonical := p.ref.(reference.Canonical); isCanonical { - return - } - - tagged, isTagged := p.ref.(reference.NamedTagged) - if isTagged { - // Push a specific tag - var imgID image.ID - var dgst digest.Digest - dgst, err = p.config.ReferenceStore.Get(p.ref) - if err != nil { - return - } - imgID = image.IDFromDigest(dgst) - - imageList, err = p.imageListForTag(imgID, nil, &referencedLayers) - if err != nil { - return - } - - tagsByImage[imgID] = []string{tagged.Tag()} - - return - } - - imagesSeen := make(map[digest.Digest]struct{}) - dependenciesSeen := make(map[layer.ChainID]*v1DependencyImage) - - associations := p.config.ReferenceStore.ReferencesByName(p.ref) - for _, association := range associations { - if tagged, isTagged = association.Ref.(reference.NamedTagged); !isTagged { - // Ignore digest references. - continue - } - - imgID := image.IDFromDigest(association.ID) - tagsByImage[imgID] = append(tagsByImage[imgID], tagged.Tag()) - - if _, present := imagesSeen[association.ID]; present { - // Skip generating image list for already-seen image - continue - } - imagesSeen[association.ID] = struct{}{} - - imageListForThisTag, err := p.imageListForTag(imgID, dependenciesSeen, &referencedLayers) - if err != nil { - return nil, nil, nil, err - } - - // append to main image list - imageList = append(imageList, imageListForThisTag...) 
- } - if len(imageList) == 0 { - return nil, nil, nil, fmt.Errorf("No images found for the requested repository / tag") - } - logrus.Debugf("Image list: %v", imageList) - logrus.Debugf("Tags by image: %v", tagsByImage) - - return -} - -func (p *v1Pusher) imageListForTag(imgID image.ID, dependenciesSeen map[layer.ChainID]*v1DependencyImage, referencedLayers *[]PushLayer) (imageListForThisTag []v1Image, err error) { - ics, ok := p.config.ImageStore.(*imageConfigStore) - if !ok { - return nil, fmt.Errorf("only image store images supported for v1 push") - } - img, err := ics.Store.Get(imgID) - if err != nil { - return nil, err - } - - topLayerID := img.RootFS.ChainID() - - if !system.IsOSSupported(img.OperatingSystem()) { - return nil, system.ErrNotSupportedOperatingSystem - } - pl, err := p.config.LayerStores[img.OperatingSystem()].Get(topLayerID) - *referencedLayers = append(*referencedLayers, pl) - if err != nil { - return nil, fmt.Errorf("failed to get top layer from image: %v", err) - } - - // V1 push is deprecated, only support existing layerstore layers - lsl, ok := pl.(*storeLayer) - if !ok { - return nil, fmt.Errorf("only layer store layers supported for v1 push") - } - l := lsl.Layer - - dependencyImages, parent := generateDependencyImages(l.Parent(), dependenciesSeen) - - topImage, err := newV1TopImage(imgID, img, l, parent) - if err != nil { - return nil, err - } - - imageListForThisTag = append(dependencyImages, topImage) - - return -} - -func generateDependencyImages(l layer.Layer, dependenciesSeen map[layer.ChainID]*v1DependencyImage) (imageListForThisTag []v1Image, parent *v1DependencyImage) { - if l == nil { - return nil, nil - } - - imageListForThisTag, parent = generateDependencyImages(l.Parent(), dependenciesSeen) - - if dependenciesSeen != nil { - if dependencyImage, present := dependenciesSeen[l.ChainID()]; present { - // This layer is already on the list, we can ignore it - // and all its parents. - return imageListForThisTag, dependencyImage - } - } - - dependencyImage := newV1DependencyImage(l, parent) - imageListForThisTag = append(imageListForThisTag, dependencyImage) - - if dependenciesSeen != nil { - dependenciesSeen[l.ChainID()] = dependencyImage - } - - return imageListForThisTag, dependencyImage -} - -// createImageIndex returns an index of an image's layer IDs and tags. -func createImageIndex(images []v1Image, tags map[image.ID][]string) []*registry.ImgData { - var imageIndex []*registry.ImgData - for _, img := range images { - v1ID := img.V1ID() - - if topImage, isTopImage := img.(*v1TopImage); isTopImage { - if tags, hasTags := tags[topImage.imageID]; hasTags { - // If an image has tags you must add an entry in the image index - // for each tag - for _, tag := range tags { - imageIndex = append(imageIndex, ®istry.ImgData{ - ID: v1ID, - Tag: tag, - }) - } - continue - } - } - - // If the image does not have a tag it still needs to be sent to the - // registry with an empty tag so that it is associated with the repository - imageIndex = append(imageIndex, ®istry.ImgData{ - ID: v1ID, - Tag: "", - }) - } - return imageIndex -} - -// lookupImageOnEndpoint checks the specified endpoint to see if an image exists -// and if it is absent then it sends the image id to the channel to be pushed. 
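// generateDependencyImages above recurses from a layer to its oldest parent
// and builds the push list bottom-up, short-circuiting whole ancestor chains
// that an earlier tag already contributed via the dependenciesSeen map. A
// dependency-free sketch of that memoized walk; the node type and chainFor
// helper are invented for illustration.
package main

import "fmt"

type node struct {
	id     string
	parent *node
}

// chainFor returns n's ancestry oldest-first, skipping any suffix already
// recorded in seen (and therefore already scheduled for push).
func chainFor(n *node, seen map[string]bool) []string {
	if n == nil || seen[n.id] {
		return nil
	}
	seen[n.id] = true
	return append(chainFor(n.parent, seen), n.id)
}

func main() {
	base := &node{id: "base"}
	mid := &node{id: "mid", parent: base}
	topA := &node{id: "topA", parent: mid}
	topB := &node{id: "topB", parent: mid}

	seen := map[string]bool{}
	fmt.Println(chainFor(topA, seen)) // [base mid topA]
	fmt.Println(chainFor(topB, seen)) // [topB] -- shared ancestors skipped
}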
-func (p *v1Pusher) lookupImageOnEndpoint(wg *sync.WaitGroup, endpoint string, images chan v1Image, imagesToPush chan string) { - defer wg.Done() - for image := range images { - v1ID := image.V1ID() - truncID := stringid.TruncateID(image.Layer().DiffID().String()) - if err := p.session.LookupRemoteImage(v1ID, endpoint); err != nil { - logrus.Errorf("Error in LookupRemoteImage: %s", err) - imagesToPush <- v1ID - progress.Update(p.config.ProgressOutput, truncID, "Waiting") - } else { - progress.Update(p.config.ProgressOutput, truncID, "Already exists") - } - } -} - -func (p *v1Pusher) pushImageToEndpoint(ctx context.Context, endpoint string, imageList []v1Image, tags map[image.ID][]string, repo *registry.RepositoryData) error { - workerCount := len(imageList) - // start a maximum of 5 workers to check if images exist on the specified endpoint. - if workerCount > 5 { - workerCount = 5 - } - var ( - wg = &sync.WaitGroup{} - imageData = make(chan v1Image, workerCount*2) - imagesToPush = make(chan string, workerCount*2) - pushes = make(chan map[string]struct{}, 1) - ) - for i := 0; i < workerCount; i++ { - wg.Add(1) - go p.lookupImageOnEndpoint(wg, endpoint, imageData, imagesToPush) - } - // start a go routine that consumes the images to push - go func() { - shouldPush := make(map[string]struct{}) - for id := range imagesToPush { - shouldPush[id] = struct{}{} - } - pushes <- shouldPush - }() - for _, v1Image := range imageList { - imageData <- v1Image - } - // close the channel to notify the workers that there will be no more images to check. - close(imageData) - wg.Wait() - close(imagesToPush) - // wait for all the images that require pushes to be collected into a consumable map. - shouldPush := <-pushes - // finish by pushing any images and tags to the endpoint. The order that the images are pushed - // is very important that is why we are still iterating over the ordered list of imageIDs. - for _, img := range imageList { - v1ID := img.V1ID() - if _, push := shouldPush[v1ID]; push { - if _, err := p.pushImage(ctx, img, endpoint); err != nil { - // FIXME: Continue on error? - return err - } - } - if topImage, isTopImage := img.(*v1TopImage); isTopImage { - for _, tag := range tags[topImage.imageID] { - progress.Messagef(p.config.ProgressOutput, "", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(v1ID), endpoint+"repositories/"+reference.Path(p.repoInfo.Name)+"/tags/"+tag) - if err := p.session.PushRegistryTag(p.repoInfo.Name, v1ID, tag, endpoint); err != nil { - return err - } - } - } - } - return nil -} - -// pushRepository pushes layers that do not already exist on the registry. -func (p *v1Pusher) pushRepository(ctx context.Context) error { - imgList, tags, referencedLayers, err := p.getImageList() - defer func() { - for _, l := range referencedLayers { - l.Release() - } - }() - if err != nil { - return err - } - - imageIndex := createImageIndex(imgList, tags) - for _, data := range imageIndex { - logrus.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag) - } - - // Register all the images in a repository with the registry - // If an image is not in this list it will not be associated with the repository - repoData, err := p.session.PushImageJSONIndex(p.repoInfo.Name, imageIndex, false, nil) - if err != nil { - return err - } - // push the repository to each of the endpoints only if it does not exist. 
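// pushImageToEndpoint above caps existence checks at five concurrent
// workers: a WaitGroup tracks the workers, one channel feeds them images,
// and another collects the IDs that still need pushing. A runnable sketch of
// that bounded fan-out/fan-in shape; the IDs and the "already exists" rule
// are invented.
package main

import (
	"fmt"
	"sync"
)

func main() {
	ids := []string{"a", "b", "c", "d", "e", "f", "g"}

	workerCount := len(ids)
	if workerCount > 5 {
		workerCount = 5
	}

	var wg sync.WaitGroup
	work := make(chan string, workerCount*2)
	toPush := make(chan string, workerCount*2)

	for i := 0; i < workerCount; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for id := range work {
				if id != "c" { // pretend "c" already exists remotely
					toPush <- id
				}
			}
		}()
	}

	// One consumer gathers the results until toPush is closed.
	results := make(chan map[string]struct{}, 1)
	go func() {
		shouldPush := make(map[string]struct{})
		for id := range toPush {
			shouldPush[id] = struct{}{}
		}
		results <- shouldPush
	}()

	for _, id := range ids {
		work <- id
	}
	close(work) // no more images to check
	wg.Wait()
	close(toPush)

	fmt.Println(len(<-results), "images need pushing") // 6 images need pushing
}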
- for _, endpoint := range repoData.Endpoints { - if err := p.pushImageToEndpoint(ctx, endpoint, imgList, tags, repoData); err != nil { - return err - } - } - _, err = p.session.PushImageJSONIndex(p.repoInfo.Name, imageIndex, true, repoData.Endpoints) - return err -} - -func (p *v1Pusher) pushImage(ctx context.Context, v1Image v1Image, ep string) (checksum string, err error) { - l := v1Image.Layer() - v1ID := v1Image.V1ID() - truncID := stringid.TruncateID(l.DiffID().String()) - - jsonRaw := v1Image.Config() - progress.Update(p.config.ProgressOutput, truncID, "Pushing") - - // General rule is to use ID for graph accesses and compatibilityID for - // calls to session.registry() - imgData := ®istry.ImgData{ - ID: v1ID, - } - - // Send the json - if err := p.session.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil { - if err == registry.ErrAlreadyExists { - progress.Update(p.config.ProgressOutput, truncID, "Image already pushed, skipping") - return "", nil - } - return "", err - } - - arch, err := l.TarStream() - if err != nil { - return "", err - } - defer arch.Close() - - // don't care if this fails; best effort - size, _ := l.DiffSize() - - // Send the layer - logrus.Debugf("rendered layer for %s of [%d] size", v1ID, size) - - reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), p.config.ProgressOutput, size, truncID, "Pushing") - defer reader.Close() - - checksum, checksumPayload, err := p.session.PushImageLayerRegistry(v1ID, reader, ep, jsonRaw) - if err != nil { - return "", err - } - imgData.Checksum = checksum - imgData.ChecksumPayload = checksumPayload - // Send the checksum - if err := p.session.PushImageChecksumRegistry(imgData, ep); err != nil { - return "", err - } - - if err := p.v1IDService.Set(v1ID, p.repoInfo.Index.Name, l.DiffID()); err != nil { - logrus.Warnf("Could not set v1 ID mapping: %v", err) - } - - progress.Update(p.config.ProgressOutput, truncID, "Image successfully pushed") - return imgData.Checksum, nil -} diff --git a/vendor/github.com/docker/docker/distribution/push_v2.go b/vendor/github.com/docker/docker/distribution/push_v2.go deleted file mode 100644 index 9dc3e7a2a..000000000 --- a/vendor/github.com/docker/docker/distribution/push_v2.go +++ /dev/null @@ -1,709 +0,0 @@ -package distribution // import "github.com/docker/docker/distribution" - -import ( - "context" - "errors" - "fmt" - "io" - "runtime" - "sort" - "strings" - "sync" - - "github.com/docker/distribution" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/manifest/schema2" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/client" - apitypes "github.com/docker/docker/api/types" - "github.com/docker/docker/distribution/metadata" - "github.com/docker/docker/distribution/xfer" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/registry" - "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" -) - -const ( - smallLayerMaximumSize = 100 * (1 << 10) // 100KB - middleLayerMaximumSize = 10 * (1 << 20) // 10MB -) - -type v2Pusher struct { - v2MetadataService metadata.V2MetadataService - ref reference.Named - endpoint registry.APIEndpoint - repoInfo *registry.RepositoryInfo - config *ImagePushConfig - repo distribution.Repository - - // pushState is state built by the Upload functions. 
- pushState pushState -} - -type pushState struct { - sync.Mutex - // remoteLayers is the set of layers known to exist on the remote side. - // This avoids redundant queries when pushing multiple tags that - // involve the same layers. It is also used to fill in digest and size - // information when building the manifest. - remoteLayers map[layer.DiffID]distribution.Descriptor - // confirmedV2 is set to true if we confirm we're talking to a v2 - // registry. This is used to limit fallbacks to the v1 protocol. - confirmedV2 bool - hasAuthInfo bool -} - -func (p *v2Pusher) Push(ctx context.Context) (err error) { - p.pushState.remoteLayers = make(map[layer.DiffID]distribution.Descriptor) - - p.repo, p.pushState.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull") - p.pushState.hasAuthInfo = p.config.AuthConfig.RegistryToken != "" || (p.config.AuthConfig.Username != "" && p.config.AuthConfig.Password != "") - if err != nil { - logrus.Debugf("Error getting v2 registry: %v", err) - return err - } - - if err = p.pushV2Repository(ctx); err != nil { - if continueOnError(err, p.endpoint.Mirror) { - return fallbackError{ - err: err, - confirmedV2: p.pushState.confirmedV2, - transportOK: true, - } - } - } - return err -} - -func (p *v2Pusher) pushV2Repository(ctx context.Context) (err error) { - if namedTagged, isNamedTagged := p.ref.(reference.NamedTagged); isNamedTagged { - imageID, err := p.config.ReferenceStore.Get(p.ref) - if err != nil { - return fmt.Errorf("tag does not exist: %s", reference.FamiliarString(p.ref)) - } - - return p.pushV2Tag(ctx, namedTagged, imageID) - } - - if !reference.IsNameOnly(p.ref) { - return errors.New("cannot push a digest reference") - } - - // Pull all tags - pushed := 0 - for _, association := range p.config.ReferenceStore.ReferencesByName(p.ref) { - if namedTagged, isNamedTagged := association.Ref.(reference.NamedTagged); isNamedTagged { - pushed++ - if err := p.pushV2Tag(ctx, namedTagged, association.ID); err != nil { - return err - } - } - } - - if pushed == 0 { - return fmt.Errorf("no tags to push for %s", reference.FamiliarName(p.repoInfo.Name)) - } - - return nil -} - -func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, id digest.Digest) error { - logrus.Debugf("Pushing repository: %s", reference.FamiliarString(ref)) - - imgConfig, err := p.config.ImageStore.Get(id) - if err != nil { - return fmt.Errorf("could not find image from tag %s: %v", reference.FamiliarString(ref), err) - } - - rootfs, err := p.config.ImageStore.RootFSFromConfig(imgConfig) - if err != nil { - return fmt.Errorf("unable to get rootfs for image %s: %s", reference.FamiliarString(ref), err) - } - - platform, err := p.config.ImageStore.PlatformFromConfig(imgConfig) - if err != nil { - return fmt.Errorf("unable to get platform for image %s: %s", reference.FamiliarString(ref), err) - } - - l, err := p.config.LayerStores[platform.OS].Get(rootfs.ChainID()) - if err != nil { - return fmt.Errorf("failed to get top layer from image: %v", err) - } - defer l.Release() - - hmacKey, err := metadata.ComputeV2MetadataHMACKey(p.config.AuthConfig) - if err != nil { - return fmt.Errorf("failed to compute hmac key of auth config: %v", err) - } - - var descriptors []xfer.UploadDescriptor - - descriptorTemplate := v2PushDescriptor{ - v2MetadataService: p.v2MetadataService, - hmacKey: hmacKey, - repoInfo: p.repoInfo.Name, - ref: p.ref, - endpoint: p.endpoint, - repo: p.repo, - pushState: &p.pushState, - } - - // Loop 
bounds condition is to avoid pushing the base layer on Windows. - for range rootfs.DiffIDs { - descriptor := descriptorTemplate - descriptor.layer = l - descriptor.checkedDigests = make(map[digest.Digest]struct{}) - descriptors = append(descriptors, &descriptor) - - l = l.Parent() - } - - if err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput); err != nil { - return err - } - - // Try schema2 first - builder := schema2.NewManifestBuilder(p.repo.Blobs(ctx), p.config.ConfigMediaType, imgConfig) - manifest, err := manifestFromBuilder(ctx, builder, descriptors) - if err != nil { - return err - } - - manSvc, err := p.repo.Manifests(ctx) - if err != nil { - return err - } - - putOptions := []distribution.ManifestServiceOption{distribution.WithTag(ref.Tag())} - if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { - if runtime.GOOS == "windows" || p.config.TrustKey == nil || p.config.RequireSchema2 { - logrus.Warnf("failed to upload schema2 manifest: %v", err) - return err - } - - logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err) - - manifestRef, err := reference.WithTag(p.repo.Named(), ref.Tag()) - if err != nil { - return err - } - builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, manifestRef, imgConfig) - manifest, err = manifestFromBuilder(ctx, builder, descriptors) - if err != nil { - return err - } - - if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { - return err - } - } - - var canonicalManifest []byte - - switch v := manifest.(type) { - case *schema1.SignedManifest: - canonicalManifest = v.Canonical - case *schema2.DeserializedManifest: - _, canonicalManifest, err = v.Payload() - if err != nil { - return err - } - } - - manifestDigest := digest.FromBytes(canonicalManifest) - progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", ref.Tag(), manifestDigest, len(canonicalManifest)) - - if err := addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil { - return err - } - - // Signal digest to the trust client so it can sign the - // push, if appropriate. - progress.Aux(p.config.ProgressOutput, apitypes.PushResult{Tag: ref.Tag(), Digest: manifestDigest.String(), Size: len(canonicalManifest)}) - - return nil -} - -func manifestFromBuilder(ctx context.Context, builder distribution.ManifestBuilder, descriptors []xfer.UploadDescriptor) (distribution.Manifest, error) { - // descriptors is in reverse order; iterate backwards to get references - // appended in the right order. 
- for i := len(descriptors) - 1; i >= 0; i-- { - if err := builder.AppendReference(descriptors[i].(*v2PushDescriptor)); err != nil { - return nil, err - } - } - - return builder.Build(ctx) -} - -type v2PushDescriptor struct { - layer PushLayer - v2MetadataService metadata.V2MetadataService - hmacKey []byte - repoInfo reference.Named - ref reference.Named - endpoint registry.APIEndpoint - repo distribution.Repository - pushState *pushState - remoteDescriptor distribution.Descriptor - // a set of digests whose presence has been checked in a target repository - checkedDigests map[digest.Digest]struct{} -} - -func (pd *v2PushDescriptor) Key() string { - return "v2push:" + pd.ref.Name() + " " + pd.layer.DiffID().String() -} - -func (pd *v2PushDescriptor) ID() string { - return stringid.TruncateID(pd.layer.DiffID().String()) -} - -func (pd *v2PushDescriptor) DiffID() layer.DiffID { - return pd.layer.DiffID() -} - -func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) { - // Skip foreign layers unless this registry allows nondistributable artifacts. - if !pd.endpoint.AllowNondistributableArtifacts { - if fs, ok := pd.layer.(distribution.Describable); ok { - if d := fs.Descriptor(); len(d.URLs) > 0 { - progress.Update(progressOutput, pd.ID(), "Skipped foreign layer") - return d, nil - } - } - } - - diffID := pd.DiffID() - - pd.pushState.Lock() - if descriptor, ok := pd.pushState.remoteLayers[diffID]; ok { - // it is already known that the push is not needed and - // therefore doing a stat is unnecessary - pd.pushState.Unlock() - progress.Update(progressOutput, pd.ID(), "Layer already exists") - return descriptor, nil - } - pd.pushState.Unlock() - - maxMountAttempts, maxExistenceChecks, checkOtherRepositories := getMaxMountAndExistenceCheckAttempts(pd.layer) - - // Do we have any metadata associated with this layer's DiffID? - v2Metadata, err := pd.v2MetadataService.GetMetadata(diffID) - if err == nil { - // check for blob existence in the target repository - descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, true, 1, v2Metadata) - if exists || err != nil { - return descriptor, err - } - } - - // if digest was empty or not saved, or if blob does not exist on the remote repository, - // then push the blob. 
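- // Illustrative aside (not part of the original file): the existence check referred to above boils down to a Stat against the target repository's blob store, distinguishing "unknown" from real errors. A minimal sketch, assuming the docker/distribution and go-digest packages used by this file; blobExists is a hypothetical helper name:
- //
- //	func blobExists(ctx context.Context, bs distribution.BlobStore, dgst digest.Digest) (distribution.Descriptor, bool, error) {
- //		desc, err := bs.Stat(ctx, dgst)
- //		switch err {
- //		case nil:
- //			return desc, true, nil // already on the remote; skip the upload
- //		case distribution.ErrBlobUnknown:
- //			return distribution.Descriptor{}, false, nil // needs to be pushed
- //		default:
- //			return distribution.Descriptor{}, false, err // transport or auth failure
- //		}
- //	}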
- bs := pd.repo.Blobs(ctx) - - var layerUpload distribution.BlobWriter - - // Attempt to mount the layer from another repository in the same registry, to avoid an unnecessary upload - candidates := getRepositoryMountCandidates(pd.repoInfo, pd.hmacKey, maxMountAttempts, v2Metadata) - isUnauthorizedError := false - for _, mountCandidate := range candidates { - logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, mountCandidate.Digest, mountCandidate.SourceRepository) - createOpts := []distribution.BlobCreateOption{} - - if len(mountCandidate.SourceRepository) > 0 { - namedRef, err := reference.ParseNormalizedNamed(mountCandidate.SourceRepository) - if err != nil { - logrus.Errorf("failed to parse source repository reference %v: %v", reference.FamiliarString(namedRef), err) - pd.v2MetadataService.Remove(mountCandidate) - continue - } - - // Candidates are always under the same domain; create a remote - // reference with only the path, to mount from - remoteRef, err := reference.WithName(reference.Path(namedRef)) - if err != nil { - logrus.Errorf("failed to make remote reference out of %q: %v", reference.Path(namedRef), err) - continue - } - - canonicalRef, err := reference.WithDigest(reference.TrimNamed(remoteRef), mountCandidate.Digest) - if err != nil { - logrus.Errorf("failed to make canonical reference: %v", err) - continue - } - - createOpts = append(createOpts, client.WithMountFrom(canonicalRef)) - } - - // send the layer - lu, err := bs.Create(ctx, createOpts...) - switch err := err.(type) { - case nil: - // noop - case distribution.ErrBlobMounted: - progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name()) - - err.Descriptor.MediaType = schema2.MediaTypeLayer - - pd.pushState.Lock() - pd.pushState.confirmedV2 = true - pd.pushState.remoteLayers[diffID] = err.Descriptor - pd.pushState.Unlock() - - // Cache mapping from this layer's DiffID to the blobsum - if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{ - Digest: err.Descriptor.Digest, - SourceRepository: pd.repoInfo.Name(), - }); err != nil { - return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} - } - return err.Descriptor, nil - case errcode.Errors: - for _, e := range err { - switch e := e.(type) { - case errcode.Error: - if e.Code == errcode.ErrorCodeUnauthorized { - // an unauthorized error indicates that the user has no right to push layers to this registry - logrus.Debugln("failed to push layer to registry: unauthorized") - isUnauthorizedError = true - } - default: - } - } - default: - logrus.Infof("failed to mount layer %s (%s) from %s: %v", diffID, mountCandidate.Digest, mountCandidate.SourceRepository, err) - } - - // Remove the cached candidate unless the failure was an unauthorized error while the user has no auth info - // (no push rights and not logged in either); such a failure says nothing about the candidate itself - if len(mountCandidate.SourceRepository) > 0 && - !(isUnauthorizedError && !pd.pushState.hasAuthInfo) && - (metadata.CheckV2MetadataHMAC(&mountCandidate, pd.hmacKey) || - len(mountCandidate.HMAC) == 0) { - cause := "blob mount failure" - if err != nil { - cause = fmt.Sprintf("an error: %v", err.Error()) - } - logrus.Debugf("removing association between layer %s and %s due to %s", mountCandidate.Digest, mountCandidate.SourceRepository, cause) - pd.v2MetadataService.Remove(mountCandidate) - } - - if lu != nil { - // cancel the previous upload session - cancelLayerUpload(ctx, mountCandidate.Digest, layerUpload) - layerUpload = lu - } - } - -
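- // Illustrative aside (not part of the original file): the essence of the cross-repository mount attempted in the loop above. A sketch against the distribution client API already used by this file; tryMount is a hypothetical helper:
- //
- //	func tryMount(ctx context.Context, bs distribution.BlobStore, ref reference.Canonical) (distribution.Descriptor, distribution.BlobWriter, error) {
- //		w, err := bs.Create(ctx, client.WithMountFrom(ref))
- //		switch err := err.(type) {
- //		case nil:
- //			return distribution.Descriptor{}, w, nil // no mount happened; w is a regular upload session
- //		case distribution.ErrBlobMounted:
- //			return err.Descriptor, nil, nil // the registry linked the blob server-side; nothing to upload
- //		default:
- //			return distribution.Descriptor{}, nil, err
- //		}
- //	}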
if maxExistenceChecks-len(pd.checkedDigests) > 0 { - // do additional layer existence checks with other known digests if any - descriptor, exists, err := pd.layerAlreadyExists(ctx, progressOutput, diffID, checkOtherRepositories, maxExistenceChecks-len(pd.checkedDigests), v2Metadata) - if exists || err != nil { - return descriptor, err - } - } - - logrus.Debugf("Pushing layer: %s", diffID) - if layerUpload == nil { - layerUpload, err = bs.Create(ctx) - if err != nil { - return distribution.Descriptor{}, retryOnError(err) - } - } - defer layerUpload.Close() - // upload the blob - return pd.uploadUsingSession(ctx, progressOutput, diffID, layerUpload) -} - -func (pd *v2PushDescriptor) SetRemoteDescriptor(descriptor distribution.Descriptor) { - pd.remoteDescriptor = descriptor -} - -func (pd *v2PushDescriptor) Descriptor() distribution.Descriptor { - return pd.remoteDescriptor -} - -func (pd *v2PushDescriptor) uploadUsingSession( - ctx context.Context, - progressOutput progress.Output, - diffID layer.DiffID, - layerUpload distribution.BlobWriter, -) (distribution.Descriptor, error) { - var reader io.ReadCloser - - contentReader, err := pd.layer.Open() - if err != nil { - return distribution.Descriptor{}, retryOnError(err) - } - - size, _ := pd.layer.Size() - - reader = progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, contentReader), progressOutput, size, pd.ID(), "Pushing") - - switch m := pd.layer.MediaType(); m { - case schema2.MediaTypeUncompressedLayer: - compressedReader, compressionDone := compress(reader) - defer func(closer io.Closer) { - closer.Close() - <-compressionDone - }(reader) - reader = compressedReader - case schema2.MediaTypeLayer: - default: - reader.Close() - return distribution.Descriptor{}, fmt.Errorf("unsupported layer media type %s", m) - } - - digester := digest.Canonical.Digester() - tee := io.TeeReader(reader, digester.Hash()) - - nn, err := layerUpload.ReadFrom(tee) - reader.Close() - if err != nil { - return distribution.Descriptor{}, retryOnError(err) - } - - pushDigest := digester.Digest() - if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil { - return distribution.Descriptor{}, retryOnError(err) - } - - logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn) - progress.Update(progressOutput, pd.ID(), "Pushed") - - // Cache mapping from this layer's DiffID to the blobsum - if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{ - Digest: pushDigest, - SourceRepository: pd.repoInfo.Name(), - }); err != nil { - return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} - } - - desc := distribution.Descriptor{ - Digest: pushDigest, - MediaType: schema2.MediaTypeLayer, - Size: nn, - } - - pd.pushState.Lock() - // If Commit succeeded, that's an indication that the remote registry speaks the v2 protocol. - pd.pushState.confirmedV2 = true - pd.pushState.remoteLayers[diffID] = desc - pd.pushState.Unlock() - - return desc, nil -} - -// layerAlreadyExists checks if the registry already knows about any of the metadata passed in the "metadata" -// slice. If it finds one that the registry knows about, it returns the known digest and "true". If -// "checkOtherRepositories" is true, stat will be performed also with digests mapped to any other repository -// (not just the target one). 
-func (pd *v2PushDescriptor) layerAlreadyExists( - ctx context.Context, - progressOutput progress.Output, - diffID layer.DiffID, - checkOtherRepositories bool, - maxExistenceCheckAttempts int, - v2Metadata []metadata.V2Metadata, -) (desc distribution.Descriptor, exists bool, err error) { - // filter the metadata - candidates := []metadata.V2Metadata{} - for _, meta := range v2Metadata { - if len(meta.SourceRepository) > 0 && !checkOtherRepositories && meta.SourceRepository != pd.repoInfo.Name() { - continue - } - candidates = append(candidates, meta) - } - // sort the candidates by similarity - sortV2MetadataByLikenessAndAge(pd.repoInfo, pd.hmacKey, candidates) - - digestToMetadata := make(map[digest.Digest]*metadata.V2Metadata) - // an array of unique blob digests ordered from the best mount candidates to worst - layerDigests := []digest.Digest{} - for i := 0; i < len(candidates); i++ { - if len(layerDigests) >= maxExistenceCheckAttempts { - break - } - meta := &candidates[i] - if _, exists := digestToMetadata[meta.Digest]; exists { - // keep reference just to the first mapping (the best mount candidate) - continue - } - if _, exists := pd.checkedDigests[meta.Digest]; exists { - // existence of this digest has already been tested - continue - } - digestToMetadata[meta.Digest] = meta - layerDigests = append(layerDigests, meta.Digest) - } - -attempts: - for _, dgst := range layerDigests { - meta := digestToMetadata[dgst] - logrus.Debugf("Checking for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.Name()) - desc, err = pd.repo.Blobs(ctx).Stat(ctx, dgst) - pd.checkedDigests[meta.Digest] = struct{}{} - switch err { - case nil: - if m, ok := digestToMetadata[desc.Digest]; !ok || m.SourceRepository != pd.repoInfo.Name() || !metadata.CheckV2MetadataHMAC(m, pd.hmacKey) { - // cache mapping from this layer's DiffID to the blobsum - if err := pd.v2MetadataService.TagAndAdd(diffID, pd.hmacKey, metadata.V2Metadata{ - Digest: desc.Digest, - SourceRepository: pd.repoInfo.Name(), - }); err != nil { - return distribution.Descriptor{}, false, xfer.DoNotRetry{Err: err} - } - } - desc.MediaType = schema2.MediaTypeLayer - exists = true - break attempts - case distribution.ErrBlobUnknown: - if meta.SourceRepository == pd.repoInfo.Name() { - // remove the mapping to the target repository - pd.v2MetadataService.Remove(*meta) - } - default: - logrus.WithError(err).Debugf("Failed to check for presence of layer %s (%s) in %s", diffID, dgst, pd.repoInfo.Name()) - } - } - - if exists { - progress.Update(progressOutput, pd.ID(), "Layer already exists") - pd.pushState.Lock() - pd.pushState.remoteLayers[diffID] = desc - pd.pushState.Unlock() - } - - return desc, exists, nil -} - -// getMaxMountAndExistenceCheckAttempts returns the maximum number of cross repository mount attempts from -// source repositories of the target registry, the maximum number of layer existence checks to perform on the -// target repository, and whether the checks shall also be done with digests mapped to other repositories. The -// decision is based on layer size: the smaller the layer, the fewer attempts are made, because the cost of a -// small upload does not outweigh the latency of the extra checks.
-func getMaxMountAndExistenceCheckAttempts(layer PushLayer) (maxMountAttempts, maxExistenceCheckAttempts int, checkOtherRepositories bool) { - size, err := layer.Size() - switch { - // big blob - case size > middleLayerMaximumSize: - // 1st: attempt to mount the blob a few times - // 2nd: a few existence checks with digests associated with any repository - // then fall back to upload - return 4, 3, true - - // middle-sized blobs; if we could not get the size, assume we are dealing with a middle-sized blob - case size > smallLayerMaximumSize, err != nil: - // 1st: attempt to mount blobs of average size a few times - // 2nd: try at most 1 existence check if there's an existing mapping to the target repository - // then fall back to upload - return 3, 1, false - - // small blobs, do a minimum number of checks - default: - return 1, 1, false - } -} - -// getRepositoryMountCandidates returns an array of v2 metadata items belonging to the given registry. The -// array is sorted from youngest to oldest. If max is non-negative, at most max entries are returned. The -// resulting array contains only metadata entries whose registry part of SourceRepository matches that of repoInfo. -func getRepositoryMountCandidates( - repoInfo reference.Named, - hmacKey []byte, - max int, - v2Metadata []metadata.V2Metadata, -) []metadata.V2Metadata { - candidates := []metadata.V2Metadata{} - for _, meta := range v2Metadata { - sourceRepo, err := reference.ParseNamed(meta.SourceRepository) - if err != nil || reference.Domain(repoInfo) != reference.Domain(sourceRepo) { - continue - } - // target repository is not a viable candidate - if meta.SourceRepository == repoInfo.Name() { - continue - } - candidates = append(candidates, meta) - } - - sortV2MetadataByLikenessAndAge(repoInfo, hmacKey, candidates) - if max >= 0 && len(candidates) > max { - // select the youngest metadata - candidates = candidates[:max] - } - - return candidates -} - -// byLikeness is a sorting container for v2 metadata candidates for cross repository mount. The -// candidate "a" is preferred over "b": -// -// 1. if it was hashed using the same AuthConfig as the one used to authenticate to the target repository and -// "b" was not -// 2.
if more of its repository path components exactly match the path components of the target repository -type byLikeness struct { - arr []metadata.V2Metadata - hmacKey []byte - pathComponents []string -} - -func (bla byLikeness) Less(i, j int) bool { - aMacMatch := metadata.CheckV2MetadataHMAC(&bla.arr[i], bla.hmacKey) - bMacMatch := metadata.CheckV2MetadataHMAC(&bla.arr[j], bla.hmacKey) - if aMacMatch != bMacMatch { - return aMacMatch - } - aMatch := numOfMatchingPathComponents(bla.arr[i].SourceRepository, bla.pathComponents) - bMatch := numOfMatchingPathComponents(bla.arr[j].SourceRepository, bla.pathComponents) - return aMatch > bMatch -} -func (bla byLikeness) Swap(i, j int) { - bla.arr[i], bla.arr[j] = bla.arr[j], bla.arr[i] -} -func (bla byLikeness) Len() int { return len(bla.arr) } - -// nolint: interfacer -func sortV2MetadataByLikenessAndAge(repoInfo reference.Named, hmacKey []byte, marr []metadata.V2Metadata) { - // reverse the metadata array to shift the newest entries to the beginning - for i := 0; i < len(marr)/2; i++ { - marr[i], marr[len(marr)-i-1] = marr[len(marr)-i-1], marr[i] - } - // keep equal entries ordered from the youngest to the oldest - sort.Stable(byLikeness{ - arr: marr, - hmacKey: hmacKey, - pathComponents: getPathComponents(repoInfo.Name()), - }) -} - -// numOfMatchingPathComponents returns the number of path components in "pth" that exactly match "matchComponents". -func numOfMatchingPathComponents(pth string, matchComponents []string) int { - pthComponents := getPathComponents(pth) - i := 0 - for ; i < len(pthComponents) && i < len(matchComponents); i++ { - if matchComponents[i] != pthComponents[i] { - return i - } - } - return i -} - -func getPathComponents(path string) []string { - return strings.Split(path, "/") -} - -func cancelLayerUpload(ctx context.Context, dgst digest.Digest, layerUpload distribution.BlobWriter) { - if layerUpload != nil { - logrus.Debugf("cancelling upload of blob %s", dgst) - err := layerUpload.Cancel(ctx) - if err != nil { - logrus.Warnf("failed to cancel upload: %v", err) - } - } -} diff --git a/vendor/github.com/docker/docker/distribution/registry.go b/vendor/github.com/docker/docker/distribution/registry.go deleted file mode 100644 index 8b46aaad6..000000000 --- a/vendor/github.com/docker/docker/distribution/registry.go +++ /dev/null @@ -1,156 +0,0 @@ -package distribution // import "github.com/docker/docker/distribution" - -import ( - "context" - "fmt" - "net" - "net/http" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/manifest/schema2" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/client" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/api/types" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/registry" - "github.com/docker/go-connections/sockets" -) - -// ImageTypes represents the schema2 config types for images -var ImageTypes = []string{ - schema2.MediaTypeImageConfig, - // Handle unexpected values from https://github.com/docker/distribution/issues/1621 - // (see also https://github.com/docker/docker/issues/22378, - // https://github.com/docker/docker/issues/30083) - "application/octet-stream", - "application/json", - "text/html", - // Treat defaulted values as images, newer types cannot be implied - "", -} - -// PluginTypes represents the schema2 config types for plugins -var PluginTypes = []string{ -
schema2.MediaTypePluginConfig, -} - -var mediaTypeClasses map[string]string - -func init() { - // initialize media type classes with all known types for - // plugins - mediaTypeClasses = map[string]string{} - for _, t := range ImageTypes { - mediaTypeClasses[t] = "image" - } - for _, t := range PluginTypes { - mediaTypeClasses[t] = "plugin" - } -} - -// NewV2Repository returns a repository (v2 only). It creates an HTTP transport -// providing timeout settings and authentication support, and also verifies the -// remote API version. -func NewV2Repository(ctx context.Context, repoInfo *registry.RepositoryInfo, endpoint registry.APIEndpoint, metaHeaders http.Header, authConfig *types.AuthConfig, actions ...string) (repo distribution.Repository, foundVersion bool, err error) { - repoName := repoInfo.Name.Name() - // If the endpoint does not support CanonicalName, use the RemoteName instead - if endpoint.TrimHostname { - repoName = reference.Path(repoInfo.Name) - } - - direct := &net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - } - - // TODO(dmcgowan): Call close idle connections when complete, use keep alive - base := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: direct.Dial, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: endpoint.TLSConfig, - // TODO(dmcgowan): Call close idle connections when complete and use keep alive - DisableKeepAlives: true, - } - - proxyDialer, err := sockets.DialerFromEnvironment(direct) - if err == nil { - base.Dial = proxyDialer.Dial - } - - modifiers := registry.Headers(dockerversion.DockerUserAgent(ctx), metaHeaders) - authTransport := transport.NewTransport(base, modifiers...) - - challengeManager, foundVersion, err := registry.PingV2Registry(endpoint.URL, authTransport) - if err != nil { - transportOK := false - if responseErr, ok := err.(registry.PingResponseError); ok { - transportOK = true - err = responseErr.Err - } - return nil, foundVersion, fallbackError{ - err: err, - confirmedV2: foundVersion, - transportOK: transportOK, - } - } - - if authConfig.RegistryToken != "" { - passThruTokenHandler := &existingTokenHandler{token: authConfig.RegistryToken} - modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, passThruTokenHandler)) - } else { - scope := auth.RepositoryScope{ - Repository: repoName, - Actions: actions, - Class: repoInfo.Class, - } - - creds := registry.NewStaticCredentialStore(authConfig) - tokenHandlerOptions := auth.TokenHandlerOptions{ - Transport: authTransport, - Credentials: creds, - Scopes: []auth.Scope{scope}, - ClientID: registry.AuthClientID, - } - tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) - basicHandler := auth.NewBasicHandler(creds) - modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) - } - tr := transport.NewTransport(base, modifiers...)
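- // Illustrative aside (not part of the original file): the transport built here is plain http.RoundTripper layering - each modifier wraps the base transport. The same pattern in miniature, using the transport package above plus transport.NewHeaderRequestModifier (an assumption about the same package); the header value is hypothetical:
- //
- //	base := &http.Transport{Proxy: http.ProxyFromEnvironment}
- //	ua := http.Header{"User-Agent": []string{"example/1.0"}}
- //	rt := transport.NewTransport(base, transport.NewHeaderRequestModifier(ua))
- //	_ = &http.Client{Transport: rt} // every request now carries the extra header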
- - repoNameRef, err := reference.WithName(repoName) - if err != nil { - return nil, foundVersion, fallbackError{ - err: err, - confirmedV2: foundVersion, - transportOK: true, - } - } - - repo, err = client.NewRepository(repoNameRef, endpoint.URL.String(), tr) - if err != nil { - err = fallbackError{ - err: err, - confirmedV2: foundVersion, - transportOK: true, - } - } - return -} - -type existingTokenHandler struct { - token string -} - -func (th *existingTokenHandler) Scheme() string { - return "bearer" -} - -func (th *existingTokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.token)) - return nil -} diff --git a/vendor/github.com/docker/docker/distribution/utils/progress.go b/vendor/github.com/docker/docker/distribution/utils/progress.go deleted file mode 100644 index 73ee2be61..000000000 --- a/vendor/github.com/docker/docker/distribution/utils/progress.go +++ /dev/null @@ -1,44 +0,0 @@ -package utils // import "github.com/docker/docker/distribution/utils" - -import ( - "io" - "net" - "os" - "syscall" - - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/streamformatter" - "github.com/sirupsen/logrus" -) - -// WriteDistributionProgress is a helper for writing progress from chan to JSON -// stream with an optional cancel function. -func WriteDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) { - progressOutput := streamformatter.NewJSONProgressOutput(outStream, false) - operationCancelled := false - - for prog := range progressChan { - if err := progressOutput.WriteProgress(prog); err != nil && !operationCancelled { - // don't log broken pipe errors as this is the normal case when a client aborts - if isBrokenPipe(err) { - logrus.Info("Pull session cancelled") - } else { - logrus.Errorf("error writing progress to client: %v", err) - } - cancelFunc() - operationCancelled = true - // Don't return, because we need to continue draining - // progressChan until it's closed to avoid a deadlock. - } - } -} - -func isBrokenPipe(e error) bool { - if netErr, ok := e.(*net.OpError); ok { - e = netErr.Err - if sysErr, ok := netErr.Err.(*os.SyscallError); ok { - e = sysErr.Err - } - } - return e == syscall.EPIPE -} diff --git a/vendor/github.com/docker/docker/distribution/xfer/download.go b/vendor/github.com/docker/docker/distribution/xfer/download.go deleted file mode 100644 index e8cda9362..000000000 --- a/vendor/github.com/docker/docker/distribution/xfer/download.go +++ /dev/null @@ -1,474 +0,0 @@ -package xfer // import "github.com/docker/docker/distribution/xfer" - -import ( - "context" - "errors" - "fmt" - "io" - "runtime" - "time" - - "github.com/docker/distribution" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" -) - -const maxDownloadAttempts = 5 - -// LayerDownloadManager figures out which layers need to be downloaded, then -// registers and downloads those, taking into account dependencies between -// layers. 
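- // Illustrative aside (not part of the original file): how callers typically pair a progress channel with the helpers above - WriteDistributionProgress drains the channel to the client stream while a pull or push produces updates. A sketch; only WriteDistributionProgress and progress.Progress come from the code above, the wiring is assumed:
- //
- //	ctx, cancel := context.WithCancel(context.Background())
- //	progressChan := make(chan progress.Progress, 100)
- //	writesDone := make(chan struct{})
- //	go func() {
- //		utils.WriteDistributionProgress(cancel, os.Stdout, progressChan)
- //		close(writesDone)
- //	}()
- //	// ... run the operation with ctx, sending updates into progressChan ...
- //	close(progressChan) // lets the writer drain and exit
- //	<-writesDone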
-type LayerDownloadManager struct { - layerStores map[string]layer.Store - tm TransferManager - waitDuration time.Duration -} - -// SetConcurrency sets the max concurrent downloads for each pull -func (ldm *LayerDownloadManager) SetConcurrency(concurrency int) { - ldm.tm.SetConcurrency(concurrency) -} - -// NewLayerDownloadManager returns a new LayerDownloadManager. -func NewLayerDownloadManager(layerStores map[string]layer.Store, concurrencyLimit int, options ...func(*LayerDownloadManager)) *LayerDownloadManager { - manager := LayerDownloadManager{ - layerStores: layerStores, - tm: NewTransferManager(concurrencyLimit), - waitDuration: time.Second, - } - for _, option := range options { - option(&manager) - } - return &manager -} - -type downloadTransfer struct { - Transfer - - layerStore layer.Store - layer layer.Layer - err error -} - -// result returns the layer resulting from the download, if the download -// and registration were successful. -func (d *downloadTransfer) result() (layer.Layer, error) { - return d.layer, d.err -} - -// A DownloadDescriptor references a layer that may need to be downloaded. -type DownloadDescriptor interface { - // Key returns the key used to deduplicate downloads. - Key() string - // ID returns the ID for display purposes. - ID() string - // DiffID should return the DiffID for this layer, or an error - // if it is unknown (for example, if it has not been downloaded - // before). - DiffID() (layer.DiffID, error) - // Download is called to perform the download. - Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) - // Close is called when the download manager is finished with this - // descriptor and will not call Download again or read from the reader - // that Download returned. - Close() -} - -// DownloadDescriptorWithRegistered is a DownloadDescriptor that has an -// additional Registered method which gets called after a downloaded layer is -// registered. This allows the user of the download manager to know the DiffID -// of each registered layer. This method is called if a cast to -// DownloadDescriptorWithRegistered is successful. -type DownloadDescriptorWithRegistered interface { - DownloadDescriptor - Registered(diffID layer.DiffID) -} - -// Download is a blocking function which ensures the requested layers are -// present in the layer store. It uses the string returned by the Key method to -// deduplicate downloads. If a given layer is not already known to be present in -// the layer store, and the key is not used by an in-progress download, the -// Download method is called to get the layer tar data. Layers are then -// registered in the appropriate order. The caller must call the returned -// release function once it is done with the returned RootFS object. -func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS image.RootFS, os string, layers []DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) { - var ( - topLayer layer.Layer - topDownload *downloadTransfer - watcher *Watcher - missingLayer bool - transferKey = "" - downloadsByKey = make(map[string]*downloadTransfer) - ) - - // Assume that the operating system is the host OS if blank, and validate it - // to ensure we don't cause a panic by an invalid index into the layerstores.
- if os == "" { - os = runtime.GOOS - } - if !system.IsOSSupported(os) { - return image.RootFS{}, nil, system.ErrNotSupportedOperatingSystem - } - - rootFS := initialRootFS - for _, descriptor := range layers { - key := descriptor.Key() - transferKey += key - - if !missingLayer { - missingLayer = true - diffID, err := descriptor.DiffID() - if err == nil { - getRootFS := rootFS - getRootFS.Append(diffID) - l, err := ldm.layerStores[os].Get(getRootFS.ChainID()) - if err == nil { - // Layer already exists. - logrus.Debugf("Layer already exists: %s", descriptor.ID()) - progress.Update(progressOutput, descriptor.ID(), "Already exists") - if topLayer != nil { - layer.ReleaseAndLog(ldm.layerStores[os], topLayer) - } - topLayer = l - missingLayer = false - rootFS.Append(diffID) - // Register this repository as a source of this layer. - withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) - if hasRegistered { // As layerstore may set the driver - withRegistered.Registered(diffID) - } - continue - } - } - } - - // Does this layer have the same data as a previous layer in - // the stack? If so, avoid downloading it more than once. - var topDownloadUncasted Transfer - if existingDownload, ok := downloadsByKey[key]; ok { - xferFunc := ldm.makeDownloadFuncFromDownload(descriptor, existingDownload, topDownload, os) - defer topDownload.Transfer.Release(watcher) - topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput) - topDownload = topDownloadUncasted.(*downloadTransfer) - continue - } - - // Layer is not known to exist - download and register it. - progress.Update(progressOutput, descriptor.ID(), "Pulling fs layer") - - var xferFunc DoFunc - if topDownload != nil { - xferFunc = ldm.makeDownloadFunc(descriptor, "", topDownload, os) - defer topDownload.Transfer.Release(watcher) - } else { - xferFunc = ldm.makeDownloadFunc(descriptor, rootFS.ChainID(), nil, os) - } - topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput) - topDownload = topDownloadUncasted.(*downloadTransfer) - downloadsByKey[key] = topDownload - } - - if topDownload == nil { - return rootFS, func() { - if topLayer != nil { - layer.ReleaseAndLog(ldm.layerStores[os], topLayer) - } - }, nil - } - - // Won't be using the list built up so far - will generate it - // from downloaded layers instead. - rootFS.DiffIDs = []layer.DiffID{} - - defer func() { - if topLayer != nil { - layer.ReleaseAndLog(ldm.layerStores[os], topLayer) - } - }() - - select { - case <-ctx.Done(): - topDownload.Transfer.Release(watcher) - return rootFS, func() {}, ctx.Err() - case <-topDownload.Done(): - break - } - - l, err := topDownload.result() - if err != nil { - topDownload.Transfer.Release(watcher) - return rootFS, func() {}, err - } - - // Must do this exactly len(layers) times, so we don't include the - // base layer on Windows. - for range layers { - if l == nil { - topDownload.Transfer.Release(watcher) - return rootFS, func() {}, errors.New("internal error: too few parent layers") - } - rootFS.DiffIDs = append([]layer.DiffID{l.DiffID()}, rootFS.DiffIDs...) - l = l.Parent() - } - return rootFS, func() { topDownload.Transfer.Release(watcher) }, err -} - -// makeDownloadFunc returns a function that performs the layer download and -// registration. If parentDownload is non-nil, it waits for that download to -// complete before the registration step, and registers the downloaded data -// on top of parentDownload's resulting layer. 
Otherwise, it registers the -// layer on top of the ChainID given by parentLayer. -func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer, os string) DoFunc { - return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { - d := &downloadTransfer{ - Transfer: NewTransfer(), - layerStore: ldm.layerStores[os], - } - - go func() { - defer func() { - close(progressChan) - }() - - progressOutput := progress.ChanOutput(progressChan) - - select { - case <-start: - default: - progress.Update(progressOutput, descriptor.ID(), "Waiting") - <-start - } - - if parentDownload != nil { - // Did the parent download already fail or get - // cancelled? - select { - case <-parentDownload.Done(): - _, err := parentDownload.result() - if err != nil { - d.err = err - return - } - default: - } - } - - var ( - downloadReader io.ReadCloser - size int64 - err error - retries int - ) - - defer descriptor.Close() - - for { - downloadReader, size, err = descriptor.Download(d.Transfer.Context(), progressOutput) - if err == nil { - break - } - - // If an error was returned because the context - // was cancelled, we shouldn't retry. - select { - case <-d.Transfer.Context().Done(): - d.err = err - return - default: - } - - retries++ - if _, isDNR := err.(DoNotRetry); isDNR || retries == maxDownloadAttempts { - logrus.Errorf("Download failed: %v", err) - d.err = err - return - } - - logrus.Errorf("Download failed, retrying: %v", err) - delay := retries * 5 - ticker := time.NewTicker(ldm.waitDuration) - - selectLoop: - for { - progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1]) - select { - case <-ticker.C: - delay-- - if delay == 0 { - ticker.Stop() - break selectLoop - } - case <-d.Transfer.Context().Done(): - ticker.Stop() - d.err = errors.New("download cancelled during retry delay") - return - } - - } - } - - close(inactive) - - if parentDownload != nil { - select { - case <-d.Transfer.Context().Done(): - d.err = errors.New("layer registration cancelled") - downloadReader.Close() - return - case <-parentDownload.Done(): - } - - l, err := parentDownload.result() - if err != nil { - d.err = err - downloadReader.Close() - return - } - parentLayer = l.ChainID() - } - - reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(d.Transfer.Context(), downloadReader), progressOutput, size, descriptor.ID(), "Extracting") - defer reader.Close() - - inflatedLayerData, err := archive.DecompressStream(reader) - if err != nil { - d.err = fmt.Errorf("could not get decompression stream: %v", err) - return - } - - var src distribution.Descriptor - if fs, ok := descriptor.(distribution.Describable); ok { - src = fs.Descriptor() - } - if ds, ok := d.layerStore.(layer.DescribableStore); ok { - d.layer, err = ds.RegisterWithDescriptor(inflatedLayerData, parentLayer, src) - } else { - d.layer, err = d.layerStore.Register(inflatedLayerData, parentLayer) - } - if err != nil { - select { - case <-d.Transfer.Context().Done(): - d.err = errors.New("layer registration cancelled") - default: - d.err = fmt.Errorf("failed to register layer: %v", err) - } - return - } - - progress.Update(progressOutput, descriptor.ID(), "Pull complete") - withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) - if hasRegistered { - withRegistered.Registered(d.layer.DiffID()) - } - - // Doesn't actually need to be its own goroutine, 
but - // done like this so we can defer close(c). - go func() { - <-d.Transfer.Released() - if d.layer != nil { - layer.ReleaseAndLog(d.layerStore, d.layer) - } - }() - }() - - return d - } -} - -// makeDownloadFuncFromDownload returns a function that performs the layer -// registration when the layer data is coming from an existing download. It -// waits for sourceDownload and parentDownload to complete, and then -// reregisters the data from sourceDownload's top layer on top of -// parentDownload. This function does not log progress output because it would -// interfere with the progress reporting for sourceDownload, which has the same -// Key. -func (ldm *LayerDownloadManager) makeDownloadFuncFromDownload(descriptor DownloadDescriptor, sourceDownload *downloadTransfer, parentDownload *downloadTransfer, os string) DoFunc { - return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { - d := &downloadTransfer{ - Transfer: NewTransfer(), - layerStore: ldm.layerStores[os], - } - - go func() { - defer func() { - close(progressChan) - }() - - <-start - - close(inactive) - - select { - case <-d.Transfer.Context().Done(): - d.err = errors.New("layer registration cancelled") - return - case <-parentDownload.Done(): - } - - l, err := parentDownload.result() - if err != nil { - d.err = err - return - } - parentLayer := l.ChainID() - - // sourceDownload should have already finished if - // parentDownload finished, but wait for it explicitly - // to be sure. - select { - case <-d.Transfer.Context().Done(): - d.err = errors.New("layer registration cancelled") - return - case <-sourceDownload.Done(): - } - - l, err = sourceDownload.result() - if err != nil { - d.err = err - return - } - - layerReader, err := l.TarStream() - if err != nil { - d.err = err - return - } - defer layerReader.Close() - - var src distribution.Descriptor - if fs, ok := l.(distribution.Describable); ok { - src = fs.Descriptor() - } - if ds, ok := d.layerStore.(layer.DescribableStore); ok { - d.layer, err = ds.RegisterWithDescriptor(layerReader, parentLayer, src) - } else { - d.layer, err = d.layerStore.Register(layerReader, parentLayer) - } - if err != nil { - d.err = fmt.Errorf("failed to register layer: %v", err) - return - } - - withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) - if hasRegistered { - withRegistered.Registered(d.layer.DiffID()) - } - - // Doesn't actually need to be its own goroutine, but - // done like this so we can defer close(c). - go func() { - <-d.Transfer.Released() - if d.layer != nil { - layer.ReleaseAndLog(d.layerStore, d.layer) - } - }() - }() - - return d - } -} diff --git a/vendor/github.com/docker/docker/distribution/xfer/transfer.go b/vendor/github.com/docker/docker/distribution/xfer/transfer.go deleted file mode 100644 index c356fde8d..000000000 --- a/vendor/github.com/docker/docker/distribution/xfer/transfer.go +++ /dev/null @@ -1,401 +0,0 @@ -package xfer // import "github.com/docker/docker/distribution/xfer" - -import ( - "context" - "runtime" - "sync" - - "github.com/docker/docker/pkg/progress" -) - -// DoNotRetry is an error wrapper indicating that the error cannot be resolved -// with a retry. -type DoNotRetry struct { - Err error -} - -// Error returns the stringified representation of the encapsulated error. -func (e DoNotRetry) Error() string { - return e.Err.Error() -} - -// Watcher is returned by Watch and can be passed to Release to stop watching. 
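- // Illustrative aside (not part of the original file): the Watch/Release contract in use. Every Watch (including the one implicit in TransferManager.Transfer) must be paired with a Release; releasing the last watcher cancels the transfer. A sketch with assumed tm, key, xferFunc, ctx and progressOutput values:
- //
- //	xfer, watcher := tm.Transfer(key, xferFunc, progressOutput)
- //	defer xfer.Release(watcher)
- //	select {
- //	case <-xfer.Done():
- //		// finished or cancelled; inspect the concrete transfer for the result
- //	case <-ctx.Done():
- //		// caller gave up; the deferred Release detaches this watcher and,
- //		// if it was the last one, cancels the transfer
- //	}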
-type Watcher struct { - // signalChan is used to signal to the watcher goroutine that - // new progress information is available, or that the transfer - // has finished. - signalChan chan struct{} - // releaseChan signals to the watcher goroutine that the watcher - // should be detached. - releaseChan chan struct{} - // running remains open as long as the watcher is watching the - // transfer. It gets closed if the transfer finishes or the - // watcher is detached. - running chan struct{} -} - -// Transfer represents an in-progress transfer. -type Transfer interface { - Watch(progressOutput progress.Output) *Watcher - Release(*Watcher) - Context() context.Context - Close() - Done() <-chan struct{} - Released() <-chan struct{} - Broadcast(masterProgressChan <-chan progress.Progress) -} - -type transfer struct { - mu sync.Mutex - - ctx context.Context - cancel context.CancelFunc - - // watchers keeps track of the goroutines monitoring progress output, - // indexed by the channels that release them. - watchers map[chan struct{}]*Watcher - - // lastProgress is the most recently received progress event. - lastProgress progress.Progress - // hasLastProgress is true when lastProgress has been set. - hasLastProgress bool - - // running remains open as long as the transfer is in progress. - running chan struct{} - // released stays open until all watchers release the transfer and - // the transfer is no longer tracked by the transfer manager. - released chan struct{} - - // broadcastDone is true if the master progress channel has closed. - broadcastDone bool - // closed is true if Close has been called - closed bool - // broadcastSyncChan allows watchers to "ping" the broadcasting - // goroutine to wait for it for deplete its input channel. This ensures - // a detaching watcher won't miss an event that was sent before it - // started detaching. - broadcastSyncChan chan struct{} -} - -// NewTransfer creates a new transfer. -func NewTransfer() Transfer { - t := &transfer{ - watchers: make(map[chan struct{}]*Watcher), - running: make(chan struct{}), - released: make(chan struct{}), - broadcastSyncChan: make(chan struct{}), - } - - // This uses context.Background instead of a caller-supplied context - // so that a transfer won't be cancelled automatically if the client - // which requested it is ^C'd (there could be other viewers). - t.ctx, t.cancel = context.WithCancel(context.Background()) - - return t -} - -// Broadcast copies the progress and error output to all viewers. -func (t *transfer) Broadcast(masterProgressChan <-chan progress.Progress) { - for { - var ( - p progress.Progress - ok bool - ) - select { - case p, ok = <-masterProgressChan: - default: - // We've depleted the channel, so now we can handle - // reads on broadcastSyncChan to let detaching watchers - // know we're caught up. - select { - case <-t.broadcastSyncChan: - continue - case p, ok = <-masterProgressChan: - } - } - - t.mu.Lock() - if ok { - t.lastProgress = p - t.hasLastProgress = true - for _, w := range t.watchers { - select { - case w.signalChan <- struct{}{}: - default: - } - } - } else { - t.broadcastDone = true - } - t.mu.Unlock() - if !ok { - close(t.running) - return - } - } -} - -// Watch adds a watcher to the transfer. The supplied channel gets progress -// updates and is closed when the transfer finishes. 
-func (t *transfer) Watch(progressOutput progress.Output) *Watcher { - t.mu.Lock() - defer t.mu.Unlock() - - w := &Watcher{ - releaseChan: make(chan struct{}), - signalChan: make(chan struct{}), - running: make(chan struct{}), - } - - t.watchers[w.releaseChan] = w - - if t.broadcastDone { - close(w.running) - return w - } - - go func() { - defer func() { - close(w.running) - }() - var ( - done bool - lastWritten progress.Progress - hasLastWritten bool - ) - for { - t.mu.Lock() - hasLastProgress := t.hasLastProgress - lastProgress := t.lastProgress - t.mu.Unlock() - - // Make sure we don't write the last progress item - // twice. - if hasLastProgress && (!done || !hasLastWritten || lastProgress != lastWritten) { - progressOutput.WriteProgress(lastProgress) - lastWritten = lastProgress - hasLastWritten = true - } - - if done { - return - } - - select { - case <-w.signalChan: - case <-w.releaseChan: - done = true - // Since the watcher is going to detach, make - // sure the broadcaster is caught up so we - // don't miss anything. - select { - case t.broadcastSyncChan <- struct{}{}: - case <-t.running: - } - case <-t.running: - done = true - } - } - }() - - return w -} - -// Release is the inverse of Watch; indicating that the watcher no longer wants -// to be notified about the progress of the transfer. All calls to Watch must -// be paired with later calls to Release so that the lifecycle of the transfer -// is properly managed. -func (t *transfer) Release(watcher *Watcher) { - t.mu.Lock() - delete(t.watchers, watcher.releaseChan) - - if len(t.watchers) == 0 { - if t.closed { - // released may have been closed already if all - // watchers were released, then another one was added - // while waiting for a previous watcher goroutine to - // finish. - select { - case <-t.released: - default: - close(t.released) - } - } else { - t.cancel() - } - } - t.mu.Unlock() - - close(watcher.releaseChan) - // Block until the watcher goroutine completes - <-watcher.running -} - -// Done returns a channel which is closed if the transfer completes or is -// cancelled. Note that having 0 watchers causes a transfer to be cancelled. -func (t *transfer) Done() <-chan struct{} { - // Note that this doesn't return t.ctx.Done() because that channel will - // be closed the moment Cancel is called, and we need to return a - // channel that blocks until a cancellation is actually acknowledged by - // the transfer function. - return t.running -} - -// Released returns a channel which is closed once all watchers release the -// transfer AND the transfer is no longer tracked by the transfer manager. -func (t *transfer) Released() <-chan struct{} { - return t.released -} - -// Context returns the context associated with the transfer. -func (t *transfer) Context() context.Context { - return t.ctx -} - -// Close is called by the transfer manager when the transfer is no longer -// being tracked. -func (t *transfer) Close() { - t.mu.Lock() - t.closed = true - if len(t.watchers) == 0 { - close(t.released) - } - t.mu.Unlock() -} - -// DoFunc is a function called by the transfer manager to actually perform -// a transfer. It should be non-blocking. It should wait until the start channel -// is closed before transferring any data. If the function closes inactive, that -// signals to the transfer manager that the job is no longer actively moving -// data - for example, it may be waiting for a dependent transfer to finish. -// This prevents it from taking up a slot. 
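- // Illustrative aside (not part of the original file): a minimal DoFunc that honors the protocol just described - wait on start before moving data and always close the progress channel. doWork is a hypothetical stand-in for the actual transfer body; inactive is only closed by transfers that block on a dependency, so this sketch ignores it:
- //
- //	func makeDoFunc(doWork func(ctx context.Context) error) DoFunc {
- //		return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
- //			t := NewTransfer()
- //			go func() {
- //				defer close(progressChan)
- //				<-start // wait for a concurrency slot
- //				_ = doWork(t.Context()) // errors would be recorded on a concrete transfer type
- //			}()
- //			return t
- //		}
- //	}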
-type DoFunc func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer - -// TransferManager is used by LayerDownloadManager and LayerUploadManager to -// schedule and deduplicate transfers. It is up to the TransferManager -// implementation to make the scheduling and concurrency decisions. -type TransferManager interface { - // Transfer checks if a transfer with the given key is in progress. If - // so, it returns progress and error output from that transfer. - // Otherwise, it will call xferFunc to initiate the transfer. - Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) - // SetConcurrency sets the concurrencyLimit so that it can be adjusted on daemon reload - SetConcurrency(concurrency int) -} - -type transferManager struct { - mu sync.Mutex - - concurrencyLimit int - activeTransfers int - transfers map[string]Transfer - waitingTransfers []chan struct{} -} - -// NewTransferManager returns a new TransferManager. -func NewTransferManager(concurrencyLimit int) TransferManager { - return &transferManager{ - concurrencyLimit: concurrencyLimit, - transfers: make(map[string]Transfer), - } -} - -// SetConcurrency sets the concurrencyLimit -func (tm *transferManager) SetConcurrency(concurrency int) { - tm.mu.Lock() - tm.concurrencyLimit = concurrency - tm.mu.Unlock() -} - -// Transfer checks if a transfer matching the given key is in progress. If not, -// it starts one by calling xferFunc. The caller supplies a progress.Output -// which receives progress output from the transfer. -func (tm *transferManager) Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) { - tm.mu.Lock() - defer tm.mu.Unlock() - - for { - xfer, present := tm.transfers[key] - if !present { - break - } - // Transfer is already in progress. - watcher := xfer.Watch(progressOutput) - - select { - case <-xfer.Context().Done(): - // We don't want to watch a transfer that has been cancelled. - // Wait for it to be removed from the map and try again. - xfer.Release(watcher) - tm.mu.Unlock() - // The goroutine that removes this transfer from the - // map is also waiting for xfer.Done(), so yield to it. - // This could be avoided by adding a Closed method - // to Transfer to allow explicitly waiting for it to be - // removed from the map, but forcing a scheduling round in - // this very rare case seems better than bloating the - // interface definition. - runtime.Gosched() - <-xfer.Done() - tm.mu.Lock() - default: - return xfer, watcher - } - } - - start := make(chan struct{}) - inactive := make(chan struct{}) - - if tm.concurrencyLimit == 0 || tm.activeTransfers < tm.concurrencyLimit { - close(start) - tm.activeTransfers++ - } else { - tm.waitingTransfers = append(tm.waitingTransfers, start) - } - - masterProgressChan := make(chan progress.Progress) - xfer := xferFunc(masterProgressChan, start, inactive) - watcher := xfer.Watch(progressOutput) - go xfer.Broadcast(masterProgressChan) - tm.transfers[key] = xfer - - // When the transfer is finished, remove from the map. - go func() { - for { - select { - case <-inactive: - tm.mu.Lock() - tm.inactivate(start) - tm.mu.Unlock() - inactive = nil - case <-xfer.Done(): - tm.mu.Lock() - if inactive != nil { - tm.inactivate(start) - } - delete(tm.transfers, key) - tm.mu.Unlock() - xfer.Close() - return - } - } - }() - - return xfer, watcher -} - -func (tm *transferManager) inactivate(start chan struct{}) { - // If the transfer was started, remove it from the activeTransfers - // count.
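- // Illustrative aside (not part of the original file): the slot handoff below implements a small FIFO semaphore. Closing a waiting transfer's start channel wakes it; only when nobody is waiting does the active count drop. In outline:
- //
- //	if len(waiting) != 0 {
- //		close(waiting[0]) // hand the freed slot to the next queued transfer
- //		waiting = waiting[1:]
- //	} else {
- //		active--
- //	}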
- select { - case <-start: - // Start next transfer if any are waiting - if len(tm.waitingTransfers) != 0 { - close(tm.waitingTransfers[0]) - tm.waitingTransfers = tm.waitingTransfers[1:] - } else { - tm.activeTransfers-- - } - default: - } -} diff --git a/vendor/github.com/docker/docker/distribution/xfer/upload.go b/vendor/github.com/docker/docker/distribution/xfer/upload.go deleted file mode 100644 index 33b45ad74..000000000 --- a/vendor/github.com/docker/docker/distribution/xfer/upload.go +++ /dev/null @@ -1,174 +0,0 @@ -package xfer // import "github.com/docker/docker/distribution/xfer" - -import ( - "context" - "errors" - "time" - - "github.com/docker/distribution" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/progress" - "github.com/sirupsen/logrus" -) - -const maxUploadAttempts = 5 - -// LayerUploadManager provides task management and progress reporting for -// uploads. -type LayerUploadManager struct { - tm TransferManager - waitDuration time.Duration -} - -// SetConcurrency sets the max concurrent uploads for each push -func (lum *LayerUploadManager) SetConcurrency(concurrency int) { - lum.tm.SetConcurrency(concurrency) -} - -// NewLayerUploadManager returns a new LayerUploadManager. -func NewLayerUploadManager(concurrencyLimit int, options ...func(*LayerUploadManager)) *LayerUploadManager { - manager := LayerUploadManager{ - tm: NewTransferManager(concurrencyLimit), - waitDuration: time.Second, - } - for _, option := range options { - option(&manager) - } - return &manager -} - -type uploadTransfer struct { - Transfer - - remoteDescriptor distribution.Descriptor - err error -} - -// An UploadDescriptor references a layer that may need to be uploaded. -type UploadDescriptor interface { - // Key returns the key used to deduplicate uploads. - Key() string - // ID returns the ID for display purposes. - ID() string - // DiffID should return the DiffID for this layer. - DiffID() layer.DiffID - // Upload is called to perform the Upload. - Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) - // SetRemoteDescriptor provides the distribution.Descriptor that was - // returned by Upload. This descriptor is not to be confused with - // the UploadDescriptor interface, which is used for internally - // identifying layers that are being uploaded. - SetRemoteDescriptor(descriptor distribution.Descriptor) -} - -// Upload is a blocking function which ensures the listed layers are present on -// the remote registry. It uses the string returned by the Key method to -// deduplicate uploads. 
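- // Illustrative aside (not part of the original file): a typical call site. descriptors would hold concrete UploadDescriptor implementations such as v2PushDescriptor earlier in this patch; ctx and progressOutput are assumed:
- //
- //	lum := NewLayerUploadManager(3) // at most 3 concurrent uploads per push
- //	if err := lum.Upload(ctx, descriptors, progressOutput); err != nil {
- //		return err // any failed layer aborts the whole push
- //	}
- //	// on success, each descriptor has had SetRemoteDescriptor called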
-func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) error { - var ( - uploads []*uploadTransfer - dedupDescriptors = make(map[string]*uploadTransfer) - ) - - for _, descriptor := range layers { - progress.Update(progressOutput, descriptor.ID(), "Preparing") - - key := descriptor.Key() - if _, present := dedupDescriptors[key]; present { - continue - } - - xferFunc := lum.makeUploadFunc(descriptor) - upload, watcher := lum.tm.Transfer(descriptor.Key(), xferFunc, progressOutput) - defer upload.Release(watcher) - uploads = append(uploads, upload.(*uploadTransfer)) - dedupDescriptors[key] = upload.(*uploadTransfer) - } - - for _, upload := range uploads { - select { - case <-ctx.Done(): - return ctx.Err() - case <-upload.Transfer.Done(): - if upload.err != nil { - return upload.err - } - } - } - for _, l := range layers { - l.SetRemoteDescriptor(dedupDescriptors[l.Key()].remoteDescriptor) - } - - return nil -} - -func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFunc { - return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { - u := &uploadTransfer{ - Transfer: NewTransfer(), - } - - go func() { - defer func() { - close(progressChan) - }() - - progressOutput := progress.ChanOutput(progressChan) - - select { - case <-start: - default: - progress.Update(progressOutput, descriptor.ID(), "Waiting") - <-start - } - - retries := 0 - for { - remoteDescriptor, err := descriptor.Upload(u.Transfer.Context(), progressOutput) - if err == nil { - u.remoteDescriptor = remoteDescriptor - break - } - - // If an error was returned because the context - // was cancelled, we shouldn't retry. - select { - case <-u.Transfer.Context().Done(): - u.err = err - return - default: - } - - retries++ - if _, isDNR := err.(DoNotRetry); isDNR || retries == maxUploadAttempts { - logrus.Errorf("Upload failed: %v", err) - u.err = err - return - } - - logrus.Errorf("Upload failed, retrying: %v", err) - delay := retries * 5 - ticker := time.NewTicker(lum.waitDuration) - - selectLoop: - for { - progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1]) - select { - case <-ticker.C: - delay-- - if delay == 0 { - ticker.Stop() - break selectLoop - } - case <-u.Transfer.Context().Done(): - ticker.Stop() - u.err = errors.New("upload cancelled during retry delay") - return - } - } - } - }() - - return u - } -} diff --git a/vendor/github.com/docker/docker/dockerversion/useragent.go b/vendor/github.com/docker/docker/dockerversion/useragent.go deleted file mode 100644 index 2eceb6fa9..000000000 --- a/vendor/github.com/docker/docker/dockerversion/useragent.go +++ /dev/null @@ -1,76 +0,0 @@ -package dockerversion // import "github.com/docker/docker/dockerversion" - -import ( - "context" - "fmt" - "runtime" - - "github.com/docker/docker/pkg/parsers/kernel" - "github.com/docker/docker/pkg/useragent" -) - -// UAStringKey is used as key type for user-agent string in net/context struct -const UAStringKey = "upstream-user-agent" - -// DockerUserAgent is the User-Agent the Docker client uses to identify itself. 
-// In accordance with RFC 7231 (5.5.3), it is of the form: -// [docker client's UA] UpstreamClient([upstream client's UA]) -func DockerUserAgent(ctx context.Context) string { - httpVersion := make([]useragent.VersionInfo, 0, 6) - httpVersion = append(httpVersion, useragent.VersionInfo{Name: "docker", Version: Version}) - httpVersion = append(httpVersion, useragent.VersionInfo{Name: "go", Version: runtime.Version()}) - httpVersion = append(httpVersion, useragent.VersionInfo{Name: "git-commit", Version: GitCommit}) - if kernelVersion, err := kernel.GetKernelVersion(); err == nil { - httpVersion = append(httpVersion, useragent.VersionInfo{Name: "kernel", Version: kernelVersion.String()}) - } - httpVersion = append(httpVersion, useragent.VersionInfo{Name: "os", Version: runtime.GOOS}) - httpVersion = append(httpVersion, useragent.VersionInfo{Name: "arch", Version: runtime.GOARCH}) - - dockerUA := useragent.AppendVersions("", httpVersion...) - upstreamUA := getUserAgentFromContext(ctx) - if len(upstreamUA) > 0 { - ret := insertUpstreamUserAgent(upstreamUA, dockerUA) - return ret - } - return dockerUA -} - -// getUserAgentFromContext returns the previously saved user-agent string stored in ctx, if one exists -func getUserAgentFromContext(ctx context.Context) string { - var upstreamUA string - if ctx != nil { - var ki interface{} = ctx.Value(UAStringKey) - if ki != nil { - upstreamUA = ctx.Value(UAStringKey).(string) - } - } - return upstreamUA -} - -// escapeStr returns s with every rune in charsToEscape escaped by a backslash -func escapeStr(s string, charsToEscape string) string { - var ret string - for _, currRune := range s { - appended := false - for _, escapableRune := range charsToEscape { - if currRune == escapableRune { - ret += `\` + string(currRune) - appended = true - break - } - } - if !appended { - ret += string(currRune) - } - } - return ret -} - -// insertUpstreamUserAgent adds the upstream client useragent to create a user-agent -// string of the form: -// $dockerUA UpstreamClient($upstreamUA) -func insertUpstreamUserAgent(upstreamUA string, dockerUA string) string { - charsToEscape := `();\` - upstreamUAEscaped := escapeStr(upstreamUA, charsToEscape) - return fmt.Sprintf("%s UpstreamClient(%s)", dockerUA, upstreamUAEscaped) -} diff --git a/vendor/github.com/docker/docker/dockerversion/version_lib.go b/vendor/github.com/docker/docker/dockerversion/version_lib.go deleted file mode 100644 index 0897c0728..000000000 --- a/vendor/github.com/docker/docker/dockerversion/version_lib.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build !autogen - -// Package dockerversion is auto-generated at build-time -package dockerversion // import "github.com/docker/docker/dockerversion" - -// Default build-time variable for library-import. -// This file is overridden on build with build-time information. 
-const ( - GitCommit = "library-import" - Version = "library-import" - BuildTime = "library-import" - IAmStatic = "library-import" - ContainerdCommitID = "library-import" - RuncCommitID = "library-import" - InitCommitID = "library-import" - PlatformName = "" -) diff --git a/vendor/github.com/docker/docker/errdefs/defs.go b/vendor/github.com/docker/docker/errdefs/defs.go deleted file mode 100644 index e6a2275b2..000000000 --- a/vendor/github.com/docker/docker/errdefs/defs.go +++ /dev/null @@ -1,74 +0,0 @@ -package errdefs // import "github.com/docker/docker/errdefs" - -// ErrNotFound signals that the requested object doesn't exist -type ErrNotFound interface { - NotFound() -} - -// ErrInvalidParameter signals that the user input is invalid -type ErrInvalidParameter interface { - InvalidParameter() -} - -// ErrConflict signals that some internal state conflicts with the requested action and can't be performed. -// A change in state should be able to clear this error. -type ErrConflict interface { - Conflict() -} - -// ErrUnauthorized is used to signify that the user is not authorized to perform a specific action -type ErrUnauthorized interface { - Unauthorized() -} - -// ErrUnavailable signals that the requested action/subsystem is not available. -type ErrUnavailable interface { - Unavailable() -} - -// ErrForbidden signals that the requested action cannot be performed under any circumstances. -// When an ErrForbidden is returned, the caller should never retry the action. -type ErrForbidden interface { - Forbidden() -} - -// ErrSystem signals that some internal error occurred. -// An example of this would be a failed mount request. -type ErrSystem interface { - System() -} - -// ErrNotModified signals that an action can't be performed because it's already in the desired state -type ErrNotModified interface { - NotModified() -} - -// ErrAlreadyExists is a special case of ErrConflict which signals that the desired object already exists -type ErrAlreadyExists interface { - AlreadyExists() -} - -// ErrNotImplemented signals that the requested action/feature is not implemented on the system as configured. -type ErrNotImplemented interface { - NotImplemented() -} - -// ErrUnknown signals that the kind of error that occurred is not known. -type ErrUnknown interface { - Unknown() -} - -// ErrCancelled signals that the action was cancelled. -type ErrCancelled interface { - Cancelled() -} - -// ErrDeadline signals that the deadline was reached before the action completed. -type ErrDeadline interface { - DeadlineExceeded() -} - -// ErrDataLoss indicates that data was lost or there is data corruption. -type ErrDataLoss interface { - DataLoss() -} diff --git a/vendor/github.com/docker/docker/errdefs/doc.go b/vendor/github.com/docker/docker/errdefs/doc.go deleted file mode 100644 index c211f174f..000000000 --- a/vendor/github.com/docker/docker/errdefs/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package errdefs defines a set of error interfaces that packages should use for communicating classes of errors. -// Errors that cross the package boundary should implement one (and only one) of these interfaces. -// -// Packages should not reference these interfaces directly, only implement them. -// To check if a particular error implements one of these interfaces, there are helper -// functions provided (e.g. `Is`) which can be used rather than asserting the interfaces directly. -// If you must assert on these interfaces, be sure to check the causal chain (`err.Cause()`). 
-package errdefs // import "github.com/docker/docker/errdefs" diff --git a/vendor/github.com/docker/docker/errdefs/helpers.go b/vendor/github.com/docker/docker/errdefs/helpers.go deleted file mode 100644 index 6169c2bc6..000000000 --- a/vendor/github.com/docker/docker/errdefs/helpers.go +++ /dev/null @@ -1,240 +0,0 @@ -package errdefs // import "github.com/docker/docker/errdefs" - -import "context" - -type errNotFound struct{ error } - -func (errNotFound) NotFound() {} - -func (e errNotFound) Cause() error { - return e.error -} - -// NotFound is a helper to create an error of the class with the same name from any error type -func NotFound(err error) error { - if err == nil { - return nil - } - return errNotFound{err} -} - -type errInvalidParameter struct{ error } - -func (errInvalidParameter) InvalidParameter() {} - -func (e errInvalidParameter) Cause() error { - return e.error -} - -// InvalidParameter is a helper to create an error of the class with the same name from any error type -func InvalidParameter(err error) error { - if err == nil { - return nil - } - return errInvalidParameter{err} -} - -type errConflict struct{ error } - -func (errConflict) Conflict() {} - -func (e errConflict) Cause() error { - return e.error -} - -// Conflict is a helper to create an error of the class with the same name from any error type -func Conflict(err error) error { - if err == nil { - return nil - } - return errConflict{err} -} - -type errUnauthorized struct{ error } - -func (errUnauthorized) Unauthorized() {} - -func (e errUnauthorized) Cause() error { - return e.error -} - -// Unauthorized is a helper to create an error of the class with the same name from any error type -func Unauthorized(err error) error { - if err == nil { - return nil - } - return errUnauthorized{err} -} - -type errUnavailable struct{ error } - -func (errUnavailable) Unavailable() {} - -func (e errUnavailable) Cause() error { - return e.error -} - -// Unavailable is a helper to create an error of the class with the same name from any error type -func Unavailable(err error) error { - return errUnavailable{err} -} - -type errForbidden struct{ error } - -func (errForbidden) Forbidden() {} - -func (e errForbidden) Cause() error { - return e.error -} - -// Forbidden is a helper to create an error of the class with the same name from any error type -func Forbidden(err error) error { - if err == nil { - return nil - } - return errForbidden{err} -} - -type errSystem struct{ error } - -func (errSystem) System() {} - -func (e errSystem) Cause() error { - return e.error -} - -// System is a helper to create an error of the class with the same name from any error type -func System(err error) error { - if err == nil { - return nil - } - return errSystem{err} -} - -type errNotModified struct{ error } - -func (errNotModified) NotModified() {} - -func (e errNotModified) Cause() error { - return e.error -} - -// NotModified is a helper to create an error of the class with the same name from any error type -func NotModified(err error) error { - if err == nil { - return nil - } - return errNotModified{err} -} - -type errAlreadyExists struct{ error } - -func (errAlreadyExists) AlreadyExists() {} - -func (e errAlreadyExists) Cause() error { - return e.error -} - -// AlreadyExists is a helper to create an error of the class with the same name from any error type -func AlreadyExists(err error) error { - if err == nil { - return nil - } - return errAlreadyExists{err} -} - -type errNotImplemented struct{ error } - -func (errNotImplemented) 
NotImplemented() {} - -func (e errNotImplemented) Cause() error { - return e.error -} - -// NotImplemented is a helper to create an error of the class with the same name from any error type -func NotImplemented(err error) error { - if err == nil { - return nil - } - return errNotImplemented{err} -} - -type errUnknown struct{ error } - -func (errUnknown) Unknown() {} - -func (e errUnknown) Cause() error { - return e.error -} - -// Unknown is a helper to create an error of the class with the same name from any error type -func Unknown(err error) error { - if err == nil { - return nil - } - return errUnknown{err} -} - -type errCancelled struct{ error } - -func (errCancelled) Cancelled() {} - -func (e errCancelled) Cause() error { - return e.error -} - -// Cancelled is a helper to create an error of the class with the same name from any error type -func Cancelled(err error) error { - if err == nil { - return nil - } - return errCancelled{err} -} - -type errDeadline struct{ error } - -func (errDeadline) DeadlineExceeded() {} - -func (e errDeadline) Cause() error { - return e.error -} - -// Deadline is a helper to create an error of the class with the same name from any error type -func Deadline(err error) error { - if err == nil { - return nil - } - return errDeadline{err} -} - -type errDataLoss struct{ error } - -func (errDataLoss) DataLoss() {} - -func (e errDataLoss) Cause() error { - return e.error -} - -// DataLoss is a helper to create an error of the class with the same name from any error type -func DataLoss(err error) error { - if err == nil { - return nil - } - return errDataLoss{err} -} - -// FromContext returns the error class from the passed in context -func FromContext(ctx context.Context) error { - e := ctx.Err() - if e == nil { - return nil - } - - if e == context.Canceled { - return Cancelled(e) - } - if e == context.DeadlineExceeded { - return Deadline(e) - } - return Unknown(e) -} diff --git a/vendor/github.com/docker/docker/errdefs/is.go b/vendor/github.com/docker/docker/errdefs/is.go deleted file mode 100644 index e0513331b..000000000 --- a/vendor/github.com/docker/docker/errdefs/is.go +++ /dev/null @@ -1,114 +0,0 @@ -package errdefs // import "github.com/docker/docker/errdefs" - -type causer interface { - Cause() error -} - -func getImplementer(err error) error { - switch e := err.(type) { - case - ErrNotFound, - ErrInvalidParameter, - ErrConflict, - ErrUnauthorized, - ErrUnavailable, - ErrForbidden, - ErrSystem, - ErrNotModified, - ErrAlreadyExists, - ErrNotImplemented, - ErrCancelled, - ErrDeadline, - ErrDataLoss, - ErrUnknown: - return err - case causer: - return getImplementer(e.Cause()) - default: - return err - } -} - -// IsNotFound returns if the passed in error is an ErrNotFound -func IsNotFound(err error) bool { - _, ok := getImplementer(err).(ErrNotFound) - return ok -} - -// IsInvalidParameter returns if the passed in error is an ErrInvalidParameter -func IsInvalidParameter(err error) bool { - _, ok := getImplementer(err).(ErrInvalidParameter) - return ok -} - -// IsConflict returns if the passed in error is an ErrConflict -func IsConflict(err error) bool { - _, ok := getImplementer(err).(ErrConflict) - return ok -} - -// IsUnauthorized returns if the passed in error is an ErrUnauthorized -func IsUnauthorized(err error) bool { - _, ok := getImplementer(err).(ErrUnauthorized) - return ok -} - -// IsUnavailable returns if the passed in error is an ErrUnavailable -func IsUnavailable(err error) bool { - _, ok := getImplementer(err).(ErrUnavailable) - return ok -} - 
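To make the wrap-and-check pattern above concrete, a small usage sketch; the error message is invented, but the NotFound/IsNotFound helpers are the ones defined in this package:

    package main

    import (
    	"errors"
    	"fmt"

    	"github.com/docker/docker/errdefs"
    )

    func main() {
    	// Wrap a plain error into the NotFound class. The helper keeps the
    	// original error as the cause, so the class survives the causal chain.
    	err := errdefs.NotFound(errors.New("no such container: web"))

    	fmt.Println(errdefs.IsNotFound(err)) // true
    	fmt.Println(errdefs.IsConflict(err)) // false
    }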
-// IsForbidden returns if the passed in error is an ErrForbidden -func IsForbidden(err error) bool { - _, ok := getImplementer(err).(ErrForbidden) - return ok -} - -// IsSystem returns if the passed in error is an ErrSystem -func IsSystem(err error) bool { - _, ok := getImplementer(err).(ErrSystem) - return ok -} - -// IsNotModified returns if the passed in error is a NotModified error -func IsNotModified(err error) bool { - _, ok := getImplementer(err).(ErrNotModified) - return ok -} - -// IsAlreadyExists returns if the passed in error is a AlreadyExists error -func IsAlreadyExists(err error) bool { - _, ok := getImplementer(err).(ErrAlreadyExists) - return ok -} - -// IsNotImplemented returns if the passed in error is an ErrNotImplemented -func IsNotImplemented(err error) bool { - _, ok := getImplementer(err).(ErrNotImplemented) - return ok -} - -// IsUnknown returns if the passed in error is an ErrUnknown -func IsUnknown(err error) bool { - _, ok := getImplementer(err).(ErrUnknown) - return ok -} - -// IsCancelled returns if the passed in error is an ErrCancelled -func IsCancelled(err error) bool { - _, ok := getImplementer(err).(ErrCancelled) - return ok -} - -// IsDeadline returns if the passed in error is an ErrDeadline -func IsDeadline(err error) bool { - _, ok := getImplementer(err).(ErrDeadline) - return ok -} - -// IsDataLoss returns if the passed in error is an ErrDataLoss -func IsDataLoss(err error) bool { - _, ok := getImplementer(err).(ErrDataLoss) - return ok -} diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/master/call.go b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/master/call.go deleted file mode 100644 index dab9c6707..000000000 --- a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/master/call.go +++ /dev/null @@ -1,132 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "log" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/bfirsh/funker-go" - "github.com/docker/docker/hack/integration-cli-on-swarm/agent/types" -) - -const ( - // funkerRetryTimeout is for the issue https://github.com/bfirsh/funker/issues/3 - // When all the funker replicas are busy in their own job, we cannot connect to funker. 
- funkerRetryTimeout = 1 * time.Hour - funkerRetryDuration = 1 * time.Second -) - -// ticker is needed for some CI (e.g., on Travis, job is aborted when no output emitted for 10 minutes) -func ticker(d time.Duration) chan struct{} { - t := time.NewTicker(d) - stop := make(chan struct{}) - go func() { - for { - select { - case <-t.C: - log.Printf("tick (just for keeping CI job active) per %s", d.String()) - case <-stop: - t.Stop() - } - } - }() - return stop -} - -func executeTests(funkerName string, testChunks [][]string) error { - tickerStopper := ticker(9*time.Minute + 55*time.Second) - defer func() { - close(tickerStopper) - }() - begin := time.Now() - log.Printf("Executing %d chunks in parallel, against %q", len(testChunks), funkerName) - var wg sync.WaitGroup - var passed, failed uint32 - for chunkID, tests := range testChunks { - log.Printf("Executing chunk %d (contains %d test filters)", chunkID, len(tests)) - wg.Add(1) - go func(chunkID int, tests []string) { - defer wg.Done() - chunkBegin := time.Now() - result, err := executeTestChunkWithRetry(funkerName, types.Args{ - ChunkID: chunkID, - Tests: tests, - }) - if result.RawLog != "" { - for _, s := range strings.Split(result.RawLog, "\n") { - log.Printf("Log (chunk %d): %s", chunkID, s) - } - } - if err != nil { - log.Printf("Error while executing chunk %d: %v", - chunkID, err) - atomic.AddUint32(&failed, 1) - } else { - if result.Code == 0 { - atomic.AddUint32(&passed, 1) - } else { - atomic.AddUint32(&failed, 1) - } - log.Printf("Finished chunk %d [%d/%d] with %d test filters in %s, code=%d.", - chunkID, passed+failed, len(testChunks), len(tests), - time.Since(chunkBegin), result.Code) - } - }(chunkID, tests) - } - wg.Wait() - // TODO: print actual tests rather than chunks - log.Printf("Executed %d chunks in %s. 
PASS: %d, FAIL: %d.", - len(testChunks), time.Since(begin), passed, failed) - if failed > 0 { - return fmt.Errorf("%d chunks failed", failed) - } - return nil -} - -func executeTestChunk(funkerName string, args types.Args) (types.Result, error) { - ret, err := funker.Call(funkerName, args) - if err != nil { - return types.Result{}, err - } - tmp, err := json.Marshal(ret) - if err != nil { - return types.Result{}, err - } - var result types.Result - err = json.Unmarshal(tmp, &result) - return result, err -} - -func executeTestChunkWithRetry(funkerName string, args types.Args) (types.Result, error) { - begin := time.Now() - for i := 0; time.Since(begin) < funkerRetryTimeout; i++ { - result, err := executeTestChunk(funkerName, args) - if err == nil { - log.Printf("executeTestChunk(%q, %d) returned code %d in trial %d", funkerName, args.ChunkID, result.Code, i) - return result, nil - } - if errorSeemsInteresting(err) { - log.Printf("Error while calling executeTestChunk(%q, %d), will retry (trial %d): %v", - funkerName, args.ChunkID, i, err) - } - // TODO: non-constant sleep - time.Sleep(funkerRetryDuration) - } - return types.Result{}, fmt.Errorf("could not call executeTestChunk(%q, %d) in %v", funkerName, args.ChunkID, funkerRetryTimeout) -} - -// errorSeemsInteresting returns true if err does not seem about https://github.com/bfirsh/funker/issues/3 -func errorSeemsInteresting(err error) bool { - boringSubstrs := []string{"connection refused", "connection reset by peer", "no such host", "transport endpoint is not connected", "no route to host"} - errS := err.Error() - for _, boringS := range boringSubstrs { - if strings.Contains(errS, boringS) { - return false - } - } - return true -} diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/master/master.go b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/master/master.go deleted file mode 100644 index a0d9a0d38..000000000 --- a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/master/master.go +++ /dev/null @@ -1,65 +0,0 @@ -package main - -import ( - "errors" - "flag" - "io/ioutil" - "log" - "strings" -) - -func main() { - if err := xmain(); err != nil { - log.Fatalf("fatal error: %v", err) - } -} - -func xmain() error { - workerService := flag.String("worker-service", "", "Name of worker service") - chunks := flag.Int("chunks", 0, "Number of chunks") - input := flag.String("input", "", "Path to input file") - randSeed := flag.Int64("rand-seed", int64(0), "Random seed") - shuffle := flag.Bool("shuffle", false, "Shuffle the input so as to mitigate makespan nonuniformity") - flag.Parse() - if *workerService == "" { - return errors.New("worker-service unset") - } - if *chunks == 0 { - return errors.New("chunks unset") - } - if *input == "" { - return errors.New("input unset") - } - - tests, err := loadTests(*input) - if err != nil { - return err - } - testChunks := chunkTests(tests, *chunks, *shuffle, *randSeed) - log.Printf("Loaded %d tests (%d chunks)", len(tests), len(testChunks)) - return executeTests(*workerService, testChunks) -} - -func chunkTests(tests []string, numChunks int, shuffle bool, randSeed int64) [][]string { - // shuffling (experimental) mitigates makespan nonuniformity - // Not sure this can cause some locality problem.. 
- if shuffle { - shuffleStrings(tests, randSeed) - } - return chunkStrings(tests, numChunks) -} - -func loadTests(filename string) ([]string, error) { - b, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - var tests []string - for _, line := range strings.Split(string(b), "\n") { - s := strings.TrimSpace(line) - if s != "" { - tests = append(tests, s) - } - } - return tests, nil -} diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/master/set.go b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/master/set.go deleted file mode 100644 index d28c41da7..000000000 --- a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/master/set.go +++ /dev/null @@ -1,28 +0,0 @@ -package main - -import ( - "math/rand" -) - -// chunkStrings chunks the string slice -func chunkStrings(x []string, numChunks int) [][]string { - var result [][]string - chunkSize := (len(x) + numChunks - 1) / numChunks - for i := 0; i < len(x); i += chunkSize { - ub := i + chunkSize - if ub > len(x) { - ub = len(x) - } - result = append(result, x[i:ub]) - } - return result -} - -// shuffleStrings shuffles strings -func shuffleStrings(x []string, seed int64) { - r := rand.New(rand.NewSource(seed)) - for i := range x { - j := r.Intn(i + 1) - x[i], x[j] = x[j], x[i] - } -} diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/types/types.go b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/types/types.go deleted file mode 100644 index fc598f033..000000000 --- a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/types/types.go +++ /dev/null @@ -1,18 +0,0 @@ -package types - -// Args is the type for funker args -type Args struct { - // ChunkID is a unique number of the chunk - ChunkID int `json:"chunk_id"` - // Tests is the set of strings that are passed as `-check.f` filters - Tests []string `json:"tests"` -} - -// Result is the type for funker result -type Result struct { - // ChunkID corresponds to Args.ChunkID - ChunkID int `json:"chunk_id"` - // Code is the exit code - Code int `json:"code"` - RawLog string `json:"raw_log"` -} diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker/executor.go b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker/executor.go deleted file mode 100644 index eef80d461..000000000 --- a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker/executor.go +++ /dev/null @@ -1,118 +0,0 @@ -package main - -import ( - "bytes" - "context" - "fmt" - "io" - "os" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/client" - "github.com/docker/docker/pkg/stdcopy" -) - -// testChunkExecutor executes the integration-cli binary. -// image needs to be the worker image itself. testFlags are an OR-set of regexps for filtering tests. 
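As a quick check of the ceiling-division chunking in set.go above, a self-contained sketch of the same logic; the test names and the helper name chunk are invented for illustration:

    package main

    import "fmt"

    func chunk(x []string, numChunks int) [][]string {
    	var result [][]string
    	// Ceiling division: 5 items over 2 chunks gives a chunk size of 3.
    	chunkSize := (len(x) + numChunks - 1) / numChunks
    	for i := 0; i < len(x); i += chunkSize {
    		ub := i + chunkSize
    		if ub > len(x) {
    			ub = len(x)
    		}
    		result = append(result, x[i:ub])
    	}
    	return result
    }

    func main() {
    	tests := []string{"TestA$", "TestB$", "TestC$", "TestD$", "TestE$"}
    	fmt.Println(chunk(tests, 2)) // [[TestA$ TestB$ TestC$] [TestD$ TestE$]]
    }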
-type testChunkExecutor func(image string, tests []string) (int64, string, error) - -func dryTestChunkExecutor() testChunkExecutor { - return func(image string, tests []string) (int64, string, error) { - return 0, fmt.Sprintf("DRY RUN (image=%q, tests=%v)", image, tests), nil - } -} - -// privilegedTestChunkExecutor invokes a privileged container from the worker -// service via bind-mounted API socket so as to execute the test chunk -func privilegedTestChunkExecutor(autoRemove bool) testChunkExecutor { - return func(image string, tests []string) (int64, string, error) { - cli, err := client.NewEnvClient() - if err != nil { - return 0, "", err - } - // propagate variables from the host (needs to be defined in the compose file) - experimental := os.Getenv("DOCKER_EXPERIMENTAL") - graphdriver := os.Getenv("DOCKER_GRAPHDRIVER") - if graphdriver == "" { - info, err := cli.Info(context.Background()) - if err != nil { - return 0, "", err - } - graphdriver = info.Driver - } - // `daemon_dest` is similar to `$DEST` (e.g. `bundles/VERSION/test-integration`) - // but it exists outside of `bundles` so as to make `$DOCKER_GRAPHDRIVER` work. - // - // Without this hack, `$DOCKER_GRAPHDRIVER` fails because of (e.g.) `overlay2 is not supported over overlayfs` - // - // see integration-cli/daemon/daemon.go - daemonDest := "/daemon_dest" - config := container.Config{ - Image: image, - Env: []string{ - "TESTFLAGS=-check.f " + strings.Join(tests, "|"), - "KEEPBUNDLE=1", - "DOCKER_INTEGRATION_TESTS_VERIFIED=1", // for avoiding rebuilding integration-cli - "DOCKER_EXPERIMENTAL=" + experimental, - "DOCKER_GRAPHDRIVER=" + graphdriver, - "DOCKER_INTEGRATION_DAEMON_DEST=" + daemonDest, - }, - Labels: map[string]string{ - "org.dockerproject.integration-cli-on-swarm": "", - "org.dockerproject.integration-cli-on-swarm.comment": "this non-service container is created for running privileged programs on Swarm. 
you can remove this container manually if the corresponding service is already stopped.", - }, - Entrypoint: []string{"hack/dind"}, - Cmd: []string{"hack/make.sh", "test-integration"}, - } - hostConfig := container.HostConfig{ - AutoRemove: autoRemove, - Privileged: true, - Mounts: []mount.Mount{ - { - Type: mount.TypeVolume, - Target: daemonDest, - }, - }, - } - id, stream, err := runContainer(context.Background(), cli, config, hostConfig) - if err != nil { - return 0, "", err - } - var b bytes.Buffer - teeContainerStream(&b, os.Stdout, os.Stderr, stream) - resultC, errC := cli.ContainerWait(context.Background(), id, "") - select { - case err := <-errC: - return 0, "", err - case result := <-resultC: - return result.StatusCode, b.String(), nil - } - } -} - -func runContainer(ctx context.Context, cli *client.Client, config container.Config, hostConfig container.HostConfig) (string, io.ReadCloser, error) { - created, err := cli.ContainerCreate(context.Background(), - &config, &hostConfig, nil, "") - if err != nil { - return "", nil, err - } - if err = cli.ContainerStart(ctx, created.ID, types.ContainerStartOptions{}); err != nil { - return "", nil, err - } - stream, err := cli.ContainerLogs(ctx, - created.ID, - types.ContainerLogsOptions{ - ShowStdout: true, - ShowStderr: true, - Follow: true, - }) - return created.ID, stream, err -} - -func teeContainerStream(w, stdout, stderr io.Writer, stream io.ReadCloser) { - stdcopy.StdCopy(io.MultiWriter(w, stdout), io.MultiWriter(w, stderr), stream) - stream.Close() -} diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker/worker.go b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker/worker.go deleted file mode 100644 index ea8bb3fe2..000000000 --- a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/worker/worker.go +++ /dev/null @@ -1,69 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "log" - "time" - - "github.com/bfirsh/funker-go" - "github.com/docker/distribution/reference" - "github.com/docker/docker/hack/integration-cli-on-swarm/agent/types" -) - -func main() { - if err := xmain(); err != nil { - log.Fatalf("fatal error: %v", err) - } -} - -func validImageDigest(s string) bool { - return reference.DigestRegexp.FindString(s) != "" -} - -func xmain() error { - workerImageDigest := flag.String("worker-image-digest", "", "Needs to be the digest of this worker image itself") - dryRun := flag.Bool("dry-run", false, "Dry run") - keepExecutor := flag.Bool("keep-executor", false, "Do not auto-remove executor containers, which is used for running privileged programs on Swarm") - flag.Parse() - if !validImageDigest(*workerImageDigest) { - // Because of issue #29582. - // `docker service create localregistry.example.com/blahblah:latest` pulls the image data to local, but not a tag. 
- // So, `docker run localregistry.example.com/blahblah:latest` fails: `Unable to find image 'localregistry.example.com/blahblah:latest' locally` - return fmt.Errorf("worker-image-digest must be a digest, got %q", *workerImageDigest) - } - executor := privilegedTestChunkExecutor(!*keepExecutor) - if *dryRun { - executor = dryTestChunkExecutor() - } - return handle(*workerImageDigest, executor) -} - -func handle(workerImageDigest string, executor testChunkExecutor) error { - log.Printf("Waiting for a funker request") - return funker.Handle(func(args *types.Args) types.Result { - log.Printf("Executing chunk %d, contains %d test filters", - args.ChunkID, len(args.Tests)) - begin := time.Now() - code, rawLog, err := executor(workerImageDigest, args.Tests) - if err != nil { - log.Printf("Error while executing chunk %d: %v", args.ChunkID, err) - if code == 0 { - // Make sure this is a failure - code = 1 - } - return types.Result{ - ChunkID: args.ChunkID, - Code: int(code), - RawLog: rawLog, - } - } - elapsed := time.Since(begin) - log.Printf("Finished chunk %d, code=%d, elapsed=%v", args.ChunkID, code, elapsed) - return types.Result{ - ChunkID: args.ChunkID, - Code: int(code), - RawLog: rawLog, - } - }) -} diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/compose.go b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/compose.go deleted file mode 100644 index a92282a1a..000000000 --- a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/compose.go +++ /dev/null @@ -1,122 +0,0 @@ -package main - -import ( - "context" - "io/ioutil" - "os" - "path/filepath" - "text/template" - - "github.com/docker/docker/client" -) - -const composeTemplate = `# generated by integration-cli-on-swarm -version: "3" - -services: - worker: - image: "{{.WorkerImage}}" - command: ["-worker-image-digest={{.WorkerImageDigest}}", "-dry-run={{.DryRun}}", "-keep-executor={{.KeepExecutor}}"] - networks: - - net - volumes: -# Bind-mount the API socket so that we can invoke "docker run --privileged" within the service containers - - /var/run/docker.sock:/var/run/docker.sock - environment: - - DOCKER_GRAPHDRIVER={{.EnvDockerGraphDriver}} - - DOCKER_EXPERIMENTAL={{.EnvDockerExperimental}} - deploy: - mode: replicated - replicas: {{.Replicas}} - restart_policy: -# The restart condition needs to be any for funker function - condition: any - - master: - image: "{{.MasterImage}}" - command: ["-worker-service=worker", "-input=/mnt/input", "-chunks={{.Chunks}}", "-shuffle={{.Shuffle}}", "-rand-seed={{.RandSeed}}"] - networks: - - net - volumes: - - {{.Volume}}:/mnt - deploy: - mode: replicated - replicas: 1 - restart_policy: - condition: none - placement: -# Make sure the master can access the volume - constraints: [node.id == {{.SelfNodeID}}] - -networks: - net: - -volumes: - {{.Volume}}: - external: true -` - -type composeOptions struct { - Replicas int - Chunks int - MasterImage string - WorkerImage string - Volume string - Shuffle bool - RandSeed int64 - DryRun bool - KeepExecutor bool -} - -type composeTemplateOptions struct { - composeOptions - WorkerImageDigest string - SelfNodeID string - EnvDockerGraphDriver string - EnvDockerExperimental string -} - -// createCompose creates "dir/docker-compose.yml". -// If dir is empty, TempDir() is used. 
-func createCompose(dir string, cli *client.Client, opts composeOptions) (string, error) { - if dir == "" { - var err error - dir, err = ioutil.TempDir("", "integration-cli-on-swarm-") - if err != nil { - return "", err - } - } - resolved := composeTemplateOptions{} - resolved.composeOptions = opts - workerImageInspect, _, err := cli.ImageInspectWithRaw(context.Background(), defaultWorkerImageName) - if err != nil { - return "", err - } - if len(workerImageInspect.RepoDigests) > 0 { - resolved.WorkerImageDigest = workerImageInspect.RepoDigests[0] - } else { - // fall back for non-pushed image - resolved.WorkerImageDigest = workerImageInspect.ID - } - info, err := cli.Info(context.Background()) - if err != nil { - return "", err - } - resolved.SelfNodeID = info.Swarm.NodeID - resolved.EnvDockerGraphDriver = os.Getenv("DOCKER_GRAPHDRIVER") - resolved.EnvDockerExperimental = os.Getenv("DOCKER_EXPERIMENTAL") - composeFilePath := filepath.Join(dir, "docker-compose.yml") - tmpl, err := template.New("").Parse(composeTemplate) - if err != nil { - return "", err - } - f, err := os.Create(composeFilePath) - if err != nil { - return "", err - } - defer f.Close() - if err = tmpl.Execute(f, resolved); err != nil { - return "", err - } - return composeFilePath, nil -} diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/dockercmd.go b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/dockercmd.go deleted file mode 100644 index c08b763a2..000000000 --- a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/dockercmd.go +++ /dev/null @@ -1,64 +0,0 @@ -package main - -import ( - "fmt" - "os" - "os/exec" - "strings" - "time" - - "github.com/docker/docker/client" -) - -func system(commands [][]string) error { - for _, c := range commands { - cmd := exec.Command(c[0], c[1:]...) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - cmd.Env = os.Environ() - if err := cmd.Run(); err != nil { - return err - } - } - return nil -} - -func pushImage(unusedCli *client.Client, remote, local string) error { - // FIXME: eliminate os/exec (but it is hard to pass auth without os/exec ...) - return system([][]string{ - {"docker", "image", "tag", local, remote}, - {"docker", "image", "push", remote}, - }) -} - -func deployStack(unusedCli *client.Client, stackName, composeFilePath string) error { - // FIXME: eliminate os/exec (but stack is implemented in CLI ...) - return system([][]string{ - {"docker", "stack", "deploy", - "--compose-file", composeFilePath, - "--with-registry-auth", - stackName}, - }) -} - -func hasStack(unusedCli *client.Client, stackName string) bool { - // FIXME: eliminate os/exec (but stack is implemented in CLI ...) - out, err := exec.Command("docker", "stack", "ls").CombinedOutput() - if err != nil { - panic(fmt.Errorf("`docker stack ls` failed with: %s", string(out))) - } - // FIXME: not accurate - return strings.Contains(string(out), stackName) -} - -func removeStack(unusedCli *client.Client, stackName string) error { - // FIXME: eliminate os/exec (but stack is implemented in CLI ...) 
- if err := system([][]string{ - {"docker", "stack", "rm", stackName}, - }); err != nil { - return err - } - // FIXME - time.Sleep(10 * time.Second) - return nil -} diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/enumerate.go b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/enumerate.go deleted file mode 100644 index 3354c23c0..000000000 --- a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/enumerate.go +++ /dev/null @@ -1,55 +0,0 @@ -package main - -import ( - "fmt" - "io/ioutil" - "path/filepath" - "regexp" -) - -var testFuncRegexp *regexp.Regexp - -func init() { - testFuncRegexp = regexp.MustCompile(`(?m)^\s*func\s+\(\w*\s*\*(\w+Suite)\)\s+(Test\w+)`) -} - -func enumerateTestsForBytes(b []byte) ([]string, error) { - var tests []string - submatches := testFuncRegexp.FindAllSubmatch(b, -1) - for _, submatch := range submatches { - if len(submatch) == 3 { - tests = append(tests, fmt.Sprintf("%s.%s$", submatch[1], submatch[2])) - } - } - return tests, nil -} - -// enumerateTests enumerates valid `-check.f` strings for all the test functions. -// Note that we use regexp rather than parsing Go files for performance reason. -// (Try `TESTFLAGS=-check.list make test-integration` to see the slowness of parsing) -// The files needs to be `gofmt`-ed -// -// The result will be as follows, but unsorted ('$' is appended because they are regexp for `-check.f`): -// "DockerAuthzSuite.TestAuthZPluginAPIDenyResponse$" -// "DockerAuthzSuite.TestAuthZPluginAllowEventStream$" -// ... -// "DockerTrustedSwarmSuite.TestTrustedServiceUpdate$" -func enumerateTests(wd string) ([]string, error) { - testGoFiles, err := filepath.Glob(filepath.Join(wd, "integration-cli", "*_test.go")) - if err != nil { - return nil, err - } - var allTests []string - for _, testGoFile := range testGoFiles { - b, err := ioutil.ReadFile(testGoFile) - if err != nil { - return nil, err - } - tests, err := enumerateTestsForBytes(b) - if err != nil { - return nil, err - } - allTests = append(allTests, tests...) - } - return allTests, nil -} diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/host.go b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/host.go deleted file mode 100644 index fdc2a83e7..000000000 --- a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/host.go +++ /dev/null @@ -1,198 +0,0 @@ -package main - -import ( - "context" - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "strings" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/client" - "github.com/docker/docker/pkg/stdcopy" - "github.com/sirupsen/logrus" -) - -const ( - defaultStackName = "integration-cli-on-swarm" - defaultVolumeName = "integration-cli-on-swarm" - defaultMasterImageName = "integration-cli-master" - defaultWorkerImageName = "integration-cli-worker" -) - -func main() { - rc, err := xmain() - if err != nil { - logrus.Fatalf("fatal error: %v", err) - } - os.Exit(rc) -} - -func xmain() (int, error) { - // Should we use cobra maybe? - replicas := flag.Int("replicas", 1, "Number of worker service replica") - chunks := flag.Int("chunks", 0, "Number of test chunks executed in batch (0 == replicas)") - pushWorkerImage := flag.String("push-worker-image", "", "Push the worker image to the registry. Required for distributed execution. 
(empty == not to push)") - shuffle := flag.Bool("shuffle", false, "Shuffle the input so as to mitigate makespan nonuniformity") - // flags below are rarely used - randSeed := flag.Int64("rand-seed", int64(0), "Random seed used for shuffling (0 == current time)") - filtersFile := flag.String("filters-file", "", "Path to optional file composed of `-check.f` filter strings") - dryRun := flag.Bool("dry-run", false, "Dry run") - keepExecutor := flag.Bool("keep-executor", false, "Do not auto-remove executor containers, which is used for running privileged programs on Swarm") - flag.Parse() - if *chunks == 0 { - *chunks = *replicas - } - if *randSeed == int64(0) { - *randSeed = time.Now().UnixNano() - } - cli, err := client.NewEnvClient() - if err != nil { - return 1, err - } - if hasStack(cli, defaultStackName) { - logrus.Infof("Removing stack %s", defaultStackName) - removeStack(cli, defaultStackName) - } - if hasVolume(cli, defaultVolumeName) { - logrus.Infof("Removing volume %s", defaultVolumeName) - removeVolume(cli, defaultVolumeName) - } - if err = ensureImages(cli, []string{defaultWorkerImageName, defaultMasterImageName}); err != nil { - return 1, err - } - workerImageForStack := defaultWorkerImageName - if *pushWorkerImage != "" { - logrus.Infof("Pushing %s to %s", defaultWorkerImageName, *pushWorkerImage) - if err = pushImage(cli, *pushWorkerImage, defaultWorkerImageName); err != nil { - return 1, err - } - workerImageForStack = *pushWorkerImage - } - compose, err := createCompose("", cli, composeOptions{ - Replicas: *replicas, - Chunks: *chunks, - MasterImage: defaultMasterImageName, - WorkerImage: workerImageForStack, - Volume: defaultVolumeName, - Shuffle: *shuffle, - RandSeed: *randSeed, - DryRun: *dryRun, - KeepExecutor: *keepExecutor, - }) - if err != nil { - return 1, err - } - filters, err := filtersBytes(*filtersFile) - if err != nil { - return 1, err - } - logrus.Infof("Creating volume %s with input data", defaultVolumeName) - if err = createVolumeWithData(cli, - defaultVolumeName, - map[string][]byte{"/input": filters}, - defaultMasterImageName); err != nil { - return 1, err - } - logrus.Infof("Deploying stack %s from %s", defaultStackName, compose) - defer func() { - logrus.Infof("NOTE: You may want to inspect or clean up following resources:") - logrus.Infof(" - Stack: %s", defaultStackName) - logrus.Infof(" - Volume: %s", defaultVolumeName) - logrus.Infof(" - Compose file: %s", compose) - logrus.Infof(" - Master image: %s", defaultMasterImageName) - logrus.Infof(" - Worker image: %s", workerImageForStack) - }() - if err = deployStack(cli, defaultStackName, compose); err != nil { - return 1, err - } - logrus.Infof("The log will be displayed here after some duration."+ - "You can watch the live status via `docker service logs %s_worker`", - defaultStackName) - masterContainerID, err := waitForMasterUp(cli, defaultStackName) - if err != nil { - return 1, err - } - rc, err := waitForContainerCompletion(cli, os.Stdout, os.Stderr, masterContainerID) - if err != nil { - return 1, err - } - logrus.Infof("Exit status: %d", rc) - return int(rc), nil -} - -func ensureImages(cli *client.Client, images []string) error { - for _, image := range images { - _, _, err := cli.ImageInspectWithRaw(context.Background(), image) - if err != nil { - return fmt.Errorf("could not find image %s, please run `make build-integration-cli-on-swarm`: %v", - image, err) - } - } - return nil -} - -func filtersBytes(optionalFiltersFile string) ([]byte, error) { - var b []byte - if optionalFiltersFile == "" { 
- tests, err := enumerateTests(".") - if err != nil { - return b, err - } - b = []byte(strings.Join(tests, "\n") + "\n") - } else { - var err error - b, err = ioutil.ReadFile(optionalFiltersFile) - if err != nil { - return b, err - } - } - return b, nil -} - -func waitForMasterUp(cli *client.Client, stackName string) (string, error) { - // FIXME(AkihiroSuda): it should retry until master is up, rather than pre-sleeping - time.Sleep(10 * time.Second) - - fil := filters.NewArgs() - fil.Add("label", "com.docker.stack.namespace="+stackName) - // FIXME(AkihiroSuda): we should not rely on internal service naming convention - fil.Add("label", "com.docker.swarm.service.name="+stackName+"_master") - masters, err := cli.ContainerList(context.Background(), types.ContainerListOptions{ - All: true, - Filters: fil, - }) - if err != nil { - return "", err - } - if len(masters) == 0 { - return "", fmt.Errorf("master not running in stack %s?", stackName) - } - return masters[0].ID, nil -} - -func waitForContainerCompletion(cli *client.Client, stdout, stderr io.Writer, containerID string) (int64, error) { - stream, err := cli.ContainerLogs(context.Background(), - containerID, - types.ContainerLogsOptions{ - ShowStdout: true, - ShowStderr: true, - Follow: true, - }) - if err != nil { - return 1, err - } - stdcopy.StdCopy(stdout, stderr, stream) - stream.Close() - resultC, errC := cli.ContainerWait(context.Background(), containerID, "") - select { - case err := <-errC: - return 1, err - case result := <-resultC: - return result.StatusCode, nil - } -} diff --git a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/volume.go b/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/volume.go deleted file mode 100644 index a6ddc6fae..000000000 --- a/vendor/github.com/docker/docker/hack/integration-cli-on-swarm/host/volume.go +++ /dev/null @@ -1,88 +0,0 @@ -package main - -import ( - "archive/tar" - "bytes" - "context" - "io" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/api/types/volume" - "github.com/docker/docker/client" -) - -func createTar(data map[string][]byte) (io.Reader, error) { - var b bytes.Buffer - tw := tar.NewWriter(&b) - for path, datum := range data { - hdr := tar.Header{ - Name: path, - Mode: 0644, - Size: int64(len(datum)), - } - if err := tw.WriteHeader(&hdr); err != nil { - return nil, err - } - _, err := tw.Write(datum) - if err != nil { - return nil, err - } - } - if err := tw.Close(); err != nil { - return nil, err - } - return &b, nil -} - -// createVolumeWithData creates a volume with the given data (e.g. data["/foo"] = []byte("bar")) -// Internally, a container is created from the image so as to provision the data to the volume, -// which is attached to the container. 
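To see the createTar pattern above end to end, a standard-library-only sketch that builds the in-memory archive from a path-to-bytes map and reads it back; the /input filter content is made up:

    package main

    import (
    	"archive/tar"
    	"bytes"
    	"fmt"
    	"io"
    )

    func main() {
    	// Build a tar stream in memory, as createTar does.
    	var b bytes.Buffer
    	tw := tar.NewWriter(&b)
    	for path, datum := range map[string][]byte{"/input": []byte("DockerSuite.TestFoo$\n")} {
    		hdr := tar.Header{Name: path, Mode: 0644, Size: int64(len(datum))}
    		if err := tw.WriteHeader(&hdr); err != nil {
    			panic(err)
    		}
    		if _, err := tw.Write(datum); err != nil {
    			panic(err)
    		}
    	}
    	if err := tw.Close(); err != nil {
    		panic(err)
    	}

    	// Read the archive back to confirm the round trip.
    	tr := tar.NewReader(&b)
    	for {
    		hdr, err := tr.Next()
    		if err == io.EOF {
    			break
    		}
    		if err != nil {
    			panic(err)
    		}
    		content, _ := io.ReadAll(tr)
    		fmt.Printf("%s: %q\n", hdr.Name, content)
    	}
    }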
-func createVolumeWithData(cli *client.Client, volumeName string, data map[string][]byte, image string) error { - _, err := cli.VolumeCreate(context.Background(), - volume.VolumeCreateBody{ - Driver: "local", - Name: volumeName, - }) - if err != nil { - return err - } - mnt := "/mnt" - miniContainer, err := cli.ContainerCreate(context.Background(), - &container.Config{ - Image: image, - }, - &container.HostConfig{ - Mounts: []mount.Mount{ - { - Type: mount.TypeVolume, - Source: volumeName, - Target: mnt, - }, - }, - }, nil, "") - if err != nil { - return err - } - tr, err := createTar(data) - if err != nil { - return err - } - if cli.CopyToContainer(context.Background(), - miniContainer.ID, mnt, tr, types.CopyToContainerOptions{}); err != nil { - return err - } - return cli.ContainerRemove(context.Background(), - miniContainer.ID, - types.ContainerRemoveOptions{}) -} - -func hasVolume(cli *client.Client, volumeName string) bool { - _, err := cli.VolumeInspect(context.Background(), volumeName) - return err == nil -} - -func removeVolume(cli *client.Client, volumeName string) error { - return cli.VolumeRemove(context.Background(), volumeName, true) -} diff --git a/vendor/github.com/docker/docker/hack/make/.go-autogen b/vendor/github.com/docker/docker/hack/make/.go-autogen deleted file mode 100644 index ba001895d..000000000 --- a/vendor/github.com/docker/docker/hack/make/.go-autogen +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env bash - -rm -rf autogen - -source hack/dockerfile/install/runc.installer -source hack/dockerfile/install/tini.installer -source hack/dockerfile/install/containerd.installer - -cat > dockerversion/version_autogen.go < dockerversion/version_autogen_unix.go < - -param( - [Parameter(Mandatory=$true)][string]$CommitString, - [Parameter(Mandatory=$true)][string]$DockerVersion, - [Parameter(Mandatory=$false)][string]$Platform -) - -$ErrorActionPreference = "Stop" - -# Utility function to get the build date/time in UTC -Function Get-BuildDateTime() { - return $(Get-Date).ToUniversalTime() -} - -try { - $buildDateTime=Get-BuildDateTime - - if (Test-Path ".\autogen") { - Remove-Item ".\autogen" -Recurse -Force | Out-Null - } - - $fileContents = ' -// +build autogen - -// Package dockerversion is auto-generated at build-time -package dockerversion - -// Default build-time variable for library-import. -// This file is overridden on build with build-time informations. 
-const ( - GitCommit string = "'+$CommitString+'" - Version string = "'+$DockerVersion+'" - BuildTime string = "'+$buildDateTime+'" - PlatformName string = "'+$Platform+'" -) - -// AUTOGENERATED FILE; see hack\make\.go-autogen.ps1 -' - - # Write the file without BOM - $outputFile="$(pwd)\dockerversion\version_autogen.go" - if (Test-Path $outputFile) { Remove-Item $outputFile } - [System.IO.File]::WriteAllText($outputFile, $fileContents, (New-Object System.Text.UTF8Encoding($False))) - - New-Item -ItemType Directory -Path "autogen\winresources\tmp" | Out-Null - New-Item -ItemType Directory -Path "autogen\winresources\docker" | Out-Null - New-Item -ItemType Directory -Path "autogen\winresources\dockerd" | Out-Null - Copy-Item "hack\make\.resources-windows\resources.go" "autogen\winresources\docker" - Copy-Item "hack\make\.resources-windows\resources.go" "autogen\winresources\dockerd" - - # Generate a version in the form major,minor,patch,build - $versionQuad=$DockerVersion -replace "[^0-9.]*" -replace "\.", "," - - # Compile the messages - windmc hack\make\.resources-windows\event_messages.mc -h autogen\winresources\tmp -r autogen\winresources\tmp - if ($LASTEXITCODE -ne 0) { Throw "Failed to compile event message resources" } - - # If you really want to understand this madness below, search the Internet for powershell variables after verbatim arguments... Needed to get double-quotes passed through to the compiler options. - # Generate the .syso files containing all the resources and manifest needed to compile the final docker binaries. Both 32 and 64-bit clients. - $env:_ag_dockerVersion=$DockerVersion - $env:_ag_gitCommit=$CommitString - - windres -i hack/make/.resources-windows/docker.rc -o autogen/winresources/docker/rsrc_amd64.syso -F pe-x86-64 --use-temp-file -I autogen/winresources/tmp -D DOCKER_VERSION_QUAD=$versionQuad --% -D DOCKER_VERSION=\"%_ag_dockerVersion%\" -D DOCKER_COMMIT=\"%_ag_gitCommit%\" - if ($LASTEXITCODE -ne 0) { Throw "Failed to compile client 64-bit resources" } - - windres -i hack/make/.resources-windows/docker.rc -o autogen/winresources/docker/rsrc_386.syso -F pe-i386 --use-temp-file -I autogen/winresources/tmp -D DOCKER_VERSION_QUAD=$versionQuad --% -D DOCKER_VERSION=\"%_ag_dockerVersion%\" -D DOCKER_COMMIT=\"%_ag_gitCommit%\" - if ($LASTEXITCODE -ne 0) { Throw "Failed to compile client 32-bit resources" } - - windres -i hack/make/.resources-windows/dockerd.rc -o autogen/winresources/dockerd/rsrc_amd64.syso -F pe-x86-64 --use-temp-file -I autogen/winresources/tmp -D DOCKER_VERSION_QUAD=$versionQuad --% -D DOCKER_VERSION=\"%_ag_dockerVersion%\" -D DOCKER_COMMIT=\"%_ag_gitCommit%\" - if ($LASTEXITCODE -ne 0) { Throw "Failed to compile daemon resources" } -} -Catch [Exception] { - # Throw the error onto the caller to display errors. 
We don't expect this script to be called directly - Throw ".go-autogen.ps1 failed with error $_" -} -Finally { - Remove-Item .\autogen\winresources\tmp -Recurse -Force -ErrorAction SilentlyContinue | Out-Null - $env:_ag_dockerVersion="" - $env:_ag_gitCommit="" -} diff --git a/vendor/github.com/docker/docker/hack/make/.resources-windows/common.rc b/vendor/github.com/docker/docker/hack/make/.resources-windows/common.rc deleted file mode 100644 index 000fb3536..000000000 --- a/vendor/github.com/docker/docker/hack/make/.resources-windows/common.rc +++ /dev/null @@ -1,38 +0,0 @@ -// Application icon -1 ICON "docker.ico" - -// Windows executable manifest -1 24 /* RT_MANIFEST */ "docker.exe.manifest" - -// Version information -1 VERSIONINFO - -#ifdef DOCKER_VERSION_QUAD -FILEVERSION DOCKER_VERSION_QUAD -PRODUCTVERSION DOCKER_VERSION_QUAD -#endif - -BEGIN - BLOCK "StringFileInfo" - BEGIN - BLOCK "000004B0" - BEGIN - VALUE "ProductName", DOCKER_NAME - -#ifdef DOCKER_VERSION - VALUE "FileVersion", DOCKER_VERSION - VALUE "ProductVersion", DOCKER_VERSION -#endif - -#ifdef DOCKER_COMMIT - VALUE "OriginalFileName", DOCKER_COMMIT -#endif - - END - END - - BLOCK "VarFileInfo" - BEGIN - VALUE "Translation", 0x0000, 0x04B0 - END -END diff --git a/vendor/github.com/docker/docker/hack/make/.resources-windows/docker.rc b/vendor/github.com/docker/docker/hack/make/.resources-windows/docker.rc deleted file mode 100644 index 40c645ad1..000000000 --- a/vendor/github.com/docker/docker/hack/make/.resources-windows/docker.rc +++ /dev/null @@ -1,3 +0,0 @@ -#define DOCKER_NAME "Docker Client" - -#include "common.rc" diff --git a/vendor/github.com/docker/docker/hack/make/.resources-windows/dockerd.rc b/vendor/github.com/docker/docker/hack/make/.resources-windows/dockerd.rc deleted file mode 100644 index e77fc1751..000000000 --- a/vendor/github.com/docker/docker/hack/make/.resources-windows/dockerd.rc +++ /dev/null @@ -1,4 +0,0 @@ -#define DOCKER_NAME "Docker Engine" - -#include "common.rc" -#include "event_messages.rc" diff --git a/vendor/github.com/docker/docker/hack/make/.resources-windows/event_messages.mc b/vendor/github.com/docker/docker/hack/make/.resources-windows/event_messages.mc deleted file mode 100644 index 980107a44..000000000 --- a/vendor/github.com/docker/docker/hack/make/.resources-windows/event_messages.mc +++ /dev/null @@ -1,39 +0,0 @@ -MessageId=1 -Language=English -%1 -. - -MessageId=2 -Language=English -debug: %1 -. - -MessageId=3 -Language=English -panic: %1 -. - -MessageId=4 -Language=English -fatal: %1 -. - -MessageId=11 -Language=English -%1 [%2] -. - -MessageId=12 -Language=English -debug: %1 [%2] -. - -MessageId=13 -Language=English -panic: %1 [%2] -. - -MessageId=14 -Language=English -fatal: %1 [%2] -. diff --git a/vendor/github.com/docker/docker/hack/make/.resources-windows/resources.go b/vendor/github.com/docker/docker/hack/make/.resources-windows/resources.go deleted file mode 100644 index b171259f8..000000000 --- a/vendor/github.com/docker/docker/hack/make/.resources-windows/resources.go +++ /dev/null @@ -1,18 +0,0 @@ -/* - -Package winresources is used to embed Windows resources into docker.exe. -These resources are used to provide - - * Version information - * An icon - * A Windows manifest declaring Windows version support - -The resource object files are generated in hack/make/.go-autogen from -source files in hack/make/.resources-windows. This occurs automatically -when you run hack/make.sh. - -These object files are picked up automatically by go build when this package -is included. 
- -*/ -package winresources diff --git a/vendor/github.com/docker/docker/image/cache/cache.go b/vendor/github.com/docker/docker/image/cache/cache.go deleted file mode 100644 index 6d3f4c57b..000000000 --- a/vendor/github.com/docker/docker/image/cache/cache.go +++ /dev/null @@ -1,253 +0,0 @@ -package cache // import "github.com/docker/docker/image/cache" - -import ( - "encoding/json" - "fmt" - "reflect" - "strings" - - containertypes "github.com/docker/docker/api/types/container" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/pkg/errors" -) - -// NewLocal returns a local image cache, based on parent chain -func NewLocal(store image.Store) *LocalImageCache { - return &LocalImageCache{ - store: store, - } -} - -// LocalImageCache is cache based on parent chain. -type LocalImageCache struct { - store image.Store -} - -// GetCache returns the image id found in the cache -func (lic *LocalImageCache) GetCache(imgID string, config *containertypes.Config) (string, error) { - return getImageIDAndError(getLocalCachedImage(lic.store, image.ID(imgID), config)) -} - -// New returns an image cache, based on history objects -func New(store image.Store) *ImageCache { - return &ImageCache{ - store: store, - localImageCache: NewLocal(store), - } -} - -// ImageCache is cache based on history objects. Requires initial set of images. -type ImageCache struct { - sources []*image.Image - store image.Store - localImageCache *LocalImageCache -} - -// Populate adds an image to the cache (to be queried later) -func (ic *ImageCache) Populate(image *image.Image) { - ic.sources = append(ic.sources, image) -} - -// GetCache returns the image id found in the cache -func (ic *ImageCache) GetCache(parentID string, cfg *containertypes.Config) (string, error) { - imgID, err := ic.localImageCache.GetCache(parentID, cfg) - if err != nil { - return "", err - } - if imgID != "" { - for _, s := range ic.sources { - if ic.isParent(s.ID(), image.ID(imgID)) { - return imgID, nil - } - } - } - - var parent *image.Image - lenHistory := 0 - if parentID != "" { - parent, err = ic.store.Get(image.ID(parentID)) - if err != nil { - return "", errors.Wrapf(err, "unable to find image %v", parentID) - } - lenHistory = len(parent.History) - } - - for _, target := range ic.sources { - if !isValidParent(target, parent) || !isValidConfig(cfg, target.History[lenHistory]) { - continue - } - - if len(target.History)-1 == lenHistory { // last - if parent != nil { - if err := ic.store.SetParent(target.ID(), parent.ID()); err != nil { - return "", errors.Wrapf(err, "failed to set parent for %v to %v", target.ID(), parent.ID()) - } - } - return target.ID().String(), nil - } - - imgID, err := ic.restoreCachedImage(parent, target, cfg) - if err != nil { - return "", errors.Wrapf(err, "failed to restore cached image from %q to %v", parentID, target.ID()) - } - - ic.sources = []*image.Image{target} // avoid jumping to different target, tuned for safety atm - return imgID.String(), nil - } - - return "", nil -} - -func (ic *ImageCache) restoreCachedImage(parent, target *image.Image, cfg *containertypes.Config) (image.ID, error) { - var history []image.History - rootFS := image.NewRootFS() - lenHistory := 0 - if parent != nil { - history = parent.History - rootFS = parent.RootFS - lenHistory = len(parent.History) - } - history = append(history, target.History[lenHistory]) - if layer := getLayerForHistoryIndex(target, lenHistory); layer != "" { - rootFS.Append(layer) - } - - config, 
err := json.Marshal(&image.Image{ - V1Image: image.V1Image{ - DockerVersion: dockerversion.Version, - Config: cfg, - Architecture: target.Architecture, - OS: target.OS, - Author: target.Author, - Created: history[len(history)-1].Created, - }, - RootFS: rootFS, - History: history, - OSFeatures: target.OSFeatures, - OSVersion: target.OSVersion, - }) - if err != nil { - return "", errors.Wrap(err, "failed to marshal image config") - } - - imgID, err := ic.store.Create(config) - if err != nil { - return "", errors.Wrap(err, "failed to create cache image") - } - - if parent != nil { - if err := ic.store.SetParent(imgID, parent.ID()); err != nil { - return "", errors.Wrapf(err, "failed to set parent for %v to %v", target.ID(), parent.ID()) - } - } - return imgID, nil -} - -func (ic *ImageCache) isParent(imgID, parentID image.ID) bool { - nextParent, err := ic.store.GetParent(imgID) - if err != nil { - return false - } - if nextParent == parentID { - return true - } - return ic.isParent(nextParent, parentID) -} - -func getLayerForHistoryIndex(image *image.Image, index int) layer.DiffID { - layerIndex := 0 - for i, h := range image.History { - if i == index { - if h.EmptyLayer { - return "" - } - break - } - if !h.EmptyLayer { - layerIndex++ - } - } - return image.RootFS.DiffIDs[layerIndex] // validate? -} - -func isValidConfig(cfg *containertypes.Config, h image.History) bool { - // todo: make this format better than join that loses data - return strings.Join(cfg.Cmd, " ") == h.CreatedBy -} - -func isValidParent(img, parent *image.Image) bool { - if len(img.History) == 0 { - return false - } - if parent == nil || len(parent.History) == 0 && len(parent.RootFS.DiffIDs) == 0 { - return true - } - if len(parent.History) >= len(img.History) { - return false - } - if len(parent.RootFS.DiffIDs) > len(img.RootFS.DiffIDs) { - return false - } - - for i, h := range parent.History { - if !reflect.DeepEqual(h, img.History[i]) { - return false - } - } - for i, d := range parent.RootFS.DiffIDs { - if d != img.RootFS.DiffIDs[i] { - return false - } - } - return true -} - -func getImageIDAndError(img *image.Image, err error) (string, error) { - if img == nil || err != nil { - return "", err - } - return img.ID().String(), nil -} - -// getLocalCachedImage returns the most recent created image that is a child -// of the image with imgID, that had the same config when it was -// created. nil is returned if a child cannot be found. An error is -// returned if the parent image cannot be found. -func getLocalCachedImage(imageStore image.Store, imgID image.ID, config *containertypes.Config) (*image.Image, error) { - // Loop on the children of the given image and check the config - getMatch := func(siblings []image.ID) (*image.Image, error) { - var match *image.Image - for _, id := range siblings { - img, err := imageStore.Get(id) - if err != nil { - return nil, fmt.Errorf("unable to find image %q", id) - } - - if compare(&img.ContainerConfig, config) { - // check for the most up to date match - if match == nil || match.Created.Before(img.Created) { - match = img - } - } - } - return match, nil - } - - // In this case, this is `FROM scratch`, which isn't an actual image. 
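The cache lookup above keys on the recorded ContainerConfig of each child image and, when several children match, prefers the most recently created one. A minimal, self-contained sketch of that selection rule, using simplified stand-in types rather than the vendored image.Image and container.Config:

package main

import (
	"fmt"
	"strings"
	"time"
)

// candidate is a stand-in for a child image: its ID, creation time, and
// the command recorded in its ContainerConfig.
type candidate struct {
	ID      string
	Created time.Time
	Cmd     []string
}

// bestMatch mirrors the getMatch closure: keep the newest candidate whose
// recorded command equals the one about to be run.
func bestMatch(cands []candidate, cmd []string) *candidate {
	var match *candidate
	want := strings.Join(cmd, " ")
	for i := range cands {
		if strings.Join(cands[i].Cmd, " ") != want {
			continue
		}
		if match == nil || match.Created.Before(cands[i].Created) {
			match = &cands[i]
		}
	}
	return match
}

func main() {
	now := time.Now()
	cands := []candidate{
		{"sha256:aaa", now.Add(-2 * time.Hour), []string{"/bin/sh", "-c", "apt-get update"}},
		{"sha256:bbb", now.Add(-1 * time.Hour), []string{"/bin/sh", "-c", "apt-get update"}},
	}
	if m := bestMatch(cands, []string{"/bin/sh", "-c", "apt-get update"}); m != nil {
		fmt.Println("cache hit:", m.ID) // sha256:bbb, the newer child
	}
}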
- if imgID == "" { - images := imageStore.Map() - var siblings []image.ID - for id, img := range images { - if img.Parent == imgID { - siblings = append(siblings, id) - } - } - return getMatch(siblings) - } - - // find match from child images - siblings := imageStore.Children(imgID) - return getMatch(siblings) -} diff --git a/vendor/github.com/docker/docker/image/cache/compare.go b/vendor/github.com/docker/docker/image/cache/compare.go deleted file mode 100644 index e31e9c8bd..000000000 --- a/vendor/github.com/docker/docker/image/cache/compare.go +++ /dev/null @@ -1,63 +0,0 @@ -package cache // import "github.com/docker/docker/image/cache" - -import ( - "github.com/docker/docker/api/types/container" -) - -// compare two Config struct. Do not compare the "Image" nor "Hostname" fields -// If OpenStdin is set, then it differs -func compare(a, b *container.Config) bool { - if a == nil || b == nil || - a.OpenStdin || b.OpenStdin { - return false - } - if a.AttachStdout != b.AttachStdout || - a.AttachStderr != b.AttachStderr || - a.User != b.User || - a.OpenStdin != b.OpenStdin || - a.Tty != b.Tty { - return false - } - - if len(a.Cmd) != len(b.Cmd) || - len(a.Env) != len(b.Env) || - len(a.Labels) != len(b.Labels) || - len(a.ExposedPorts) != len(b.ExposedPorts) || - len(a.Entrypoint) != len(b.Entrypoint) || - len(a.Volumes) != len(b.Volumes) { - return false - } - - for i := 0; i < len(a.Cmd); i++ { - if a.Cmd[i] != b.Cmd[i] { - return false - } - } - for i := 0; i < len(a.Env); i++ { - if a.Env[i] != b.Env[i] { - return false - } - } - for k, v := range a.Labels { - if v != b.Labels[k] { - return false - } - } - for k := range a.ExposedPorts { - if _, exists := b.ExposedPorts[k]; !exists { - return false - } - } - - for i := 0; i < len(a.Entrypoint); i++ { - if a.Entrypoint[i] != b.Entrypoint[i] { - return false - } - } - for key := range a.Volumes { - if _, exists := b.Volumes[key]; !exists { - return false - } - } - return true -} diff --git a/vendor/github.com/docker/docker/image/fs.go b/vendor/github.com/docker/docker/image/fs.go deleted file mode 100644 index 7080c8c01..000000000 --- a/vendor/github.com/docker/docker/image/fs.go +++ /dev/null @@ -1,175 +0,0 @@ -package image // import "github.com/docker/docker/image" - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" - - "github.com/docker/docker/pkg/ioutils" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// DigestWalkFunc is function called by StoreBackend.Walk -type DigestWalkFunc func(id digest.Digest) error - -// StoreBackend provides interface for image.Store persistence -type StoreBackend interface { - Walk(f DigestWalkFunc) error - Get(id digest.Digest) ([]byte, error) - Set(data []byte) (digest.Digest, error) - Delete(id digest.Digest) error - SetMetadata(id digest.Digest, key string, data []byte) error - GetMetadata(id digest.Digest, key string) ([]byte, error) - DeleteMetadata(id digest.Digest, key string) error -} - -// fs implements StoreBackend using the filesystem. 
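The StoreBackend interface above is what keeps image.Store agnostic of persistence. A toy in-memory sketch of the same contract, using bare sha256 hex strings where the real code uses go-digest digests (an assumption made to keep the example stdlib-only):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// memStore is a toy, non-concurrent StoreBackend-like store: content is
// keyed by the sha256 of its bytes, metadata hangs off each digest.
type memStore struct {
	content  map[string][]byte
	metadata map[string]map[string][]byte
}

func newMemStore() *memStore {
	return &memStore{
		content:  map[string][]byte{},
		metadata: map[string]map[string][]byte{},
	}
}

func (s *memStore) Set(data []byte) (string, error) {
	if len(data) == 0 {
		return "", fmt.Errorf("invalid empty data")
	}
	sum := sha256.Sum256(data)
	id := hex.EncodeToString(sum[:])
	s.content[id] = data
	return id, nil
}

func (s *memStore) Get(id string) ([]byte, error) {
	data, ok := s.content[id]
	if !ok {
		return nil, fmt.Errorf("no such digest: %s", id)
	}
	return data, nil
}

func (s *memStore) SetMetadata(id, key string, data []byte) error {
	if _, err := s.Get(id); err != nil {
		return err // like the fs store, require the base content to exist
	}
	if s.metadata[id] == nil {
		s.metadata[id] = map[string][]byte{}
	}
	s.metadata[id][key] = data
	return nil
}

func (s *memStore) Walk(f func(id string) error) error {
	for id := range s.content {
		if err := f(id); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	s := newMemStore()
	id, _ := s.Set([]byte(`{"architecture":"amd64"}`))
	_ = s.SetMetadata(id, "parent", []byte("sha256:beef"))
	_ = s.Walk(func(id string) error { fmt.Println("image:", id); return nil })
}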
-type fs struct { - sync.RWMutex - root string -} - -const ( - contentDirName = "content" - metadataDirName = "metadata" -) - -// NewFSStoreBackend returns new filesystem based backend for image.Store -func NewFSStoreBackend(root string) (StoreBackend, error) { - return newFSStore(root) -} - -func newFSStore(root string) (*fs, error) { - s := &fs{ - root: root, - } - if err := os.MkdirAll(filepath.Join(root, contentDirName, string(digest.Canonical)), 0700); err != nil { - return nil, errors.Wrap(err, "failed to create storage backend") - } - if err := os.MkdirAll(filepath.Join(root, metadataDirName, string(digest.Canonical)), 0700); err != nil { - return nil, errors.Wrap(err, "failed to create storage backend") - } - return s, nil -} - -func (s *fs) contentFile(dgst digest.Digest) string { - return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Hex()) -} - -func (s *fs) metadataDir(dgst digest.Digest) string { - return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Hex()) -} - -// Walk calls the supplied callback for each image ID in the storage backend. -func (s *fs) Walk(f DigestWalkFunc) error { - // Only Canonical digest (sha256) is currently supported - s.RLock() - dir, err := ioutil.ReadDir(filepath.Join(s.root, contentDirName, string(digest.Canonical))) - s.RUnlock() - if err != nil { - return err - } - for _, v := range dir { - dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name()) - if err := dgst.Validate(); err != nil { - logrus.Debugf("skipping invalid digest %s: %s", dgst, err) - continue - } - if err := f(dgst); err != nil { - return err - } - } - return nil -} - -// Get returns the content stored under a given digest. -func (s *fs) Get(dgst digest.Digest) ([]byte, error) { - s.RLock() - defer s.RUnlock() - - return s.get(dgst) -} - -func (s *fs) get(dgst digest.Digest) ([]byte, error) { - content, err := ioutil.ReadFile(s.contentFile(dgst)) - if err != nil { - return nil, errors.Wrapf(err, "failed to get digest %s", dgst) - } - - // todo: maybe optional - if digest.FromBytes(content) != dgst { - return nil, fmt.Errorf("failed to verify: %v", dgst) - } - - return content, nil -} - -// Set stores content by checksum. -func (s *fs) Set(data []byte) (digest.Digest, error) { - s.Lock() - defer s.Unlock() - - if len(data) == 0 { - return "", fmt.Errorf("invalid empty data") - } - - dgst := digest.FromBytes(data) - if err := ioutils.AtomicWriteFile(s.contentFile(dgst), data, 0600); err != nil { - return "", errors.Wrap(err, "failed to write digest data") - } - - return dgst, nil -} - -// Delete removes content and metadata files associated with the digest. -func (s *fs) Delete(dgst digest.Digest) error { - s.Lock() - defer s.Unlock() - - if err := os.RemoveAll(s.metadataDir(dgst)); err != nil { - return err - } - return os.Remove(s.contentFile(dgst)) -} - -// SetMetadata sets metadata for a given ID. It fails if there's no base file. -func (s *fs) SetMetadata(dgst digest.Digest, key string, data []byte) error { - s.Lock() - defer s.Unlock() - if _, err := s.get(dgst); err != nil { - return err - } - - baseDir := filepath.Join(s.metadataDir(dgst)) - if err := os.MkdirAll(baseDir, 0700); err != nil { - return err - } - return ioutils.AtomicWriteFile(filepath.Join(s.metadataDir(dgst), key), data, 0600) -} - -// GetMetadata returns metadata for a given digest. 
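From contentFile and metadataDir above, the on-disk layout is root/content/sha256/<hex> for blobs and root/metadata/sha256/<hex>/<key> for per-digest metadata, and get() re-hashes every blob it reads. A stdlib-only sketch of that layout and the verify-on-read check (the helper names here are illustrative, not the vendored API):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"os"
	"path/filepath"
)

func contentFile(root, hexDigest string) string {
	return filepath.Join(root, "content", "sha256", hexDigest)
}

func metadataFile(root, hexDigest, key string) string {
	return filepath.Join(root, "metadata", "sha256", hexDigest, key)
}

// getVerified reads a blob and fails if its bytes no longer hash to the
// digest it is filed under, like the fs store's get().
func getVerified(root, hexDigest string) ([]byte, error) {
	data, err := os.ReadFile(contentFile(root, hexDigest))
	if err != nil {
		return nil, err
	}
	sum := sha256.Sum256(data)
	if hex.EncodeToString(sum[:]) != hexDigest {
		return nil, fmt.Errorf("failed to verify: %s", hexDigest)
	}
	return data, nil
}

func main() {
	root, _ := os.MkdirTemp("", "imagestore")
	defer os.RemoveAll(root)

	data := []byte(`{"os":"linux"}`)
	sum := sha256.Sum256(data)
	id := hex.EncodeToString(sum[:])

	os.MkdirAll(filepath.Dir(contentFile(root, id)), 0700)
	os.WriteFile(contentFile(root, id), data, 0600)

	if _, err := getVerified(root, id); err != nil {
		fmt.Println("corrupt:", err)
	} else {
		fmt.Println("verified", id)
	}
}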
-func (s *fs) GetMetadata(dgst digest.Digest, key string) ([]byte, error) { - s.RLock() - defer s.RUnlock() - - if _, err := s.get(dgst); err != nil { - return nil, err - } - bytes, err := ioutil.ReadFile(filepath.Join(s.metadataDir(dgst), key)) - if err != nil { - return nil, errors.Wrap(err, "failed to read metadata") - } - return bytes, nil -} - -// DeleteMetadata removes the metadata associated with a digest. -func (s *fs) DeleteMetadata(dgst digest.Digest, key string) error { - s.Lock() - defer s.Unlock() - - return os.RemoveAll(filepath.Join(s.metadataDir(dgst), key)) -} diff --git a/vendor/github.com/docker/docker/image/image.go b/vendor/github.com/docker/docker/image/image.go deleted file mode 100644 index 7e0646f07..000000000 --- a/vendor/github.com/docker/docker/image/image.go +++ /dev/null @@ -1,232 +0,0 @@ -package image // import "github.com/docker/docker/image" - -import ( - "encoding/json" - "errors" - "io" - "runtime" - "strings" - "time" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/layer" - "github.com/opencontainers/go-digest" -) - -// ID is the content-addressable ID of an image. -type ID digest.Digest - -func (id ID) String() string { - return id.Digest().String() -} - -// Digest converts ID into a digest -func (id ID) Digest() digest.Digest { - return digest.Digest(id) -} - -// IDFromDigest creates an ID from a digest -func IDFromDigest(digest digest.Digest) ID { - return ID(digest) -} - -// V1Image stores the V1 image configuration. -type V1Image struct { - // ID is a unique 64 character identifier of the image - ID string `json:"id,omitempty"` - // Parent is the ID of the parent image - Parent string `json:"parent,omitempty"` - // Comment is the commit message that was set when committing the image - Comment string `json:"comment,omitempty"` - // Created is the timestamp at which the image was created - Created time.Time `json:"created"` - // Container is the id of the container used to commit - Container string `json:"container,omitempty"` - // ContainerConfig is the configuration of the container that is committed into the image - ContainerConfig container.Config `json:"container_config,omitempty"` - // DockerVersion specifies the version of Docker that was used to build the image - DockerVersion string `json:"docker_version,omitempty"` - // Author is the name of the author that was specified when committing the image - Author string `json:"author,omitempty"` - // Config is the configuration of the container received from the client - Config *container.Config `json:"config,omitempty"` - // Architecture is the hardware that the image is built and runs on - Architecture string `json:"architecture,omitempty"` - // OS is the operating system used to build and run the image - OS string `json:"os,omitempty"` - // Size is the total size of the image including all layers it is composed of - Size int64 `json:",omitempty"` -} - -// Image stores the image configuration -type Image struct { - V1Image - Parent ID `json:"parent,omitempty"` - RootFS *RootFS `json:"rootfs,omitempty"` - History []History `json:"history,omitempty"` - OSVersion string `json:"os.version,omitempty"` - OSFeatures []string `json:"os.features,omitempty"` - - // rawJSON caches the immutable JSON associated with this image. - rawJSON []byte - - // computedID is the ID computed from the hash of the image config. - // Not to be confused with the legacy V1 ID in V1Image. 
- computedID ID -} - -// RawJSON returns the immutable JSON associated with the image. -func (img *Image) RawJSON() []byte { - return img.rawJSON -} - -// ID returns the image's content-addressable ID. -func (img *Image) ID() ID { - return img.computedID -} - -// ImageID stringifies ID. -func (img *Image) ImageID() string { - return img.ID().String() -} - -// RunConfig returns the image's container config. -func (img *Image) RunConfig() *container.Config { - return img.Config -} - -// BaseImgArch returns the image's architecture. If not populated, defaults to the host runtime arch. -func (img *Image) BaseImgArch() string { - arch := img.Architecture - if arch == "" { - arch = runtime.GOARCH - } - return arch -} - -// OperatingSystem returns the image's operating system. If not populated, defaults to the host runtime OS. -func (img *Image) OperatingSystem() string { - os := img.OS - if os == "" { - os = runtime.GOOS - } - return os -} - -// MarshalJSON serializes the image to JSON. It sorts the top-level keys so -// that JSON that's been manipulated by a push/pull cycle with a legacy -// registry won't end up with a different key order. -func (img *Image) MarshalJSON() ([]byte, error) { - type MarshalImage Image - - pass1, err := json.Marshal(MarshalImage(*img)) - if err != nil { - return nil, err - } - - var c map[string]*json.RawMessage - if err := json.Unmarshal(pass1, &c); err != nil { - return nil, err - } - return json.Marshal(c) -} - -// ChildConfig is the configuration to apply to an Image to create a new -// Child image. Other properties of the image are copied from the parent. -type ChildConfig struct { - ContainerID string - Author string - Comment string - DiffID layer.DiffID - ContainerConfig *container.Config - Config *container.Config -} - -// NewChildImage creates a new Image as a child of this image. -func NewChildImage(img *Image, child ChildConfig, platform string) *Image { - isEmptyLayer := layer.IsEmpty(child.DiffID) - var rootFS *RootFS - if img.RootFS != nil { - rootFS = img.RootFS.Clone() - } else { - rootFS = NewRootFS() - } - - if !isEmptyLayer { - rootFS.Append(child.DiffID) - } - imgHistory := NewHistory( - child.Author, - child.Comment, - strings.Join(child.ContainerConfig.Cmd, " "), - isEmptyLayer) - - return &Image{ - V1Image: V1Image{ - DockerVersion: dockerversion.Version, - Config: child.Config, - Architecture: img.BaseImgArch(), - OS: platform, - Container: child.ContainerID, - ContainerConfig: *child.ContainerConfig, - Author: child.Author, - Created: imgHistory.Created, - }, - RootFS: rootFS, - History: append(img.History, imgHistory), - OSFeatures: img.OSFeatures, - OSVersion: img.OSVersion, - } -} - -// History stores build commands that were used to create an image -type History struct { - // Created is the timestamp at which the image was created - Created time.Time `json:"created"` - // Author is the name of the author that was specified when committing the image - Author string `json:"author,omitempty"` - // CreatedBy keeps the Dockerfile command used while building the image - CreatedBy string `json:"created_by,omitempty"` - // Comment is the commit message that was set when committing the image - Comment string `json:"comment,omitempty"` - // EmptyLayer is set to true if this history item did not generate a - // layer. Otherwise, the history item is associated with the next - // layer in the RootFS section. 
- EmptyLayer bool `json:"empty_layer,omitempty"` -} - -// NewHistory creates a new history struct from arguments, and sets the created -// time to the current time in UTC -func NewHistory(author, comment, createdBy string, isEmptyLayer bool) History { - return History{ - Author: author, - Created: time.Now().UTC(), - CreatedBy: createdBy, - Comment: comment, - EmptyLayer: isEmptyLayer, - } -} - -// Exporter provides interface for loading and saving images -type Exporter interface { - Load(io.ReadCloser, io.Writer, bool) error - // TODO: Load(net.Context, io.ReadCloser, <- chan StatusMessage) error - Save([]string, io.Writer) error -} - -// NewFromJSON creates an Image configuration from json. -func NewFromJSON(src []byte) (*Image, error) { - img := &Image{} - - if err := json.Unmarshal(src, img); err != nil { - return nil, err - } - if img.RootFS == nil { - return nil, errors.New("invalid image JSON, no RootFS key") - } - - img.rawJSON = src - - return img, nil -} diff --git a/vendor/github.com/docker/docker/image/rootfs.go b/vendor/github.com/docker/docker/image/rootfs.go deleted file mode 100644 index 84843e10c..000000000 --- a/vendor/github.com/docker/docker/image/rootfs.go +++ /dev/null @@ -1,52 +0,0 @@ -package image // import "github.com/docker/docker/image" - -import ( - "runtime" - - "github.com/docker/docker/layer" - "github.com/sirupsen/logrus" -) - -// TypeLayers is used for RootFS.Type for filesystems organized into layers. -const TypeLayers = "layers" - -// typeLayersWithBase is an older format used by Windows up to v1.12. We -// explicitly handle this as an error case to ensure that a daemon which still -// has an older image like this on disk can still start, even though the -// image itself is not usable. See https://github.com/docker/docker/pull/25806. -const typeLayersWithBase = "layers+base" - -// RootFS describes images root filesystem -// This is currently a placeholder that only supports layers. In the future -// this can be made into an interface that supports different implementations. -type RootFS struct { - Type string `json:"type"` - DiffIDs []layer.DiffID `json:"diff_ids,omitempty"` -} - -// NewRootFS returns empty RootFS struct -func NewRootFS() *RootFS { - return &RootFS{Type: TypeLayers} -} - -// Append appends a new diffID to rootfs -func (r *RootFS) Append(id layer.DiffID) { - r.DiffIDs = append(r.DiffIDs, id) -} - -// Clone returns a copy of the RootFS -func (r *RootFS) Clone() *RootFS { - newRoot := NewRootFS() - newRoot.Type = r.Type - newRoot.DiffIDs = append(r.DiffIDs) - return newRoot -} - -// ChainID returns the ChainID for the top layer in RootFS. -func (r *RootFS) ChainID() layer.ChainID { - if runtime.GOOS == "windows" && r.Type == typeLayersWithBase { - logrus.Warnf("Layer type is unsupported on this platform. 
DiffIDs: '%v'", r.DiffIDs) - return "" - } - return layer.CreateChainID(r.DiffIDs) -} diff --git a/vendor/github.com/docker/docker/image/store.go b/vendor/github.com/docker/docker/image/store.go deleted file mode 100644 index 9fd7d7dcf..000000000 --- a/vendor/github.com/docker/docker/image/store.go +++ /dev/null @@ -1,345 +0,0 @@ -package image // import "github.com/docker/docker/image" - -import ( - "encoding/json" - "fmt" - "sync" - "time" - - "github.com/docker/distribution/digestset" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/system" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// Store is an interface for creating and accessing images -type Store interface { - Create(config []byte) (ID, error) - Get(id ID) (*Image, error) - Delete(id ID) ([]layer.Metadata, error) - Search(partialID string) (ID, error) - SetParent(id ID, parent ID) error - GetParent(id ID) (ID, error) - SetLastUpdated(id ID) error - GetLastUpdated(id ID) (time.Time, error) - Children(id ID) []ID - Map() map[ID]*Image - Heads() map[ID]*Image - Len() int -} - -// LayerGetReleaser is a minimal interface for getting and releasing images. -type LayerGetReleaser interface { - Get(layer.ChainID) (layer.Layer, error) - Release(layer.Layer) ([]layer.Metadata, error) -} - -type imageMeta struct { - layer layer.Layer - children map[ID]struct{} -} - -type store struct { - sync.RWMutex - lss map[string]LayerGetReleaser - images map[ID]*imageMeta - fs StoreBackend - digestSet *digestset.Set -} - -// NewImageStore returns new store object for given set of layer stores -func NewImageStore(fs StoreBackend, lss map[string]LayerGetReleaser) (Store, error) { - is := &store{ - lss: lss, - images: make(map[ID]*imageMeta), - fs: fs, - digestSet: digestset.NewSet(), - } - - // load all current images and retain layers - if err := is.restore(); err != nil { - return nil, err - } - - return is, nil -} - -func (is *store) restore() error { - err := is.fs.Walk(func(dgst digest.Digest) error { - img, err := is.Get(IDFromDigest(dgst)) - if err != nil { - logrus.Errorf("invalid image %v, %v", dgst, err) - return nil - } - var l layer.Layer - if chainID := img.RootFS.ChainID(); chainID != "" { - if !system.IsOSSupported(img.OperatingSystem()) { - return system.ErrNotSupportedOperatingSystem - } - l, err = is.lss[img.OperatingSystem()].Get(chainID) - if err != nil { - if err == layer.ErrLayerDoesNotExist { - logrus.Errorf("layer does not exist, not restoring image %v, %v, %s", dgst, chainID, img.OperatingSystem()) - return nil - } - return err - } - } - if err := is.digestSet.Add(dgst); err != nil { - return err - } - - imageMeta := &imageMeta{ - layer: l, - children: make(map[ID]struct{}), - } - - is.images[IDFromDigest(dgst)] = imageMeta - - return nil - }) - if err != nil { - return err - } - - // Second pass to fill in children maps - for id := range is.images { - if parent, err := is.GetParent(id); err == nil { - if parentMeta := is.images[parent]; parentMeta != nil { - parentMeta.children[id] = struct{}{} - } - } - } - - return nil -} - -func (is *store) Create(config []byte) (ID, error) { - var img Image - err := json.Unmarshal(config, &img) - if err != nil { - return "", err - } - - // Must reject any config that references diffIDs from the history - // which aren't among the rootfs layers. 
- rootFSLayers := make(map[layer.DiffID]struct{}) - for _, diffID := range img.RootFS.DiffIDs { - rootFSLayers[diffID] = struct{}{} - } - - layerCounter := 0 - for _, h := range img.History { - if !h.EmptyLayer { - layerCounter++ - } - } - if layerCounter > len(img.RootFS.DiffIDs) { - return "", errors.New("too many non-empty layers in History section") - } - - dgst, err := is.fs.Set(config) - if err != nil { - return "", err - } - imageID := IDFromDigest(dgst) - - is.Lock() - defer is.Unlock() - - if _, exists := is.images[imageID]; exists { - return imageID, nil - } - - layerID := img.RootFS.ChainID() - - var l layer.Layer - if layerID != "" { - if !system.IsOSSupported(img.OperatingSystem()) { - return "", system.ErrNotSupportedOperatingSystem - } - l, err = is.lss[img.OperatingSystem()].Get(layerID) - if err != nil { - return "", errors.Wrapf(err, "failed to get layer %s", layerID) - } - } - - imageMeta := &imageMeta{ - layer: l, - children: make(map[ID]struct{}), - } - - is.images[imageID] = imageMeta - if err := is.digestSet.Add(imageID.Digest()); err != nil { - delete(is.images, imageID) - return "", err - } - - return imageID, nil -} - -type imageNotFoundError string - -func (e imageNotFoundError) Error() string { - return "No such image: " + string(e) -} - -func (imageNotFoundError) NotFound() {} - -func (is *store) Search(term string) (ID, error) { - dgst, err := is.digestSet.Lookup(term) - if err != nil { - if err == digestset.ErrDigestNotFound { - err = imageNotFoundError(term) - } - return "", errors.WithStack(err) - } - return IDFromDigest(dgst), nil -} - -func (is *store) Get(id ID) (*Image, error) { - // todo: Check if image is in images - // todo: Detect manual insertions and start using them - config, err := is.fs.Get(id.Digest()) - if err != nil { - return nil, err - } - - img, err := NewFromJSON(config) - if err != nil { - return nil, err - } - img.computedID = id - - img.Parent, err = is.GetParent(id) - if err != nil { - img.Parent = "" - } - - return img, nil -} - -func (is *store) Delete(id ID) ([]layer.Metadata, error) { - is.Lock() - defer is.Unlock() - - imageMeta := is.images[id] - if imageMeta == nil { - return nil, fmt.Errorf("unrecognized image ID %s", id.String()) - } - img, err := is.Get(id) - if err != nil { - return nil, fmt.Errorf("unrecognized image %s, %v", id.String(), err) - } - if !system.IsOSSupported(img.OperatingSystem()) { - return nil, fmt.Errorf("unsupported image operating system %q", img.OperatingSystem()) - } - for id := range imageMeta.children { - is.fs.DeleteMetadata(id.Digest(), "parent") - } - if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { - delete(is.images[parent].children, id) - } - - if err := is.digestSet.Remove(id.Digest()); err != nil { - logrus.Errorf("error removing %s from digest set: %q", id, err) - } - delete(is.images, id) - is.fs.Delete(id.Digest()) - - if imageMeta.layer != nil { - return is.lss[img.OperatingSystem()].Release(imageMeta.layer) - } - return nil, nil -} - -func (is *store) SetParent(id, parent ID) error { - is.Lock() - defer is.Unlock() - parentMeta := is.images[parent] - if parentMeta == nil { - return fmt.Errorf("unknown parent image ID %s", parent.String()) - } - if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { - delete(is.images[parent].children, id) - } - parentMeta.children[id] = struct{}{} - return is.fs.SetMetadata(id.Digest(), "parent", []byte(parent)) -} - -func (is *store) GetParent(id ID) (ID, error) { - d, err := 
is.fs.GetMetadata(id.Digest(), "parent") - if err != nil { - return "", err - } - return ID(d), nil // todo: validate? -} - -// SetLastUpdated time for the image ID to the current time -func (is *store) SetLastUpdated(id ID) error { - lastUpdated := []byte(time.Now().Format(time.RFC3339Nano)) - return is.fs.SetMetadata(id.Digest(), "lastUpdated", lastUpdated) -} - -// GetLastUpdated time for the image ID -func (is *store) GetLastUpdated(id ID) (time.Time, error) { - bytes, err := is.fs.GetMetadata(id.Digest(), "lastUpdated") - if err != nil || len(bytes) == 0 { - // No lastUpdated time - return time.Time{}, nil - } - return time.Parse(time.RFC3339Nano, string(bytes)) -} - -func (is *store) Children(id ID) []ID { - is.RLock() - defer is.RUnlock() - - return is.children(id) -} - -func (is *store) children(id ID) []ID { - var ids []ID - if is.images[id] != nil { - for id := range is.images[id].children { - ids = append(ids, id) - } - } - return ids -} - -func (is *store) Heads() map[ID]*Image { - return is.imagesMap(false) -} - -func (is *store) Map() map[ID]*Image { - return is.imagesMap(true) -} - -func (is *store) imagesMap(all bool) map[ID]*Image { - is.RLock() - defer is.RUnlock() - - images := make(map[ID]*Image) - - for id := range is.images { - if !all && len(is.children(id)) > 0 { - continue - } - img, err := is.Get(id) - if err != nil { - logrus.Errorf("invalid image access: %q, error: %q", id, err) - continue - } - images[id] = img - } - return images -} - -func (is *store) Len() int { - is.RLock() - defer is.RUnlock() - return len(is.images) -} diff --git a/vendor/github.com/docker/docker/image/tarexport/load.go b/vendor/github.com/docker/docker/image/tarexport/load.go deleted file mode 100644 index c89dd08f9..000000000 --- a/vendor/github.com/docker/docker/image/tarexport/load.go +++ /dev/null @@ -1,429 +0,0 @@ -package tarexport // import "github.com/docker/docker/image/tarexport" - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "reflect" - "runtime" - - "github.com/docker/distribution" - "github.com/docker/distribution/reference" - "github.com/docker/docker/image" - "github.com/docker/docker/image/v1" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/symlink" - "github.com/docker/docker/pkg/system" - "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" -) - -func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { - var progressOutput progress.Output - if !quiet { - progressOutput = streamformatter.NewJSONProgressOutput(outStream, false) - } - outStream = streamformatter.NewStdoutWriter(outStream) - - tmpDir, err := ioutil.TempDir("", "docker-import-") - if err != nil { - return err - } - defer os.RemoveAll(tmpDir) - - if err := chrootarchive.Untar(inTar, tmpDir, nil); err != nil { - return err - } - // read manifest, if no file then load in legacy mode - manifestPath, err := safePath(tmpDir, manifestFileName) - if err != nil { - return err - } - manifestFile, err := os.Open(manifestPath) - if err != nil { - if os.IsNotExist(err) { - return l.legacyLoad(tmpDir, outStream, progressOutput) - } - return err - } - defer manifestFile.Close() - - var manifest []manifestItem - if err := json.NewDecoder(manifestFile).Decode(&manifest); err != nil { - return 
err - } - - var parentLinks []parentLink - var imageIDsStr string - var imageRefCount int - - for _, m := range manifest { - configPath, err := safePath(tmpDir, m.Config) - if err != nil { - return err - } - config, err := ioutil.ReadFile(configPath) - if err != nil { - return err - } - img, err := image.NewFromJSON(config) - if err != nil { - return err - } - if err := checkCompatibleOS(img.OS); err != nil { - return err - } - rootFS := *img.RootFS - rootFS.DiffIDs = nil - - if expected, actual := len(m.Layers), len(img.RootFS.DiffIDs); expected != actual { - return fmt.Errorf("invalid manifest, layers length mismatch: expected %d, got %d", expected, actual) - } - - // On Windows, validate the platform, defaulting to windows if not present. - os := img.OS - if os == "" { - os = runtime.GOOS - } - if runtime.GOOS == "windows" { - if (os != "windows") && (os != "linux") { - return fmt.Errorf("configuration for this image has an unsupported operating system: %s", os) - } - } - - for i, diffID := range img.RootFS.DiffIDs { - layerPath, err := safePath(tmpDir, m.Layers[i]) - if err != nil { - return err - } - r := rootFS - r.Append(diffID) - newLayer, err := l.lss[os].Get(r.ChainID()) - if err != nil { - newLayer, err = l.loadLayer(layerPath, rootFS, diffID.String(), os, m.LayerSources[diffID], progressOutput) - if err != nil { - return err - } - } - defer layer.ReleaseAndLog(l.lss[os], newLayer) - if expected, actual := diffID, newLayer.DiffID(); expected != actual { - return fmt.Errorf("invalid diffID for layer %d: expected %q, got %q", i, expected, actual) - } - rootFS.Append(diffID) - } - - imgID, err := l.is.Create(config) - if err != nil { - return err - } - imageIDsStr += fmt.Sprintf("Loaded image ID: %s\n", imgID) - - imageRefCount = 0 - for _, repoTag := range m.RepoTags { - named, err := reference.ParseNormalizedNamed(repoTag) - if err != nil { - return err - } - ref, ok := named.(reference.NamedTagged) - if !ok { - return fmt.Errorf("invalid tag %q", repoTag) - } - l.setLoadedTag(ref, imgID.Digest(), outStream) - outStream.Write([]byte(fmt.Sprintf("Loaded image: %s\n", reference.FamiliarString(ref)))) - imageRefCount++ - } - - parentLinks = append(parentLinks, parentLink{imgID, m.Parent}) - l.loggerImgEvent.LogImageEvent(imgID.String(), imgID.String(), "load") - } - - for _, p := range validatedParentLinks(parentLinks) { - if p.parentID != "" { - if err := l.setParentID(p.id, p.parentID); err != nil { - return err - } - } - } - - if imageRefCount == 0 { - outStream.Write([]byte(imageIDsStr)) - } - - return nil -} - -func (l *tarexporter) setParentID(id, parentID image.ID) error { - img, err := l.is.Get(id) - if err != nil { - return err - } - parent, err := l.is.Get(parentID) - if err != nil { - return err - } - if !checkValidParent(img, parent) { - return fmt.Errorf("image %v is not a valid parent for %v", parent.ID(), img.ID()) - } - return l.is.SetParent(id, parentID) -} - -func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string, os string, foreignSrc distribution.Descriptor, progressOutput progress.Output) (layer.Layer, error) { - // We use system.OpenSequential to use sequential file access on Windows, avoiding - // depleting the standby list. On Linux, this equates to a regular os.Open. 
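Load above drives the whole import off manifest.json: each entry names a config blob, the ordered layer tars, and the tags to restore (the manifestItem type itself is declared later, in tarexport.go). A small decode sketch, with Parent reduced to a plain string and LayerSources omitted:

package main

import (
	"encoding/json"
	"fmt"
)

// manifestItem is a trimmed copy of the tarball manifest entry; the real
// type also carries LayerSources for foreign layers.
type manifestItem struct {
	Config   string
	RepoTags []string
	Layers   []string
	Parent   string `json:",omitempty"`
}

func main() {
	raw := []byte(`[{
		"Config": "abc123.json",
		"RepoTags": ["busybox:latest"],
		"Layers": ["111/layer.tar", "222/layer.tar"]
	}]`)

	var manifest []manifestItem
	if err := json.Unmarshal(raw, &manifest); err != nil {
		panic(err)
	}
	for _, m := range manifest {
		fmt.Printf("%s: %d layer(s), config %s\n", m.RepoTags, len(m.Layers), m.Config)
	}
}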
- rawTar, err := system.OpenSequential(filename) - if err != nil { - logrus.Debugf("Error reading embedded tar: %v", err) - return nil, err - } - defer rawTar.Close() - - var r io.Reader - if progressOutput != nil { - fileInfo, err := rawTar.Stat() - if err != nil { - logrus.Debugf("Error statting file: %v", err) - return nil, err - } - - r = progress.NewProgressReader(rawTar, progressOutput, fileInfo.Size(), stringid.TruncateID(id), "Loading layer") - } else { - r = rawTar - } - - inflatedLayerData, err := archive.DecompressStream(r) - if err != nil { - return nil, err - } - defer inflatedLayerData.Close() - - if ds, ok := l.lss[os].(layer.DescribableStore); ok { - return ds.RegisterWithDescriptor(inflatedLayerData, rootFS.ChainID(), foreignSrc) - } - return l.lss[os].Register(inflatedLayerData, rootFS.ChainID()) -} - -func (l *tarexporter) setLoadedTag(ref reference.Named, imgID digest.Digest, outStream io.Writer) error { - if prevID, err := l.rs.Get(ref); err == nil && prevID != imgID { - fmt.Fprintf(outStream, "The image %s already exists, renaming the old one with ID %s to empty string\n", reference.FamiliarString(ref), string(prevID)) // todo: this message is wrong in case of multiple tags - } - - return l.rs.AddTag(ref, imgID, true) -} - -func (l *tarexporter) legacyLoad(tmpDir string, outStream io.Writer, progressOutput progress.Output) error { - if runtime.GOOS == "windows" { - return errors.New("Windows does not support legacy loading of images") - } - - legacyLoadedMap := make(map[string]image.ID) - - dirs, err := ioutil.ReadDir(tmpDir) - if err != nil { - return err - } - - // every dir represents an image - for _, d := range dirs { - if d.IsDir() { - if err := l.legacyLoadImage(d.Name(), tmpDir, legacyLoadedMap, progressOutput); err != nil { - return err - } - } - } - - // load tags from repositories file - repositoriesPath, err := safePath(tmpDir, legacyRepositoriesFileName) - if err != nil { - return err - } - repositoriesFile, err := os.Open(repositoriesPath) - if err != nil { - return err - } - defer repositoriesFile.Close() - - repositories := make(map[string]map[string]string) - if err := json.NewDecoder(repositoriesFile).Decode(&repositories); err != nil { - return err - } - - for name, tagMap := range repositories { - for tag, oldID := range tagMap { - imgID, ok := legacyLoadedMap[oldID] - if !ok { - return fmt.Errorf("invalid target ID: %v", oldID) - } - named, err := reference.ParseNormalizedNamed(name) - if err != nil { - return err - } - ref, err := reference.WithTag(named, tag) - if err != nil { - return err - } - l.setLoadedTag(ref, imgID.Digest(), outStream) - } - } - - return nil -} - -func (l *tarexporter) legacyLoadImage(oldID, sourceDir string, loadedMap map[string]image.ID, progressOutput progress.Output) error { - if _, loaded := loadedMap[oldID]; loaded { - return nil - } - configPath, err := safePath(sourceDir, filepath.Join(oldID, legacyConfigFileName)) - if err != nil { - return err - } - imageJSON, err := ioutil.ReadFile(configPath) - if err != nil { - logrus.Debugf("Error reading json: %v", err) - return err - } - - var img struct { - OS string - Parent string - } - if err := json.Unmarshal(imageJSON, &img); err != nil { - return err - } - - if err := checkCompatibleOS(img.OS); err != nil { - return err - } - if img.OS == "" { - img.OS = runtime.GOOS - } - - var parentID image.ID - if img.Parent != "" { - for { - var loaded bool - if parentID, loaded = loadedMap[img.Parent]; !loaded { - if err := l.legacyLoadImage(img.Parent, sourceDir, loadedMap, 
progressOutput); err != nil { - return err - } - } else { - break - } - } - } - - // todo: try to connect with migrate code - rootFS := image.NewRootFS() - var history []image.History - - if parentID != "" { - parentImg, err := l.is.Get(parentID) - if err != nil { - return err - } - - rootFS = parentImg.RootFS - history = parentImg.History - } - - layerPath, err := safePath(sourceDir, filepath.Join(oldID, legacyLayerFileName)) - if err != nil { - return err - } - newLayer, err := l.loadLayer(layerPath, *rootFS, oldID, img.OS, distribution.Descriptor{}, progressOutput) - if err != nil { - return err - } - rootFS.Append(newLayer.DiffID()) - - h, err := v1.HistoryFromConfig(imageJSON, false) - if err != nil { - return err - } - history = append(history, h) - - config, err := v1.MakeConfigFromV1Config(imageJSON, rootFS, history) - if err != nil { - return err - } - imgID, err := l.is.Create(config) - if err != nil { - return err - } - - metadata, err := l.lss[img.OS].Release(newLayer) - layer.LogReleaseMetadata(metadata) - if err != nil { - return err - } - - if parentID != "" { - if err := l.is.SetParent(imgID, parentID); err != nil { - return err - } - } - - loadedMap[oldID] = imgID - return nil -} - -func safePath(base, path string) (string, error) { - return symlink.FollowSymlinkInScope(filepath.Join(base, path), base) -} - -type parentLink struct { - id, parentID image.ID -} - -func validatedParentLinks(pl []parentLink) (ret []parentLink) { -mainloop: - for i, p := range pl { - ret = append(ret, p) - for _, p2 := range pl { - if p2.id == p.parentID && p2.id != p.id { - continue mainloop - } - } - ret[i].parentID = "" - } - return -} - -func checkValidParent(img, parent *image.Image) bool { - if len(img.History) == 0 && len(parent.History) == 0 { - return true // having history is not mandatory - } - if len(img.History)-len(parent.History) != 1 { - return false - } - for i, h := range parent.History { - if !reflect.DeepEqual(h, img.History[i]) { - return false - } - } - return true -} - -func checkCompatibleOS(imageOS string) error { - // always compatible if the images OS matches the host OS; also match an empty image OS - if imageOS == runtime.GOOS || imageOS == "" { - return nil - } - // On non-Windows hosts, for compatibility, fail if the image is Windows. - if runtime.GOOS != "windows" && imageOS == "windows" { - return fmt.Errorf("cannot load %s image on %s", imageOS, runtime.GOOS) - } - // Finally, check the image OS is supported for the platform. 
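safePath above (via symlink.FollowSymlinkInScope) pins every path taken from the tarball inside the extraction directory, so a crafted manifest cannot climb out with ../ segments. A simplified, symlink-unaware sketch of the same containment idea:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// scopedJoin joins base and path, then rejects any result that lexically
// escapes base. Unlike FollowSymlinkInScope it does not resolve symlinks,
// so it only sketches the containment check.
func scopedJoin(base, path string) (string, error) {
	joined := filepath.Join(base, path) // Join also Cleans the result
	if joined != base && !strings.HasPrefix(joined, base+string(filepath.Separator)) {
		return "", fmt.Errorf("path %q escapes scope %q", path, base)
	}
	return joined, nil
}

func main() {
	base := filepath.Clean("/tmp/docker-import-123")
	for _, p := range []string{"abc123/layer.tar", "../../etc/passwd"} {
		if joined, err := scopedJoin(base, p); err != nil {
			fmt.Println("rejected:", err)
		} else {
			fmt.Println("ok:", joined)
		}
	}
}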
- if err := system.ValidatePlatform(system.ParsePlatform(imageOS)); err != nil { - return fmt.Errorf("cannot load %s image on %s: %s", imageOS, runtime.GOOS, err) - } - return nil -} diff --git a/vendor/github.com/docker/docker/image/tarexport/save.go b/vendor/github.com/docker/docker/image/tarexport/save.go deleted file mode 100644 index 4e734b350..000000000 --- a/vendor/github.com/docker/docker/image/tarexport/save.go +++ /dev/null @@ -1,431 +0,0 @@ -package tarexport // import "github.com/docker/docker/image/tarexport" - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path" - "path/filepath" - "runtime" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/reference" - "github.com/docker/docker/image" - "github.com/docker/docker/image/v1" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/system" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -type imageDescriptor struct { - refs []reference.NamedTagged - layers []string - image *image.Image - layerRef layer.Layer -} - -type saveSession struct { - *tarexporter - outDir string - images map[image.ID]*imageDescriptor - savedLayers map[string]struct{} - diffIDPaths map[layer.DiffID]string // cache every diffID blob to avoid duplicates -} - -func (l *tarexporter) Save(names []string, outStream io.Writer) error { - images, err := l.parseNames(names) - if err != nil { - return err - } - - // Release all the image top layer references - defer l.releaseLayerReferences(images) - return (&saveSession{tarexporter: l, images: images}).save(outStream) -} - -// parseNames will parse the image names to a map which contains image.ID to *imageDescriptor. -// Each imageDescriptor holds an image top layer reference named 'layerRef'. It is taken here, should be released later. 
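checkCompatibleOS above encodes three rules: an image OS that matches the host (or is empty) always loads; a Windows image never loads on a non-Windows host; anything else is left to the platform validator. A stdlib-only restatement, with that last step reduced to a fixed allow-list because system.ValidatePlatform is not reproduced here:

package main

import (
	"fmt"
	"runtime"
)

// compatibleOS restates the rules above; the final platform validation is
// a stand-in allow-list, not the real system.ValidatePlatform.
func compatibleOS(imageOS string) error {
	if imageOS == runtime.GOOS || imageOS == "" {
		return nil
	}
	if runtime.GOOS != "windows" && imageOS == "windows" {
		return fmt.Errorf("cannot load %s image on %s", imageOS, runtime.GOOS)
	}
	switch imageOS {
	case "linux", "windows":
		return nil
	}
	return fmt.Errorf("cannot load %s image on %s: unknown OS", imageOS, runtime.GOOS)
}

func main() {
	for _, imgOS := range []string{"", runtime.GOOS, "windows", "plan9"} {
		fmt.Printf("%-8q -> %v\n", imgOS, compatibleOS(imgOS))
	}
}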
-func (l *tarexporter) parseNames(names []string) (desc map[image.ID]*imageDescriptor, rErr error) { - imgDescr := make(map[image.ID]*imageDescriptor) - defer func() { - if rErr != nil { - l.releaseLayerReferences(imgDescr) - } - }() - - addAssoc := func(id image.ID, ref reference.Named) error { - if _, ok := imgDescr[id]; !ok { - descr := &imageDescriptor{} - if err := l.takeLayerReference(id, descr); err != nil { - return err - } - imgDescr[id] = descr - } - - if ref != nil { - if _, ok := ref.(reference.Canonical); ok { - return nil - } - tagged, ok := reference.TagNameOnly(ref).(reference.NamedTagged) - if !ok { - return nil - } - - for _, t := range imgDescr[id].refs { - if tagged.String() == t.String() { - return nil - } - } - imgDescr[id].refs = append(imgDescr[id].refs, tagged) - } - return nil - } - - for _, name := range names { - ref, err := reference.ParseAnyReference(name) - if err != nil { - return nil, err - } - namedRef, ok := ref.(reference.Named) - if !ok { - // Check if digest ID reference - if digested, ok := ref.(reference.Digested); ok { - id := image.IDFromDigest(digested.Digest()) - if err := addAssoc(id, nil); err != nil { - return nil, err - } - continue - } - return nil, errors.Errorf("invalid reference: %v", name) - } - - if reference.FamiliarName(namedRef) == string(digest.Canonical) { - imgID, err := l.is.Search(name) - if err != nil { - return nil, err - } - if err := addAssoc(imgID, nil); err != nil { - return nil, err - } - continue - } - if reference.IsNameOnly(namedRef) { - assocs := l.rs.ReferencesByName(namedRef) - for _, assoc := range assocs { - if err := addAssoc(image.IDFromDigest(assoc.ID), assoc.Ref); err != nil { - return nil, err - } - } - if len(assocs) == 0 { - imgID, err := l.is.Search(name) - if err != nil { - return nil, err - } - if err := addAssoc(imgID, nil); err != nil { - return nil, err - } - } - continue - } - id, err := l.rs.Get(namedRef) - if err != nil { - return nil, err - } - if err := addAssoc(image.IDFromDigest(id), namedRef); err != nil { - return nil, err - } - - } - return imgDescr, nil -} - -// takeLayerReference will take/Get the image top layer reference -func (l *tarexporter) takeLayerReference(id image.ID, imgDescr *imageDescriptor) error { - img, err := l.is.Get(id) - if err != nil { - return err - } - imgDescr.image = img - topLayerID := img.RootFS.ChainID() - if topLayerID == "" { - return nil - } - os := img.OS - if os == "" { - os = runtime.GOOS - } - if !system.IsOSSupported(os) { - return fmt.Errorf("os %q is not supported", os) - } - layer, err := l.lss[os].Get(topLayerID) - if err != nil { - return err - } - imgDescr.layerRef = layer - return nil -} - -// releaseLayerReferences will release all the image top layer references -func (l *tarexporter) releaseLayerReferences(imgDescr map[image.ID]*imageDescriptor) error { - for _, descr := range imgDescr { - if descr.layerRef != nil { - os := descr.image.OS - if os == "" { - os = runtime.GOOS - } - l.lss[os].Release(descr.layerRef) - } - } - return nil -} - -func (s *saveSession) save(outStream io.Writer) error { - s.savedLayers = make(map[string]struct{}) - s.diffIDPaths = make(map[layer.DiffID]string) - - // get image json - tempDir, err := ioutil.TempDir("", "docker-export-") - if err != nil { - return err - } - defer os.RemoveAll(tempDir) - - s.outDir = tempDir - reposLegacy := make(map[string]map[string]string) - - var manifest []manifestItem - var parentLinks []parentLink - - for id, imageDescr := range s.images { - foreignSrcs, err := s.saveImage(id) - if 
err != nil { - return err - } - - var repoTags []string - var layers []string - - for _, ref := range imageDescr.refs { - familiarName := reference.FamiliarName(ref) - if _, ok := reposLegacy[familiarName]; !ok { - reposLegacy[familiarName] = make(map[string]string) - } - reposLegacy[familiarName][ref.Tag()] = imageDescr.layers[len(imageDescr.layers)-1] - repoTags = append(repoTags, reference.FamiliarString(ref)) - } - - for _, l := range imageDescr.layers { - // IMPORTANT: We use path, not filepath here to ensure the layers - // in the manifest use Unix-style forward-slashes. Otherwise, a - // Linux image saved from LCOW won't be able to be imported on - // LCOL. - layers = append(layers, path.Join(l, legacyLayerFileName)) - } - - manifest = append(manifest, manifestItem{ - Config: id.Digest().Hex() + ".json", - RepoTags: repoTags, - Layers: layers, - LayerSources: foreignSrcs, - }) - - parentID, _ := s.is.GetParent(id) - parentLinks = append(parentLinks, parentLink{id, parentID}) - s.tarexporter.loggerImgEvent.LogImageEvent(id.String(), id.String(), "save") - } - - for i, p := range validatedParentLinks(parentLinks) { - if p.parentID != "" { - manifest[i].Parent = p.parentID - } - } - - if len(reposLegacy) > 0 { - reposFile := filepath.Join(tempDir, legacyRepositoriesFileName) - rf, err := os.OpenFile(reposFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return err - } - - if err := json.NewEncoder(rf).Encode(reposLegacy); err != nil { - rf.Close() - return err - } - - rf.Close() - - if err := system.Chtimes(reposFile, time.Unix(0, 0), time.Unix(0, 0)); err != nil { - return err - } - } - - manifestFileName := filepath.Join(tempDir, manifestFileName) - f, err := os.OpenFile(manifestFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return err - } - - if err := json.NewEncoder(f).Encode(manifest); err != nil { - f.Close() - return err - } - - f.Close() - - if err := system.Chtimes(manifestFileName, time.Unix(0, 0), time.Unix(0, 0)); err != nil { - return err - } - - fs, err := archive.Tar(tempDir, archive.Uncompressed) - if err != nil { - return err - } - defer fs.Close() - - _, err = io.Copy(outStream, fs) - return err -} - -func (s *saveSession) saveImage(id image.ID) (map[layer.DiffID]distribution.Descriptor, error) { - img := s.images[id].image - if len(img.RootFS.DiffIDs) == 0 { - return nil, fmt.Errorf("empty export - not implemented") - } - - var parent digest.Digest - var layers []string - var foreignSrcs map[layer.DiffID]distribution.Descriptor - for i := range img.RootFS.DiffIDs { - v1Img := image.V1Image{ - // This is for backward compatibility used for - // pre v1.9 docker. 
- Created: time.Unix(0, 0), - } - if i == len(img.RootFS.DiffIDs)-1 { - v1Img = img.V1Image - } - rootFS := *img.RootFS - rootFS.DiffIDs = rootFS.DiffIDs[:i+1] - v1ID, err := v1.CreateID(v1Img, rootFS.ChainID(), parent) - if err != nil { - return nil, err - } - - v1Img.ID = v1ID.Hex() - if parent != "" { - v1Img.Parent = parent.Hex() - } - - v1Img.OS = img.OS - src, err := s.saveLayer(rootFS.ChainID(), v1Img, img.Created) - if err != nil { - return nil, err - } - layers = append(layers, v1Img.ID) - parent = v1ID - if src.Digest != "" { - if foreignSrcs == nil { - foreignSrcs = make(map[layer.DiffID]distribution.Descriptor) - } - foreignSrcs[img.RootFS.DiffIDs[i]] = src - } - } - - configFile := filepath.Join(s.outDir, id.Digest().Hex()+".json") - if err := ioutil.WriteFile(configFile, img.RawJSON(), 0644); err != nil { - return nil, err - } - if err := system.Chtimes(configFile, img.Created, img.Created); err != nil { - return nil, err - } - - s.images[id].layers = layers - return foreignSrcs, nil -} - -func (s *saveSession) saveLayer(id layer.ChainID, legacyImg image.V1Image, createdTime time.Time) (distribution.Descriptor, error) { - if _, exists := s.savedLayers[legacyImg.ID]; exists { - return distribution.Descriptor{}, nil - } - - outDir := filepath.Join(s.outDir, legacyImg.ID) - if err := os.Mkdir(outDir, 0755); err != nil { - return distribution.Descriptor{}, err - } - - // todo: why is this version file here? - if err := ioutil.WriteFile(filepath.Join(outDir, legacyVersionFileName), []byte("1.0"), 0644); err != nil { - return distribution.Descriptor{}, err - } - - imageConfig, err := json.Marshal(legacyImg) - if err != nil { - return distribution.Descriptor{}, err - } - - if err := ioutil.WriteFile(filepath.Join(outDir, legacyConfigFileName), imageConfig, 0644); err != nil { - return distribution.Descriptor{}, err - } - - // serialize filesystem - layerPath := filepath.Join(outDir, legacyLayerFileName) - operatingSystem := legacyImg.OS - if operatingSystem == "" { - operatingSystem = runtime.GOOS - } - l, err := s.lss[operatingSystem].Get(id) - if err != nil { - return distribution.Descriptor{}, err - } - defer layer.ReleaseAndLog(s.lss[operatingSystem], l) - - if oldPath, exists := s.diffIDPaths[l.DiffID()]; exists { - relPath, err := filepath.Rel(outDir, oldPath) - if err != nil { - return distribution.Descriptor{}, err - } - if err := os.Symlink(relPath, layerPath); err != nil { - return distribution.Descriptor{}, errors.Wrap(err, "error creating symlink while saving layer") - } - } else { - // Use system.CreateSequential rather than os.Create. This ensures sequential - // file access on Windows to avoid eating into MM standby list. - // On Linux, this equates to a regular os.Create. - tarFile, err := system.CreateSequential(layerPath) - if err != nil { - return distribution.Descriptor{}, err - } - defer tarFile.Close() - - arch, err := l.TarStream() - if err != nil { - return distribution.Descriptor{}, err - } - defer arch.Close() - - if _, err := io.Copy(tarFile, arch); err != nil { - return distribution.Descriptor{}, err - } - - for _, fname := range []string{"", legacyVersionFileName, legacyConfigFileName, legacyLayerFileName} { - // todo: maybe save layer created timestamp? 
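saveLayer above materializes each layer as a directory named by its v1 ID holding VERSION ("1.0"), json (the v1 config), and layer.tar, then pins the timestamps for reproducibility; repeated diffIDs become relative symlinks rather than second copies. A sketch of just the directory layout, with the tar stream stubbed by a byte slice:

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"time"
)

// writeLegacyLayer lays out <out>/<v1ID>/{VERSION,json,layer.tar} the way
// the legacy save format expects; tarData stands in for the real layer
// tar stream.
func writeLegacyLayer(out, v1ID string, cfg interface{}, tarData []byte, created time.Time) error {
	dir := filepath.Join(out, v1ID)
	if err := os.Mkdir(dir, 0755); err != nil {
		return err
	}
	if err := os.WriteFile(filepath.Join(dir, "VERSION"), []byte("1.0"), 0644); err != nil {
		return err
	}
	cfgJSON, err := json.Marshal(cfg)
	if err != nil {
		return err
	}
	if err := os.WriteFile(filepath.Join(dir, "json"), cfgJSON, 0644); err != nil {
		return err
	}
	if err := os.WriteFile(filepath.Join(dir, "layer.tar"), tarData, 0644); err != nil {
		return err
	}
	// Pin mtimes so repeated saves of the same image are byte-stable.
	for _, name := range []string{"VERSION", "json", "layer.tar"} {
		if err := os.Chtimes(filepath.Join(dir, name), created, created); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	out, _ := os.MkdirTemp("", "docker-export-")
	defer os.RemoveAll(out)
	err := writeLegacyLayer(out, "f00dfeed", map[string]string{"id": "f00dfeed"}, nil, time.Unix(0, 0))
	fmt.Println("wrote legacy layer dir:", err == nil)
}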
- if err := system.Chtimes(filepath.Join(outDir, fname), createdTime, createdTime); err != nil { - return distribution.Descriptor{}, err - } - } - - s.diffIDPaths[l.DiffID()] = layerPath - } - s.savedLayers[legacyImg.ID] = struct{}{} - - var src distribution.Descriptor - if fs, ok := l.(distribution.Describable); ok { - src = fs.Descriptor() - } - return src, nil -} diff --git a/vendor/github.com/docker/docker/image/tarexport/tarexport.go b/vendor/github.com/docker/docker/image/tarexport/tarexport.go deleted file mode 100644 index beff668cd..000000000 --- a/vendor/github.com/docker/docker/image/tarexport/tarexport.go +++ /dev/null @@ -1,47 +0,0 @@ -package tarexport // import "github.com/docker/docker/image/tarexport" - -import ( - "github.com/docker/distribution" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - refstore "github.com/docker/docker/reference" -) - -const ( - manifestFileName = "manifest.json" - legacyLayerFileName = "layer.tar" - legacyConfigFileName = "json" - legacyVersionFileName = "VERSION" - legacyRepositoriesFileName = "repositories" -) - -type manifestItem struct { - Config string - RepoTags []string - Layers []string - Parent image.ID `json:",omitempty"` - LayerSources map[layer.DiffID]distribution.Descriptor `json:",omitempty"` -} - -type tarexporter struct { - is image.Store - lss map[string]layer.Store - rs refstore.Store - loggerImgEvent LogImageEvent -} - -// LogImageEvent defines interface for event generation related to image tar(load and save) operations -type LogImageEvent interface { - //LogImageEvent generates an event related to an image operation - LogImageEvent(imageID, refName, action string) -} - -// NewTarExporter returns new Exporter for tar packages -func NewTarExporter(is image.Store, lss map[string]layer.Store, rs refstore.Store, loggerImgEvent LogImageEvent) image.Exporter { - return &tarexporter{ - is: is, - lss: lss, - rs: rs, - loggerImgEvent: loggerImgEvent, - } -} diff --git a/vendor/github.com/docker/docker/image/v1/imagev1.go b/vendor/github.com/docker/docker/image/v1/imagev1.go deleted file mode 100644 index c341ceaa7..000000000 --- a/vendor/github.com/docker/docker/image/v1/imagev1.go +++ /dev/null @@ -1,150 +0,0 @@ -package v1 // import "github.com/docker/docker/image/v1" - -import ( - "encoding/json" - "reflect" - "strings" - - "github.com/docker/docker/api/types/versions" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/stringid" - "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" -) - -// noFallbackMinVersion is the minimum version for which v1compatibility -// information will not be marshaled through the Image struct to remove -// blank fields. -var noFallbackMinVersion = "1.8.3" - -// HistoryFromConfig creates a History struct from v1 configuration JSON -func HistoryFromConfig(imageJSON []byte, emptyLayer bool) (image.History, error) { - h := image.History{} - var v1Image image.V1Image - if err := json.Unmarshal(imageJSON, &v1Image); err != nil { - return h, err - } - - return image.History{ - Author: v1Image.Author, - Created: v1Image.Created, - CreatedBy: strings.Join(v1Image.ContainerConfig.Cmd, " "), - Comment: v1Image.Comment, - EmptyLayer: emptyLayer, - }, nil -} - -// CreateID creates an ID from v1 image, layerID and parent ID. -// Used for backwards compatibility with old clients. 
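CreateID below, like the MakeConfig helpers that follow it, rewrites image configs by round-tripping through map[string]*json.RawMessage: unknown fields survive byte-for-byte, a few keys are added or deleted, and re-marshaling emits sorted top-level keys. A minimal demonstration of that pattern (the digests are made up):

package main

import (
	"encoding/json"
	"fmt"
)

func rawJSON(v interface{}) *json.RawMessage {
	b, err := json.Marshal(v)
	if err != nil {
		return nil
	}
	return (*json.RawMessage)(&b)
}

func main() {
	src := []byte(`{"id":"old-v1-id","os":"linux","config":{"Cmd":["sh"]}}`)

	var cfg map[string]*json.RawMessage
	if err := json.Unmarshal(src, &cfg); err != nil {
		panic(err)
	}

	// Surgery in the style of CreateID: drop the mutable field, graft on
	// the layer and parent references, leave everything else untouched.
	delete(cfg, "id")
	cfg["layer_id"] = rawJSON("sha256:aaaa")
	cfg["parent"] = rawJSON("sha256:bbbb")

	out, err := json.Marshal(cfg) // map keys marshal in sorted order
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}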
-func CreateID(v1Image image.V1Image, layerID layer.ChainID, parent digest.Digest) (digest.Digest, error) { - v1Image.ID = "" - v1JSON, err := json.Marshal(v1Image) - if err != nil { - return "", err - } - - var config map[string]*json.RawMessage - if err := json.Unmarshal(v1JSON, &config); err != nil { - return "", err - } - - // FIXME: note that this is slightly incompatible with RootFS logic - config["layer_id"] = rawJSON(layerID) - if parent != "" { - config["parent"] = rawJSON(parent) - } - - configJSON, err := json.Marshal(config) - if err != nil { - return "", err - } - logrus.Debugf("CreateV1ID %s", configJSON) - - return digest.FromBytes(configJSON), nil -} - -// MakeConfigFromV1Config creates an image config from the legacy V1 config format. -func MakeConfigFromV1Config(imageJSON []byte, rootfs *image.RootFS, history []image.History) ([]byte, error) { - var dver struct { - DockerVersion string `json:"docker_version"` - } - - if err := json.Unmarshal(imageJSON, &dver); err != nil { - return nil, err - } - - useFallback := versions.LessThan(dver.DockerVersion, noFallbackMinVersion) - - if useFallback { - var v1Image image.V1Image - err := json.Unmarshal(imageJSON, &v1Image) - if err != nil { - return nil, err - } - imageJSON, err = json.Marshal(v1Image) - if err != nil { - return nil, err - } - } - - var c map[string]*json.RawMessage - if err := json.Unmarshal(imageJSON, &c); err != nil { - return nil, err - } - - delete(c, "id") - delete(c, "parent") - delete(c, "Size") // Size is calculated from data on disk and is inconsistent - delete(c, "parent_id") - delete(c, "layer_id") - delete(c, "throwaway") - - c["rootfs"] = rawJSON(rootfs) - c["history"] = rawJSON(history) - - return json.Marshal(c) -} - -// MakeV1ConfigFromConfig creates a legacy V1 image config from an Image struct -func MakeV1ConfigFromConfig(img *image.Image, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { - // Top-level v1compatibility string should be a modified version of the - // image config. - var configAsMap map[string]*json.RawMessage - if err := json.Unmarshal(img.RawJSON(), &configAsMap); err != nil { - return nil, err - } - - // Delete fields that didn't exist in old manifest - imageType := reflect.TypeOf(img).Elem() - for i := 0; i < imageType.NumField(); i++ { - f := imageType.Field(i) - jsonName := strings.Split(f.Tag.Get("json"), ",")[0] - // Parent is handled specially below. - if jsonName != "" && jsonName != "parent" { - delete(configAsMap, jsonName) - } - } - configAsMap["id"] = rawJSON(v1ID) - if parentV1ID != "" { - configAsMap["parent"] = rawJSON(parentV1ID) - } - if throwaway { - configAsMap["throwaway"] = rawJSON(true) - } - - return json.Marshal(configAsMap) -} - -func rawJSON(value interface{}) *json.RawMessage { - jsonval, err := json.Marshal(value) - if err != nil { - return nil - } - return (*json.RawMessage)(&jsonval) -} - -// ValidateID checks whether an ID string is a valid image ID. -func ValidateID(id string) error { - return stringid.ValidateID(id) -} diff --git a/vendor/github.com/docker/docker/integration-cli/checker/checker.go b/vendor/github.com/docker/docker/integration-cli/checker/checker.go deleted file mode 100644 index d7fdc412b..000000000 --- a/vendor/github.com/docker/docker/integration-cli/checker/checker.go +++ /dev/null @@ -1,46 +0,0 @@ -// Package checker provides Docker specific implementations of the go-check.Checker interface. 
-package checker // import "github.com/docker/docker/integration-cli/checker" - -import ( - "github.com/go-check/check" - "github.com/vdemeester/shakers" -) - -// As a commodity, we bring all check.Checker variables into the current namespace to avoid having -// to think about check.X versus checker.X. -var ( - DeepEquals = check.DeepEquals - ErrorMatches = check.ErrorMatches - FitsTypeOf = check.FitsTypeOf - HasLen = check.HasLen - Implements = check.Implements - IsNil = check.IsNil - Matches = check.Matches - Not = check.Not - NotNil = check.NotNil - PanicMatches = check.PanicMatches - Panics = check.Panics - - Contains = shakers.Contains - ContainsAny = shakers.ContainsAny - Count = shakers.Count - Equals = shakers.Equals - EqualFold = shakers.EqualFold - False = shakers.False - GreaterOrEqualThan = shakers.GreaterOrEqualThan - GreaterThan = shakers.GreaterThan - HasPrefix = shakers.HasPrefix - HasSuffix = shakers.HasSuffix - Index = shakers.Index - IndexAny = shakers.IndexAny - IsAfter = shakers.IsAfter - IsBefore = shakers.IsBefore - IsBetween = shakers.IsBetween - IsLower = shakers.IsLower - IsUpper = shakers.IsUpper - LessOrEqualThan = shakers.LessOrEqualThan - LessThan = shakers.LessThan - TimeEquals = shakers.TimeEquals - True = shakers.True - TimeIgnore = shakers.TimeIgnore -) diff --git a/vendor/github.com/docker/docker/integration-cli/cli/build/build.go b/vendor/github.com/docker/docker/integration-cli/cli/build/build.go deleted file mode 100644 index 71048d0d6..000000000 --- a/vendor/github.com/docker/docker/integration-cli/cli/build/build.go +++ /dev/null @@ -1,82 +0,0 @@ -package build // import "github.com/docker/docker/integration-cli/cli/build" - -import ( - "io" - "strings" - - "github.com/docker/docker/internal/test/fakecontext" - "github.com/gotestyourself/gotestyourself/icmd" -) - -type testingT interface { - Fatal(args ...interface{}) - Fatalf(string, ...interface{}) -} - -// WithStdinContext sets the build context from the standard input with the specified reader -func WithStdinContext(closer io.ReadCloser) func(*icmd.Cmd) func() { - return func(cmd *icmd.Cmd) func() { - cmd.Command = append(cmd.Command, "-") - cmd.Stdin = closer - return func() { - // FIXME(vdemeester) we should not ignore the error here… - closer.Close() - } - } -} - -// WithDockerfile creates / returns a CmdOperator to set the Dockerfile for a build operation -func WithDockerfile(dockerfile string) func(*icmd.Cmd) func() { - return func(cmd *icmd.Cmd) func() { - cmd.Command = append(cmd.Command, "-") - cmd.Stdin = strings.NewReader(dockerfile) - return nil - } -} - -// WithoutCache makes the build ignore cache -func WithoutCache(cmd *icmd.Cmd) func() { - cmd.Command = append(cmd.Command, "--no-cache") - return nil -} - -// WithContextPath sets the build context path -func WithContextPath(path string) func(*icmd.Cmd) func() { - return func(cmd *icmd.Cmd) func() { - cmd.Command = append(cmd.Command, path) - return nil - } -} - -// WithExternalBuildContext use the specified context as build context -func WithExternalBuildContext(ctx *fakecontext.Fake) func(*icmd.Cmd) func() { - return func(cmd *icmd.Cmd) func() { - cmd.Dir = ctx.Dir - cmd.Command = append(cmd.Command, ".") - return nil - } -} - -// WithBuildContext sets up the build context -func WithBuildContext(t testingT, contextOperators ...func(*fakecontext.Fake) error) func(*icmd.Cmd) func() { - // FIXME(vdemeester) de-duplicate that - ctx := fakecontext.New(t, "", contextOperators...) 
- return func(cmd *icmd.Cmd) func() {
- cmd.Dir = ctx.Dir
- cmd.Command = append(cmd.Command, ".")
- return closeBuildContext(t, ctx)
- }
-}
-
-// WithFile adds the specified file (with content) to the build context
-func WithFile(name, content string) func(*fakecontext.Fake) error {
- return fakecontext.WithFile(name, content)
-}
-
-func closeBuildContext(t testingT, ctx *fakecontext.Fake) func() {
- return func() {
- if err := ctx.Close(); err != nil {
- t.Fatal(err)
- }
- }
-}
diff --git a/vendor/github.com/docker/docker/integration-cli/cli/cli.go b/vendor/github.com/docker/docker/integration-cli/cli/cli.go
deleted file mode 100644
index 17f3fd52c..000000000
--- a/vendor/github.com/docker/docker/integration-cli/cli/cli.go
+++ /dev/null
@@ -1,226 +0,0 @@
-package cli // import "github.com/docker/docker/integration-cli/cli"
-
-import (
- "fmt"
- "io"
- "strings"
- "time"
-
- "github.com/docker/docker/integration-cli/daemon"
- "github.com/docker/docker/integration-cli/environment"
- "github.com/gotestyourself/gotestyourself/assert"
- "github.com/gotestyourself/gotestyourself/icmd"
- "github.com/pkg/errors"
-)
-
-var testEnv *environment.Execution
-
-// SetTestEnvironment sets a static test environment
-// TODO: decouple this package from environment
-func SetTestEnvironment(env *environment.Execution) {
- testEnv = env
-}
-
-// CmdOperator defines functions that can modify a command
-type CmdOperator func(*icmd.Cmd) func()
-
-type testingT interface {
- assert.TestingT
- Fatal(args ...interface{})
- Fatalf(string, ...interface{})
-}
-
-// DockerCmd executes the specified docker command and expects success
-func DockerCmd(t testingT, args ...string) *icmd.Result {
- return Docker(Args(args...)).Assert(t, icmd.Success)
-}
-
-// BuildCmd executes the specified docker build command and expects success
-func BuildCmd(t testingT, name string, cmdOperators ...CmdOperator) *icmd.Result {
- return Docker(Build(name), cmdOperators...).Assert(t, icmd.Success)
-}
-
-// InspectCmd executes the specified docker inspect command and expects success
-func InspectCmd(t testingT, name string, cmdOperators ...CmdOperator) *icmd.Result {
- return Docker(Inspect(name), cmdOperators...).Assert(t, icmd.Success)
-}
-
-// WaitRun will wait for the specified container to be running, maximum 5 seconds.
-func WaitRun(t testingT, name string, cmdOperators ...CmdOperator) {
- WaitForInspectResult(t, name, "{{.State.Running}}", "true", 5*time.Second, cmdOperators...)
-}
-
-// WaitExited will wait for the specified container to reach the exited state,
-// subject to a maximum time limit supplied by the caller
-func WaitExited(t testingT, name string, timeout time.Duration, cmdOperators ...CmdOperator) {
- WaitForInspectResult(t, name, "{{.State.Status}}", "exited", timeout, cmdOperators...)
-}
-
-// WaitRestart will wait for the specified container to restart once
-func WaitRestart(t testingT, name string, timeout time.Duration, cmdOperators ...CmdOperator) {
- WaitForInspectResult(t, name, "{{.RestartCount}}", "1", timeout, cmdOperators...)
-}
-
-// WaitForInspectResult waits for the specified expression to equal the specified expected string within the given time.
-func WaitForInspectResult(t testingT, name, expr, expected string, timeout time.Duration, cmdOperators ...CmdOperator) {
- after := time.After(timeout)
-
- args := []string{"inspect", "-f", expr, name}
- for {
- result := Docker(Args(args...), cmdOperators...)
- if result.Error != nil { - if !strings.Contains(strings.ToLower(result.Stderr()), "no such") { - t.Fatalf("error executing docker inspect: %v\n%s", - result.Stderr(), result.Stdout()) - } - select { - case <-after: - t.Fatal(result.Error) - default: - time.Sleep(10 * time.Millisecond) - continue - } - } - - out := strings.TrimSpace(result.Stdout()) - if out == expected { - break - } - - select { - case <-after: - t.Fatalf("condition \"%q == %q\" not true in time (%v)", out, expected, timeout) - default: - } - - time.Sleep(100 * time.Millisecond) - } -} - -// Docker executes the specified docker command -func Docker(cmd icmd.Cmd, cmdOperators ...CmdOperator) *icmd.Result { - for _, op := range cmdOperators { - deferFn := op(&cmd) - if deferFn != nil { - defer deferFn() - } - } - appendDocker(&cmd) - if err := validateArgs(cmd.Command...); err != nil { - return &icmd.Result{ - Error: err, - } - } - return icmd.RunCmd(cmd) -} - -// validateArgs is a checker to ensure tests are not running commands which are -// not supported on platforms. Specifically on Windows this is 'busybox top'. -func validateArgs(args ...string) error { - if testEnv.OSType != "windows" { - return nil - } - foundBusybox := -1 - for key, value := range args { - if strings.ToLower(value) == "busybox" { - foundBusybox = key - } - if (foundBusybox != -1) && (key == foundBusybox+1) && (strings.ToLower(value) == "top") { - return errors.New("cannot use 'busybox top' in tests on Windows. Use runSleepingContainer()") - } - } - return nil -} - -// Build executes the specified docker build command -func Build(name string) icmd.Cmd { - return icmd.Command("build", "-t", name) -} - -// Inspect executes the specified docker inspect command -func Inspect(name string) icmd.Cmd { - return icmd.Command("inspect", name) -} - -// Format sets the specified format with --format flag -func Format(format string) func(*icmd.Cmd) func() { - return func(cmd *icmd.Cmd) func() { - cmd.Command = append( - []string{cmd.Command[0]}, - append([]string{"--format", fmt.Sprintf("{{%s}}", format)}, cmd.Command[1:]...)..., - ) - return nil - } -} - -func appendDocker(cmd *icmd.Cmd) { - cmd.Command = append([]string{testEnv.DockerBinary()}, cmd.Command...) -} - -// Args build an icmd.Cmd struct from the specified arguments -func Args(args ...string) icmd.Cmd { - switch len(args) { - case 0: - return icmd.Cmd{} - case 1: - return icmd.Command(args[0]) - default: - return icmd.Command(args[0], args[1:]...) - } -} - -// Daemon points to the specified daemon -func Daemon(d *daemon.Daemon) func(*icmd.Cmd) func() { - return func(cmd *icmd.Cmd) func() { - cmd.Command = append([]string{"--host", d.Sock()}, cmd.Command...) - return nil - } -} - -// WithTimeout sets the timeout for the command to run -func WithTimeout(timeout time.Duration) func(cmd *icmd.Cmd) func() { - return func(cmd *icmd.Cmd) func() { - cmd.Timeout = timeout - return nil - } -} - -// WithEnvironmentVariables sets the specified environment variables for the command to run -func WithEnvironmentVariables(envs ...string) func(cmd *icmd.Cmd) func() { - return func(cmd *icmd.Cmd) func() { - cmd.Env = envs - return nil - } -} - -// WithFlags sets the specified flags for the command to run -func WithFlags(flags ...string) func(*icmd.Cmd) func() { - return func(cmd *icmd.Cmd) func() { - cmd.Command = append(cmd.Command, flags...) 
- return nil - } -} - -// InDir sets the folder in which the command should be executed -func InDir(path string) func(*icmd.Cmd) func() { - return func(cmd *icmd.Cmd) func() { - cmd.Dir = path - return nil - } -} - -// WithStdout sets the standard output writer of the command -func WithStdout(writer io.Writer) func(*icmd.Cmd) func() { - return func(cmd *icmd.Cmd) func() { - cmd.Stdout = writer - return nil - } -} - -// WithStdin sets the standard input reader for the command -func WithStdin(stdin io.Reader) func(*icmd.Cmd) func() { - return func(cmd *icmd.Cmd) func() { - cmd.Stdin = stdin - return nil - } -} diff --git a/vendor/github.com/docker/docker/integration-cli/daemon/daemon.go b/vendor/github.com/docker/docker/integration-cli/daemon/daemon.go deleted file mode 100644 index fcbbfdfb0..000000000 --- a/vendor/github.com/docker/docker/integration-cli/daemon/daemon.go +++ /dev/null @@ -1,143 +0,0 @@ -package daemon // import "github.com/docker/docker/integration-cli/daemon" - -import ( - "fmt" - "strings" - "time" - - "github.com/docker/docker/integration-cli/checker" - "github.com/docker/docker/internal/test/daemon" - "github.com/go-check/check" - "github.com/gotestyourself/gotestyourself/assert" - "github.com/gotestyourself/gotestyourself/icmd" - "github.com/pkg/errors" -) - -type testingT interface { - assert.TestingT - logT - Fatalf(string, ...interface{}) -} - -type logT interface { - Logf(string, ...interface{}) -} - -// Daemon represents a Docker daemon for the testing framework. -type Daemon struct { - *daemon.Daemon - dockerBinary string -} - -// New returns a Daemon instance to be used for testing. -// This will create a directory such as d123456789 in the folder specified by $DOCKER_INTEGRATION_DAEMON_DEST or $DEST. -// The daemon will not automatically start. -func New(t testingT, dockerBinary string, dockerdBinary string, ops ...func(*daemon.Daemon)) *Daemon { - ops = append(ops, daemon.WithDockerdBinary(dockerdBinary)) - d := daemon.New(t, ops...) - return &Daemon{ - Daemon: d, - dockerBinary: dockerBinary, - } -} - -// Cmd executes a docker CLI command against this daemon. -// Example: d.Cmd("version") will run docker -H unix://path/to/unix.sock version -func (d *Daemon) Cmd(args ...string) (string, error) { - result := icmd.RunCmd(d.Command(args...)) - return result.Combined(), result.Error -} - -// Command creates a docker CLI command against this daemon, to be executed later. -// Example: d.Command("version") creates a command to run "docker -H unix://path/to/unix.sock version" -func (d *Daemon) Command(args ...string) icmd.Cmd { - return icmd.Command(d.dockerBinary, d.PrependHostArg(args)...) -} - -// PrependHostArg prepend the specified arguments by the daemon host flags -func (d *Daemon) PrependHostArg(args []string) []string { - for _, arg := range args { - if arg == "--host" || arg == "-H" { - return args - } - } - return append([]string{"--host", d.Sock()}, args...) 
-} - -// GetIDByName returns the ID of an object (container, volume, …) given its name -func (d *Daemon) GetIDByName(name string) (string, error) { - return d.inspectFieldWithError(name, "Id") -} - -// InspectField returns the field filter by 'filter' -func (d *Daemon) InspectField(name, filter string) (string, error) { - return d.inspectFilter(name, filter) -} - -func (d *Daemon) inspectFilter(name, filter string) (string, error) { - format := fmt.Sprintf("{{%s}}", filter) - out, err := d.Cmd("inspect", "-f", format, name) - if err != nil { - return "", errors.Errorf("failed to inspect %s: %s", name, out) - } - return strings.TrimSpace(out), nil -} - -func (d *Daemon) inspectFieldWithError(name, field string) (string, error) { - return d.inspectFilter(name, fmt.Sprintf(".%s", field)) -} - -// CheckActiveContainerCount returns the number of active containers -// FIXME(vdemeester) should re-use ActivateContainers in some way -func (d *Daemon) CheckActiveContainerCount(c *check.C) (interface{}, check.CommentInterface) { - out, err := d.Cmd("ps", "-q") - c.Assert(err, checker.IsNil) - if len(strings.TrimSpace(out)) == 0 { - return 0, nil - } - return len(strings.Split(strings.TrimSpace(out), "\n")), check.Commentf("output: %q", string(out)) -} - -// WaitRun waits for a container to be running for 10s -func (d *Daemon) WaitRun(contID string) error { - args := []string{"--host", d.Sock()} - return WaitInspectWithArgs(d.dockerBinary, contID, "{{.State.Running}}", "true", 10*time.Second, args...) -} - -// WaitInspectWithArgs waits for the specified expression to be equals to the specified expected string in the given time. -// Deprecated: use cli.WaitCmd instead -func WaitInspectWithArgs(dockerBinary, name, expr, expected string, timeout time.Duration, arg ...string) error { - after := time.After(timeout) - - args := append(arg, "inspect", "-f", expr, name) - for { - result := icmd.RunCommand(dockerBinary, args...) - if result.Error != nil { - if !strings.Contains(strings.ToLower(result.Stderr()), "no such") { - return errors.Errorf("error executing docker inspect: %v\n%s", - result.Stderr(), result.Stdout()) - } - select { - case <-after: - return result.Error - default: - time.Sleep(10 * time.Millisecond) - continue - } - } - - out := strings.TrimSpace(result.Stdout()) - if out == expected { - break - } - - select { - case <-after: - return errors.Errorf("condition \"%q == %q\" not true in time (%v)", out, expected, timeout) - default: - } - - time.Sleep(100 * time.Millisecond) - } - return nil -} diff --git a/vendor/github.com/docker/docker/integration-cli/daemon/daemon_swarm.go b/vendor/github.com/docker/docker/integration-cli/daemon/daemon_swarm.go deleted file mode 100644 index c38f5a69e..000000000 --- a/vendor/github.com/docker/docker/integration-cli/daemon/daemon_swarm.go +++ /dev/null @@ -1,197 +0,0 @@ -package daemon // import "github.com/docker/docker/integration-cli/daemon" - -import ( - "context" - "fmt" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/client" - "github.com/docker/docker/integration-cli/checker" - "github.com/go-check/check" - "github.com/gotestyourself/gotestyourself/assert" -) - -// CheckServiceTasksInState returns the number of tasks with a matching state, -// and optional message substring. 
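For illustration only (not part of the original patch): the Check* helpers below return go-check style check functions that the integration-cli suite polled until a condition held. A minimal hand-rolled poll loop is sketched here; the waitFor name, the timeout, and the service name are invented for the example.

    // waitFor repeatedly evaluates a daemon check function until it yields the
    // expected value or the timeout elapses.
    // Assumed imports: "time", "github.com/go-check/check".
    func waitFor(c *check.C, timeout time.Duration,
        f func(*check.C) (interface{}, check.CommentInterface), expected interface{}) {
        deadline := time.Now().Add(timeout)
        for {
            v, comment := f(c)
            if v == expected {
                return
            }
            if time.Now().After(deadline) {
                c.Fatalf("timed out waiting for %v, last value %v (%v)", expected, v, comment)
            }
            time.Sleep(100 * time.Millisecond)
        }
    }

    // Inside a suite method: wait until service "web" reports 3 running tasks.
    // waitFor(c, 30*time.Second, d.CheckServiceRunningTasks("web"), 3)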
-func (d *Daemon) CheckServiceTasksInState(service string, state swarm.TaskState, message string) func(*check.C) (interface{}, check.CommentInterface) { - return func(c *check.C) (interface{}, check.CommentInterface) { - tasks := d.GetServiceTasks(c, service) - var count int - for _, task := range tasks { - if task.Status.State == state { - if message == "" || strings.Contains(task.Status.Message, message) { - count++ - } - } - } - return count, nil - } -} - -// CheckServiceTasksInStateWithError returns the number of tasks with a matching state, -// and optional message substring. -func (d *Daemon) CheckServiceTasksInStateWithError(service string, state swarm.TaskState, errorMessage string) func(*check.C) (interface{}, check.CommentInterface) { - return func(c *check.C) (interface{}, check.CommentInterface) { - tasks := d.GetServiceTasks(c, service) - var count int - for _, task := range tasks { - if task.Status.State == state { - if errorMessage == "" || strings.Contains(task.Status.Err, errorMessage) { - count++ - } - } - } - return count, nil - } -} - -// CheckServiceRunningTasks returns the number of running tasks for the specified service -func (d *Daemon) CheckServiceRunningTasks(service string) func(*check.C) (interface{}, check.CommentInterface) { - return d.CheckServiceTasksInState(service, swarm.TaskStateRunning, "") -} - -// CheckServiceUpdateState returns the current update state for the specified service -func (d *Daemon) CheckServiceUpdateState(service string) func(*check.C) (interface{}, check.CommentInterface) { - return func(c *check.C) (interface{}, check.CommentInterface) { - service := d.GetService(c, service) - if service.UpdateStatus == nil { - return "", nil - } - return service.UpdateStatus.State, nil - } -} - -// CheckPluginRunning returns the runtime state of the plugin -func (d *Daemon) CheckPluginRunning(plugin string) func(c *check.C) (interface{}, check.CommentInterface) { - return func(c *check.C) (interface{}, check.CommentInterface) { - apiclient, err := d.NewClient() - assert.NilError(c, err) - resp, _, err := apiclient.PluginInspectWithRaw(context.Background(), plugin) - if client.IsErrNotFound(err) { - return false, check.Commentf("%v", err) - } - assert.NilError(c, err) - return resp.Enabled, check.Commentf("%+v", resp) - } -} - -// CheckPluginImage returns the runtime state of the plugin -func (d *Daemon) CheckPluginImage(plugin string) func(c *check.C) (interface{}, check.CommentInterface) { - return func(c *check.C) (interface{}, check.CommentInterface) { - apiclient, err := d.NewClient() - assert.NilError(c, err) - resp, _, err := apiclient.PluginInspectWithRaw(context.Background(), plugin) - if client.IsErrNotFound(err) { - return false, check.Commentf("%v", err) - } - assert.NilError(c, err) - return resp.PluginReference, check.Commentf("%+v", resp) - } -} - -// CheckServiceTasks returns the number of tasks for the specified service -func (d *Daemon) CheckServiceTasks(service string) func(*check.C) (interface{}, check.CommentInterface) { - return func(c *check.C) (interface{}, check.CommentInterface) { - tasks := d.GetServiceTasks(c, service) - return len(tasks), nil - } -} - -// CheckRunningTaskNetworks returns the number of times each network is referenced from a task. 
-func (d *Daemon) CheckRunningTaskNetworks(c *check.C) (interface{}, check.CommentInterface) { - cli, err := d.NewClient() - c.Assert(err, checker.IsNil) - defer cli.Close() - - filterArgs := filters.NewArgs() - filterArgs.Add("desired-state", "running") - - options := types.TaskListOptions{ - Filters: filterArgs, - } - - tasks, err := cli.TaskList(context.Background(), options) - c.Assert(err, checker.IsNil) - - result := make(map[string]int) - for _, task := range tasks { - for _, network := range task.Spec.Networks { - result[network.Target]++ - } - } - return result, nil -} - -// CheckRunningTaskImages returns the times each image is running as a task. -func (d *Daemon) CheckRunningTaskImages(c *check.C) (interface{}, check.CommentInterface) { - cli, err := d.NewClient() - c.Assert(err, checker.IsNil) - defer cli.Close() - - filterArgs := filters.NewArgs() - filterArgs.Add("desired-state", "running") - - options := types.TaskListOptions{ - Filters: filterArgs, - } - - tasks, err := cli.TaskList(context.Background(), options) - c.Assert(err, checker.IsNil) - - result := make(map[string]int) - for _, task := range tasks { - if task.Status.State == swarm.TaskStateRunning && task.Spec.ContainerSpec != nil { - result[task.Spec.ContainerSpec.Image]++ - } - } - return result, nil -} - -// CheckNodeReadyCount returns the number of ready node on the swarm -func (d *Daemon) CheckNodeReadyCount(c *check.C) (interface{}, check.CommentInterface) { - nodes := d.ListNodes(c) - var readyCount int - for _, node := range nodes { - if node.Status.State == swarm.NodeStateReady { - readyCount++ - } - } - return readyCount, nil -} - -// CheckLocalNodeState returns the current swarm node state -func (d *Daemon) CheckLocalNodeState(c *check.C) (interface{}, check.CommentInterface) { - info := d.SwarmInfo(c) - return info.LocalNodeState, nil -} - -// CheckControlAvailable returns the current swarm control available -func (d *Daemon) CheckControlAvailable(c *check.C) (interface{}, check.CommentInterface) { - info := d.SwarmInfo(c) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) - return info.ControlAvailable, nil -} - -// CheckLeader returns whether there is a leader on the swarm or not -func (d *Daemon) CheckLeader(c *check.C) (interface{}, check.CommentInterface) { - cli, err := d.NewClient() - c.Assert(err, checker.IsNil) - defer cli.Close() - - errList := check.Commentf("could not get node list") - - ls, err := cli.NodeList(context.Background(), types.NodeListOptions{}) - if err != nil { - return err, errList - } - - for _, node := range ls { - if node.ManagerStatus != nil && node.ManagerStatus.Leader { - return nil, nil - } - } - return fmt.Errorf("no leader"), check.Commentf("could not find leader") -} diff --git a/vendor/github.com/docker/docker/integration-cli/environment/environment.go b/vendor/github.com/docker/docker/integration-cli/environment/environment.go deleted file mode 100644 index 82cf99652..000000000 --- a/vendor/github.com/docker/docker/integration-cli/environment/environment.go +++ /dev/null @@ -1,49 +0,0 @@ -package environment // import "github.com/docker/docker/integration-cli/environment" - -import ( - "os" - "os/exec" - - "github.com/docker/docker/internal/test/environment" -) - -var ( - // DefaultClientBinary is the name of the docker binary - DefaultClientBinary = os.Getenv("TEST_CLIENT_BINARY") -) - -func init() { - if DefaultClientBinary == "" { - DefaultClientBinary = "docker" - } -} - -// Execution contains information about the current test 
execution and daemon
-// under test
-type Execution struct {
- environment.Execution
- dockerBinary string
-}
-
-// DockerBinary returns the docker binary for this testing environment
-func (e *Execution) DockerBinary() string {
- return e.dockerBinary
-}
-
-// New returns details about the testing environment
-func New() (*Execution, error) {
- env, err := environment.New()
- if err != nil {
- return nil, err
- }
-
- dockerBinary, err := exec.LookPath(DefaultClientBinary)
- if err != nil {
- return nil, err
- }
-
- return &Execution{
- Execution: *env,
- dockerBinary: dockerBinary,
- }, nil
-}
diff --git a/vendor/github.com/docker/docker/integration-cli/requirement/requirement.go b/vendor/github.com/docker/docker/integration-cli/requirement/requirement.go
deleted file mode 100644
index 45a1bcabf..000000000
--- a/vendor/github.com/docker/docker/integration-cli/requirement/requirement.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package requirement // import "github.com/docker/docker/integration-cli/requirement"
-
-import (
- "fmt"
- "path"
- "reflect"
- "runtime"
- "strings"
-)
-
-// SkipT is the interface required to skip tests
-type SkipT interface {
- Skip(reason string)
-}
-
-// Test represents a function that can be used as a requirement validation.
-type Test func() bool
-
-// Is checks if the environment satisfies the requirements
-// for the test to run, and skips the test otherwise.
-func Is(s SkipT, requirements ...Test) {
- for _, r := range requirements {
- isValid := r()
- if !isValid {
- requirementFunc := runtime.FuncForPC(reflect.ValueOf(r).Pointer()).Name()
- s.Skip(fmt.Sprintf("unmatched requirement %s", extractRequirement(requirementFunc)))
- }
- }
-}
-
-func extractRequirement(requirementFunc string) string {
- requirement := path.Base(requirementFunc)
- return strings.SplitN(requirement, ".", 2)[1]
-}
diff --git a/vendor/github.com/docker/docker/integration/doc.go b/vendor/github.com/docker/docker/integration/doc.go
deleted file mode 100644
index ee4bf5043..000000000
--- a/vendor/github.com/docker/docker/integration/doc.go
+++ /dev/null
@@ -1,3 +0,0 @@
-// Package integration provides integration tests for Moby (API).
-// These tests require a daemon (dockerd for now) to run.
-package integration // import "github.com/docker/docker/integration"
diff --git a/vendor/github.com/docker/docker/integration/internal/container/container.go b/vendor/github.com/docker/docker/integration/internal/container/container.go
deleted file mode 100644
index 0c7657176..000000000
--- a/vendor/github.com/docker/docker/integration/internal/container/container.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package container
-
-import (
- "context"
- "testing"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/container"
- "github.com/docker/docker/api/types/network"
- "github.com/docker/docker/client"
- "github.com/gotestyourself/gotestyourself/assert"
-)
-
-// TestContainerConfig holds the container configuration structs that
-// are used in API calls.
-type TestContainerConfig struct { - Name string - Config *container.Config - HostConfig *container.HostConfig - NetworkingConfig *network.NetworkingConfig -} - -// Create creates a container with the specified options -func Create(t *testing.T, ctx context.Context, client client.APIClient, ops ...func(*TestContainerConfig)) string { // nolint: golint - t.Helper() - config := &TestContainerConfig{ - Config: &container.Config{ - Image: "busybox", - Cmd: []string{"top"}, - }, - HostConfig: &container.HostConfig{}, - NetworkingConfig: &network.NetworkingConfig{}, - } - - for _, op := range ops { - op(config) - } - - c, err := client.ContainerCreate(ctx, config.Config, config.HostConfig, config.NetworkingConfig, config.Name) - assert.NilError(t, err) - - return c.ID -} - -// Run creates and start a container with the specified options -func Run(t *testing.T, ctx context.Context, client client.APIClient, ops ...func(*TestContainerConfig)) string { // nolint: golint - t.Helper() - id := Create(t, ctx, client, ops...) - - err := client.ContainerStart(ctx, id, types.ContainerStartOptions{}) - assert.NilError(t, err) - - return id -} diff --git a/vendor/github.com/docker/docker/integration/internal/container/exec.go b/vendor/github.com/docker/docker/integration/internal/container/exec.go deleted file mode 100644 index 55ad23aeb..000000000 --- a/vendor/github.com/docker/docker/integration/internal/container/exec.go +++ /dev/null @@ -1,86 +0,0 @@ -package container - -import ( - "bytes" - "context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/client" - "github.com/docker/docker/pkg/stdcopy" -) - -// ExecResult represents a result returned from Exec() -type ExecResult struct { - ExitCode int - outBuffer *bytes.Buffer - errBuffer *bytes.Buffer -} - -// Stdout returns stdout output of a command run by Exec() -func (res *ExecResult) Stdout() string { - return res.outBuffer.String() -} - -// Stderr returns stderr output of a command run by Exec() -func (res *ExecResult) Stderr() string { - return res.errBuffer.String() -} - -// Combined returns combined stdout and stderr output of a command run by Exec() -func (res *ExecResult) Combined() string { - return res.outBuffer.String() + res.errBuffer.String() -} - -// Exec executes a command inside a container, returning the result -// containing stdout, stderr, and exit code. Note: -// - this is a synchronous operation; -// - cmd stdin is closed. 
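For illustration only (not part of the original file), a sketch of how a test could drive Exec end to end; it reuses Run from container.go above, and client.NewEnvClient, which this vendored tree still provides:

    package container_test

    import (
        "context"
        "testing"

        "github.com/docker/docker/client"
        "github.com/docker/docker/integration/internal/container"
        "github.com/gotestyourself/gotestyourself/assert"
    )

    func TestExecEcho(t *testing.T) {
        ctx := context.Background()
        apiClient, err := client.NewEnvClient()
        assert.NilError(t, err)

        // Run starts a default busybox container (see container.go above).
        cID := container.Run(t, ctx, apiClient)

        res, err := container.Exec(ctx, apiClient, cID, []string{"echo", "hello"})
        assert.NilError(t, err)
        assert.Equal(t, res.ExitCode, 0)
        assert.Equal(t, res.Stdout(), "hello\n")
    }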
-func Exec(ctx context.Context, cli client.APIClient, id string, cmd []string) (ExecResult, error) {
- // prepare exec
- execConfig := types.ExecConfig{
- AttachStdout: true,
- AttachStderr: true,
- Cmd: cmd,
- }
- cresp, err := cli.ContainerExecCreate(ctx, id, execConfig)
- if err != nil {
- return ExecResult{}, err
- }
- execID := cresp.ID
-
- // run it, with stdout/stderr attached
- aresp, err := cli.ContainerExecAttach(ctx, execID, types.ExecStartCheck{})
- if err != nil {
- return ExecResult{}, err
- }
- defer aresp.Close()
-
- // read the output
- var outBuf, errBuf bytes.Buffer
- outputDone := make(chan error)
-
- go func() {
- // StdCopy demultiplexes the stream into two buffers
- _, err = stdcopy.StdCopy(&outBuf, &errBuf, aresp.Reader)
- outputDone <- err
- }()
-
- select {
- case err := <-outputDone:
- if err != nil {
- return ExecResult{}, err
- }
- break
-
- case <-ctx.Done():
- return ExecResult{}, ctx.Err()
- }
-
- // get the exit code
- iresp, err := cli.ContainerExecInspect(ctx, execID)
- if err != nil {
- return ExecResult{}, err
- }
-
- return ExecResult{ExitCode: iresp.ExitCode, outBuffer: &outBuf, errBuffer: &errBuf}, nil
-}
diff --git a/vendor/github.com/docker/docker/integration/internal/container/ops.go b/vendor/github.com/docker/docker/integration/internal/container/ops.go
deleted file mode 100644
index df5598b62..000000000
--- a/vendor/github.com/docker/docker/integration/internal/container/ops.go
+++ /dev/null
@@ -1,136 +0,0 @@
-package container
-
-import (
- "fmt"
-
- containertypes "github.com/docker/docker/api/types/container"
- networktypes "github.com/docker/docker/api/types/network"
- "github.com/docker/docker/api/types/strslice"
- "github.com/docker/go-connections/nat"
-)
-
-// WithName sets the name of the container
-func WithName(name string) func(*TestContainerConfig) {
- return func(c *TestContainerConfig) {
- c.Name = name
- }
-}
-
-// WithLinks sets the links of the container
-func WithLinks(links ...string) func(*TestContainerConfig) {
- return func(c *TestContainerConfig) {
- c.HostConfig.Links = links
- }
-}
-
-// WithImage sets the image of the container
-func WithImage(image string) func(*TestContainerConfig) {
- return func(c *TestContainerConfig) {
- c.Config.Image = image
- }
-}
-
-// WithCmd sets the commands of the container
-func WithCmd(cmds ...string) func(*TestContainerConfig) {
- return func(c *TestContainerConfig) {
- c.Config.Cmd = strslice.StrSlice(cmds)
- }
-}
-
-// WithNetworkMode sets the network mode of the container
-func WithNetworkMode(mode string) func(*TestContainerConfig) {
- return func(c *TestContainerConfig) {
- c.HostConfig.NetworkMode = containertypes.NetworkMode(mode)
- }
-}
-
-// WithExposedPorts sets the exposed ports of the container
-func WithExposedPorts(ports ...string) func(*TestContainerConfig) {
- return func(c *TestContainerConfig) {
- c.Config.ExposedPorts = map[nat.Port]struct{}{}
- for _, port := range ports {
- c.Config.ExposedPorts[nat.Port(port)] = struct{}{}
- }
- }
-}
-
-// WithTty sets the TTY mode of the container
-func WithTty(tty bool) func(*TestContainerConfig) {
- return func(c *TestContainerConfig) {
- c.Config.Tty = tty
- }
-}
-
-// WithWorkingDir sets the working dir of the container
-func WithWorkingDir(dir string) func(*TestContainerConfig) {
- return func(c *TestContainerConfig) {
- c.Config.WorkingDir = dir
- }
-}
-
-// WithVolume sets the volume of the container
-func WithVolume(name string) func(*TestContainerConfig) {
- return func(c *TestContainerConfig) {
- if c.Config.Volumes ==
nil { - c.Config.Volumes = map[string]struct{}{} - } - c.Config.Volumes[name] = struct{}{} - } -} - -// WithBind sets the bind mount of the container -func WithBind(src, target string) func(*TestContainerConfig) { - return func(c *TestContainerConfig) { - c.HostConfig.Binds = append(c.HostConfig.Binds, fmt.Sprintf("%s:%s", src, target)) - } -} - -// WithIPv4 sets the specified ip for the specified network of the container -func WithIPv4(network, ip string) func(*TestContainerConfig) { - return func(c *TestContainerConfig) { - if c.NetworkingConfig.EndpointsConfig == nil { - c.NetworkingConfig.EndpointsConfig = map[string]*networktypes.EndpointSettings{} - } - if v, ok := c.NetworkingConfig.EndpointsConfig[network]; !ok || v == nil { - c.NetworkingConfig.EndpointsConfig[network] = &networktypes.EndpointSettings{} - } - if c.NetworkingConfig.EndpointsConfig[network].IPAMConfig == nil { - c.NetworkingConfig.EndpointsConfig[network].IPAMConfig = &networktypes.EndpointIPAMConfig{} - } - c.NetworkingConfig.EndpointsConfig[network].IPAMConfig.IPv4Address = ip - } -} - -// WithIPv6 sets the specified ip6 for the specified network of the container -func WithIPv6(network, ip string) func(*TestContainerConfig) { - return func(c *TestContainerConfig) { - if c.NetworkingConfig.EndpointsConfig == nil { - c.NetworkingConfig.EndpointsConfig = map[string]*networktypes.EndpointSettings{} - } - if v, ok := c.NetworkingConfig.EndpointsConfig[network]; !ok || v == nil { - c.NetworkingConfig.EndpointsConfig[network] = &networktypes.EndpointSettings{} - } - if c.NetworkingConfig.EndpointsConfig[network].IPAMConfig == nil { - c.NetworkingConfig.EndpointsConfig[network].IPAMConfig = &networktypes.EndpointIPAMConfig{} - } - c.NetworkingConfig.EndpointsConfig[network].IPAMConfig.IPv6Address = ip - } -} - -// WithLogDriver sets the log driver to use for the container -func WithLogDriver(driver string) func(*TestContainerConfig) { - return func(c *TestContainerConfig) { - if c.HostConfig == nil { - c.HostConfig = &containertypes.HostConfig{} - } - c.HostConfig.LogConfig.Type = driver - } -} - -// WithAutoRemove sets the container to be removed on exit -func WithAutoRemove(c *TestContainerConfig) { - if c.HostConfig == nil { - c.HostConfig = &containertypes.HostConfig{} - } - c.HostConfig.AutoRemove = true -} diff --git a/vendor/github.com/docker/docker/integration/internal/container/states.go b/vendor/github.com/docker/docker/integration/internal/container/states.go deleted file mode 100644 index 1ee73e01a..000000000 --- a/vendor/github.com/docker/docker/integration/internal/container/states.go +++ /dev/null @@ -1,41 +0,0 @@ -package container - -import ( - "context" - "strings" - - "github.com/docker/docker/client" - "github.com/gotestyourself/gotestyourself/poll" -) - -// IsStopped verifies the container is in stopped state. -func IsStopped(ctx context.Context, client client.APIClient, containerID string) func(log poll.LogT) poll.Result { - return func(log poll.LogT) poll.Result { - inspect, err := client.ContainerInspect(ctx, containerID) - - switch { - case err != nil: - return poll.Error(err) - case !inspect.State.Running: - return poll.Success() - default: - return poll.Continue("waiting for container to be stopped") - } - } -} - -// IsInState verifies the container is in one of the specified state, e.g., "running", "exited", etc. 
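As an aside (not part of the original file): these state checks plug straight into poll.WaitOn from gotestyourself. A hypothetical sketch, reusing Run and WithCmd from this vendored package:

    package container_test

    import (
        "context"
        "testing"
        "time"

        "github.com/docker/docker/client"
        "github.com/docker/docker/integration/internal/container"
        "github.com/gotestyourself/gotestyourself/assert"
        "github.com/gotestyourself/gotestyourself/poll"
    )

    func TestWaitForExited(t *testing.T) {
        ctx := context.Background()
        apiClient, err := client.NewEnvClient()
        assert.NilError(t, err)

        // Start a container that exits immediately, then poll its state.
        cID := container.Run(t, ctx, apiClient, container.WithCmd("true"))
        poll.WaitOn(t, container.IsInState(ctx, apiClient, cID, "exited"),
            poll.WithDelay(100*time.Millisecond), poll.WithTimeout(30*time.Second))
    }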
-func IsInState(ctx context.Context, client client.APIClient, containerID string, state ...string) func(log poll.LogT) poll.Result { - return func(log poll.LogT) poll.Result { - inspect, err := client.ContainerInspect(ctx, containerID) - if err != nil { - return poll.Error(err) - } - for _, v := range state { - if inspect.State.Status == v { - return poll.Success() - } - } - return poll.Continue("waiting for container to be one of (%s), currently %s", strings.Join(state, ", "), inspect.State.Status) - } -} diff --git a/vendor/github.com/docker/docker/integration/internal/network/network.go b/vendor/github.com/docker/docker/integration/internal/network/network.go deleted file mode 100644 index b9550362f..000000000 --- a/vendor/github.com/docker/docker/integration/internal/network/network.go +++ /dev/null @@ -1,35 +0,0 @@ -package network - -import ( - "context" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/client" - "github.com/gotestyourself/gotestyourself/assert" -) - -func createNetwork(ctx context.Context, client client.APIClient, name string, ops ...func(*types.NetworkCreate)) (string, error) { - config := types.NetworkCreate{} - - for _, op := range ops { - op(&config) - } - - n, err := client.NetworkCreate(ctx, name, config) - return n.ID, err -} - -// Create creates a network with the specified options -func Create(ctx context.Context, client client.APIClient, name string, ops ...func(*types.NetworkCreate)) (string, error) { - return createNetwork(ctx, client, name, ops...) -} - -// CreateNoError creates a network with the specified options and verifies there were no errors -func CreateNoError(t *testing.T, ctx context.Context, client client.APIClient, name string, ops ...func(*types.NetworkCreate)) string { // nolint: golint - t.Helper() - - name, err := createNetwork(ctx, client, name, ops...) 
- assert.NilError(t, err) - return name -} diff --git a/vendor/github.com/docker/docker/integration/internal/network/ops.go b/vendor/github.com/docker/docker/integration/internal/network/ops.go deleted file mode 100644 index f7639ff30..000000000 --- a/vendor/github.com/docker/docker/integration/internal/network/ops.go +++ /dev/null @@ -1,57 +0,0 @@ -package network - -import ( - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/network" -) - -// WithDriver sets the driver of the network -func WithDriver(driver string) func(*types.NetworkCreate) { - return func(n *types.NetworkCreate) { - n.Driver = driver - } -} - -// WithIPv6 Enables IPv6 on the network -func WithIPv6() func(*types.NetworkCreate) { - return func(n *types.NetworkCreate) { - n.EnableIPv6 = true - } -} - -// WithMacvlan sets the network as macvlan with the specified parent -func WithMacvlan(parent string) func(*types.NetworkCreate) { - return func(n *types.NetworkCreate) { - n.Driver = "macvlan" - if parent != "" { - n.Options = map[string]string{ - "parent": parent, - } - } - } -} - -// WithOption adds the specified key/value pair to network's options -func WithOption(key, value string) func(*types.NetworkCreate) { - return func(n *types.NetworkCreate) { - if n.Options == nil { - n.Options = map[string]string{} - } - n.Options[key] = value - } -} - -// WithIPAM adds an IPAM with the specified Subnet and Gateway to the network -func WithIPAM(subnet, gateway string) func(*types.NetworkCreate) { - return func(n *types.NetworkCreate) { - if n.IPAM == nil { - n.IPAM = &network.IPAM{} - } - - n.IPAM.Config = append(n.IPAM.Config, network.IPAMConfig{ - Subnet: subnet, - Gateway: gateway, - AuxAddress: map[string]string{}, - }) - } -} diff --git a/vendor/github.com/docker/docker/integration/internal/requirement/requirement.go b/vendor/github.com/docker/docker/integration/internal/requirement/requirement.go deleted file mode 100644 index cd498ab87..000000000 --- a/vendor/github.com/docker/docker/integration/internal/requirement/requirement.go +++ /dev/null @@ -1,53 +0,0 @@ -package requirement // import "github.com/docker/docker/integration/internal/requirement" - -import ( - "net/http" - "strings" - "testing" - "time" - - "github.com/docker/docker/pkg/parsers/kernel" - "github.com/gotestyourself/gotestyourself/icmd" -) - -// HasHubConnectivity checks to see if https://hub.docker.com is -// accessible from the present environment -func HasHubConnectivity(t *testing.T) bool { - t.Helper() - // Set a timeout on the GET at 15s - var timeout = 15 * time.Second - var url = "https://hub.docker.com" - - client := http.Client{Timeout: timeout} - resp, err := client.Get(url) - if err != nil && strings.Contains(err.Error(), "use of closed network connection") { - t.Fatalf("Timeout for GET request on %s", url) - } - if resp != nil { - resp.Body.Close() - } - return err == nil -} - -func overlayFSSupported() bool { - result := icmd.RunCommand("/bin/sh", "-c", "cat /proc/filesystems") - if result.Error != nil { - return false - } - return strings.Contains(result.Combined(), "overlay\n") -} - -// Overlay2Supported returns true if the current system supports overlay2 as graphdriver -func Overlay2Supported(kernelVersion string) bool { - if !overlayFSSupported() { - return false - } - - daemonV, err := kernel.ParseRelease(kernelVersion) - if err != nil { - return false - } - requiredV := kernel.VersionInfo{Kernel: 4} - return kernel.CompareKernelVersion(*daemonV, requiredV) > -1 - -} diff --git 
a/vendor/github.com/docker/docker/integration/internal/swarm/service.go b/vendor/github.com/docker/docker/integration/internal/swarm/service.go deleted file mode 100644 index 5567ad6ed..000000000 --- a/vendor/github.com/docker/docker/integration/internal/swarm/service.go +++ /dev/null @@ -1,200 +0,0 @@ -package swarm - -import ( - "context" - "runtime" - "testing" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - swarmtypes "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/internal/test/daemon" - "github.com/docker/docker/internal/test/environment" - "github.com/gotestyourself/gotestyourself/assert" - "github.com/gotestyourself/gotestyourself/poll" - "github.com/gotestyourself/gotestyourself/skip" -) - -// ServicePoll tweaks the pollSettings for `service` -func ServicePoll(config *poll.Settings) { - // Override the default pollSettings for `service` resource here ... - config.Timeout = 30 * time.Second - config.Delay = 100 * time.Millisecond - if runtime.GOARCH == "arm64" || runtime.GOARCH == "arm" { - config.Timeout = 90 * time.Second - } -} - -// NetworkPoll tweaks the pollSettings for `network` -func NetworkPoll(config *poll.Settings) { - // Override the default pollSettings for `network` resource here ... - config.Timeout = 30 * time.Second - config.Delay = 100 * time.Millisecond - - if runtime.GOARCH == "arm64" || runtime.GOARCH == "arm" { - config.Timeout = 50 * time.Second - } -} - -// ContainerPoll tweaks the pollSettings for `container` -func ContainerPoll(config *poll.Settings) { - // Override the default pollSettings for `container` resource here ... - - if runtime.GOARCH == "arm64" || runtime.GOARCH == "arm" { - config.Timeout = 30 * time.Second - config.Delay = 100 * time.Millisecond - } -} - -// NewSwarm creates a swarm daemon for testing -func NewSwarm(t *testing.T, testEnv *environment.Execution, ops ...func(*daemon.Daemon)) *daemon.Daemon { - t.Helper() - skip.If(t, testEnv.IsRemoteDaemon) - if testEnv.DaemonInfo.ExperimentalBuild { - ops = append(ops, daemon.WithExperimental) - } - d := daemon.New(t, ops...) - d.StartAndSwarmInit(t) - return d -} - -// ServiceSpecOpt is used with `CreateService` to pass in service spec modifiers -type ServiceSpecOpt func(*swarmtypes.ServiceSpec) - -// CreateService creates a service on the passed in swarm daemon. 
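For illustration only (not part of the original file), a sketch of creating a throwaway swarm and a replicated service with the option helpers below; the package-level testEnv is assumed to be populated by the suite's TestMain, and the image, command, and replica count are example values:

    package swarm_test

    import (
        "testing"

        "github.com/docker/docker/integration/internal/swarm"
        "github.com/docker/docker/internal/test/environment"
    )

    // testEnv is assumed to be initialized elsewhere (e.g. in TestMain).
    var testEnv *environment.Execution

    func TestCreateBusyboxService(t *testing.T) {
        d := swarm.NewSwarm(t, testEnv)
        defer d.Stop(t)

        serviceID := swarm.CreateService(t, d,
            swarm.ServiceWithImage("busybox:latest"),
            swarm.ServiceWithCommand([]string{"/bin/top"}),
            swarm.ServiceWithReplicas(2),
        )

        // Once the service converges, its running tasks can be listed:
        _ = swarm.GetRunningTasks(t, d, serviceID)
    }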
-func CreateService(t *testing.T, d *daemon.Daemon, opts ...ServiceSpecOpt) string { - t.Helper() - spec := defaultServiceSpec() - for _, o := range opts { - o(&spec) - } - - client := d.NewClientT(t) - defer client.Close() - - resp, err := client.ServiceCreate(context.Background(), spec, types.ServiceCreateOptions{}) - assert.NilError(t, err, "error creating service") - return resp.ID -} - -func defaultServiceSpec() swarmtypes.ServiceSpec { - var spec swarmtypes.ServiceSpec - ServiceWithImage("busybox:latest")(&spec) - ServiceWithCommand([]string{"/bin/top"})(&spec) - ServiceWithReplicas(1)(&spec) - return spec -} - -// ServiceWithInit sets whether the service should use init or not -func ServiceWithInit(b *bool) func(*swarmtypes.ServiceSpec) { - return func(spec *swarmtypes.ServiceSpec) { - ensureContainerSpec(spec) - spec.TaskTemplate.ContainerSpec.Init = b - } -} - -// ServiceWithImage sets the image to use for the service -func ServiceWithImage(image string) func(*swarmtypes.ServiceSpec) { - return func(spec *swarmtypes.ServiceSpec) { - ensureContainerSpec(spec) - spec.TaskTemplate.ContainerSpec.Image = image - } -} - -// ServiceWithCommand sets the command to use for the service -func ServiceWithCommand(cmd []string) ServiceSpecOpt { - return func(spec *swarmtypes.ServiceSpec) { - ensureContainerSpec(spec) - spec.TaskTemplate.ContainerSpec.Command = cmd - } -} - -// ServiceWithConfig adds the config reference to the service -func ServiceWithConfig(configRef *swarmtypes.ConfigReference) ServiceSpecOpt { - return func(spec *swarmtypes.ServiceSpec) { - ensureContainerSpec(spec) - spec.TaskTemplate.ContainerSpec.Configs = append(spec.TaskTemplate.ContainerSpec.Configs, configRef) - } -} - -// ServiceWithSecret adds the secret reference to the service -func ServiceWithSecret(secretRef *swarmtypes.SecretReference) ServiceSpecOpt { - return func(spec *swarmtypes.ServiceSpec) { - ensureContainerSpec(spec) - spec.TaskTemplate.ContainerSpec.Secrets = append(spec.TaskTemplate.ContainerSpec.Secrets, secretRef) - } -} - -// ServiceWithReplicas sets the replicas for the service -func ServiceWithReplicas(n uint64) ServiceSpecOpt { - return func(spec *swarmtypes.ServiceSpec) { - spec.Mode = swarmtypes.ServiceMode{ - Replicated: &swarmtypes.ReplicatedService{ - Replicas: &n, - }, - } - } -} - -// ServiceWithName sets the name of the service -func ServiceWithName(name string) ServiceSpecOpt { - return func(spec *swarmtypes.ServiceSpec) { - spec.Annotations.Name = name - } -} - -// ServiceWithNetwork sets the network of the service -func ServiceWithNetwork(network string) ServiceSpecOpt { - return func(spec *swarmtypes.ServiceSpec) { - spec.TaskTemplate.Networks = append(spec.TaskTemplate.Networks, - swarmtypes.NetworkAttachmentConfig{Target: network}) - } -} - -// ServiceWithEndpoint sets the Endpoint of the service -func ServiceWithEndpoint(endpoint *swarmtypes.EndpointSpec) ServiceSpecOpt { - return func(spec *swarmtypes.ServiceSpec) { - spec.EndpointSpec = endpoint - } -} - -// GetRunningTasks gets the list of running tasks for a service -func GetRunningTasks(t *testing.T, d *daemon.Daemon, serviceID string) []swarmtypes.Task { - t.Helper() - client := d.NewClientT(t) - defer client.Close() - - filterArgs := filters.NewArgs() - filterArgs.Add("desired-state", "running") - filterArgs.Add("service", serviceID) - - options := types.TaskListOptions{ - Filters: filterArgs, - } - tasks, err := client.TaskList(context.Background(), options) - assert.NilError(t, err) - return tasks -} - -// ExecTask runs 
the passed-in exec config on the given task
-func ExecTask(t *testing.T, d *daemon.Daemon, task swarmtypes.Task, config types.ExecConfig) types.HijackedResponse {
- t.Helper()
- client := d.NewClientT(t)
- defer client.Close()
-
- ctx := context.Background()
- resp, err := client.ContainerExecCreate(ctx, task.Status.ContainerStatus.ContainerID, config)
- assert.NilError(t, err, "error creating exec")
-
- startCheck := types.ExecStartCheck{}
- attach, err := client.ContainerExecAttach(ctx, resp.ID, startCheck)
- assert.NilError(t, err, "error attaching to exec")
- return attach
-}
-
-func ensureContainerSpec(spec *swarmtypes.ServiceSpec) {
- if spec.TaskTemplate.ContainerSpec == nil {
- spec.TaskTemplate.ContainerSpec = &swarmtypes.ContainerSpec{}
- }
-}
diff --git a/vendor/github.com/docker/docker/integration/network/helpers.go b/vendor/github.com/docker/docker/integration/network/helpers.go
deleted file mode 100644
index df609dd41..000000000
--- a/vendor/github.com/docker/docker/integration/network/helpers.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package network
-
-import (
- "context"
- "fmt"
- "testing"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/client"
- "github.com/docker/docker/pkg/parsers/kernel"
- "github.com/gotestyourself/gotestyourself/assert/cmp"
- "github.com/gotestyourself/gotestyourself/icmd"
-)
-
-// CreateMasterDummy creates a dummy network interface
-func CreateMasterDummy(t *testing.T, master string) {
- // ip link add <master> type dummy
- icmd.RunCommand("ip", "link", "add", master, "type", "dummy").Assert(t, icmd.Success)
- icmd.RunCommand("ip", "link", "set", master, "up").Assert(t, icmd.Success)
-}
-
-// CreateVlanInterface creates a vlan network interface
-func CreateVlanInterface(t *testing.T, master, slave, id string) {
- // ip link add link <master> name <slave>.<id> type vlan id <id>
- icmd.RunCommand("ip", "link", "add", "link", master, "name", slave, "type", "vlan", "id", id).Assert(t, icmd.Success)
- // ip link set <slave> up
- icmd.RunCommand("ip", "link", "set", slave, "up").Assert(t, icmd.Success)
-}
-
-// DeleteInterface deletes a network interface
-func DeleteInterface(t *testing.T, ifName string) {
- icmd.RunCommand("ip", "link", "delete", ifName).Assert(t, icmd.Success)
- icmd.RunCommand("iptables", "-t", "nat", "--flush").Assert(t, icmd.Success)
- icmd.RunCommand("iptables", "--flush").Assert(t, icmd.Success)
-}
-
-// LinkExists verifies that a link exists
-func LinkExists(t *testing.T, master string) {
- // verify the specified link exists, ip link show <master>
- icmd.RunCommand("ip", "link", "show", master).Assert(t, icmd.Success)
-}
-
-// IsNetworkAvailable provides a comparison to check if a docker network is available
-func IsNetworkAvailable(c client.NetworkAPIClient, name string) cmp.Comparison {
- return func() cmp.Result {
- networks, err := c.NetworkList(context.Background(), types.NetworkListOptions{})
- if err != nil {
- return cmp.ResultFromError(err)
- }
- for _, network := range networks {
- if network.Name == name {
- return cmp.ResultSuccess
- }
- }
- return cmp.ResultFailure(fmt.Sprintf("could not find network %s", name))
- }
-}
-
-// IsNetworkNotAvailable provides a comparison to check if a docker network is not available
-func IsNetworkNotAvailable(c client.NetworkAPIClient, name string) cmp.Comparison {
- return func() cmp.Result {
- networks, err := c.NetworkList(context.Background(), types.NetworkListOptions{})
- if err != nil {
- return cmp.ResultFromError(err)
- }
- for _, network := range networks {
- if network.Name == name {
- return cmp.ResultFailure(fmt.Sprintf("network %s is still present", name))
- }
- }
- return cmp.ResultSuccess
- }
-}
-
-// CheckKernelMajorVersionGreaterOrEqualThen returns whether the kernel version is greater than or equal to the one provided
-func CheckKernelMajorVersionGreaterOrEqualThen(kernelVersion int, majorVersion int) bool {
- kv, err := kernel.GetKernelVersion()
- if err != nil {
- return false
- }
- if kv.Kernel < kernelVersion || (kv.Kernel == kernelVersion && kv.Major < majorVersion) {
- return false
- }
- return true
-}
diff --git a/vendor/github.com/docker/docker/integration/plugin/logging/cmd/close_on_start/main.go b/vendor/github.com/docker/docker/integration/plugin/logging/cmd/close_on_start/main.go
deleted file mode 100644
index 6891d6a99..000000000
--- a/vendor/github.com/docker/docker/integration/plugin/logging/cmd/close_on_start/main.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package main
-
-import (
- "encoding/json"
- "fmt"
- "net"
- "net/http"
- "os"
-)
-
-type start struct {
- File string
-}
-
-func main() {
- l, err := net.Listen("unix", "/run/docker/plugins/plugin.sock")
- if err != nil {
- panic(err)
- }
-
- mux := http.NewServeMux()
- mux.HandleFunc("/LogDriver.StartLogging", func(w http.ResponseWriter, req *http.Request) {
- startReq := &start{}
- if err := json.NewDecoder(req.Body).Decode(startReq); err != nil {
- http.Error(w, err.Error(), http.StatusBadRequest)
- return
- }
-
- f, err := os.OpenFile(startReq.File, os.O_RDONLY, 0600)
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
-
- // Close the file immediately; this allows us to test what happens in the daemon when the plugin has closed the
- // file or, for example, the plugin has crashed.
- f.Close() - - w.WriteHeader(http.StatusOK) - fmt.Fprintln(w, `{}`) - }) - server := http.Server{ - Addr: l.Addr().String(), - Handler: mux, - } - - server.Serve(l) -} diff --git a/vendor/github.com/docker/docker/integration/plugin/logging/cmd/dummy/main.go b/vendor/github.com/docker/docker/integration/plugin/logging/cmd/dummy/main.go deleted file mode 100644 index f91b4f3b0..000000000 --- a/vendor/github.com/docker/docker/integration/plugin/logging/cmd/dummy/main.go +++ /dev/null @@ -1,19 +0,0 @@ -package main - -import ( - "net" - "net/http" -) - -func main() { - l, err := net.Listen("unix", "/run/docker/plugins/plugin.sock") - if err != nil { - panic(err) - } - - server := http.Server{ - Addr: l.Addr().String(), - Handler: http.NewServeMux(), - } - server.Serve(l) -} diff --git a/vendor/github.com/docker/docker/integration/plugin/volumes/cmd/dummy/main.go b/vendor/github.com/docker/docker/integration/plugin/volumes/cmd/dummy/main.go deleted file mode 100644 index f91b4f3b0..000000000 --- a/vendor/github.com/docker/docker/integration/plugin/volumes/cmd/dummy/main.go +++ /dev/null @@ -1,19 +0,0 @@ -package main - -import ( - "net" - "net/http" -) - -func main() { - l, err := net.Listen("unix", "/run/docker/plugins/plugin.sock") - if err != nil { - panic(err) - } - - server := http.Server{ - Addr: l.Addr().String(), - Handler: http.NewServeMux(), - } - server.Serve(l) -} diff --git a/vendor/github.com/docker/docker/internal/test/daemon/config.go b/vendor/github.com/docker/docker/internal/test/daemon/config.go deleted file mode 100644 index c57010db9..000000000 --- a/vendor/github.com/docker/docker/internal/test/daemon/config.go +++ /dev/null @@ -1,82 +0,0 @@ -package daemon - -import ( - "context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/internal/test" - "github.com/gotestyourself/gotestyourself/assert" -) - -// ConfigConstructor defines a swarm config constructor -type ConfigConstructor func(*swarm.Config) - -// CreateConfig creates a config given the specified spec -func (d *Daemon) CreateConfig(t assert.TestingT, configSpec swarm.ConfigSpec) string { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - cli := d.NewClientT(t) - defer cli.Close() - - scr, err := cli.ConfigCreate(context.Background(), configSpec) - assert.NilError(t, err) - return scr.ID -} - -// ListConfigs returns the list of the current swarm configs -func (d *Daemon) ListConfigs(t assert.TestingT) []swarm.Config { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - cli := d.NewClientT(t) - defer cli.Close() - - configs, err := cli.ConfigList(context.Background(), types.ConfigListOptions{}) - assert.NilError(t, err) - return configs -} - -// GetConfig returns a swarm config identified by the specified id -func (d *Daemon) GetConfig(t assert.TestingT, id string) *swarm.Config { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - cli := d.NewClientT(t) - defer cli.Close() - - config, _, err := cli.ConfigInspectWithRaw(context.Background(), id) - assert.NilError(t, err) - return &config -} - -// DeleteConfig removes the swarm config identified by the specified id -func (d *Daemon) DeleteConfig(t assert.TestingT, id string) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - cli := d.NewClientT(t) - defer cli.Close() - - err := cli.ConfigRemove(context.Background(), id) - assert.NilError(t, err) -} - -// UpdateConfig updates the swarm config identified by the specified id -// Currently, only label update is supported. 
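A sketch, for illustration only, of the config lifecycle these helpers cover; daemon construction follows daemon.go in this same package, and the config name and label values are invented:

    package daemon_test

    import (
        "testing"

        "github.com/docker/docker/api/types/swarm"
        "github.com/docker/docker/internal/test/daemon"
    )

    func TestConfigLabelUpdate(t *testing.T) {
        d := daemon.New(t)
        d.StartAndSwarmInit(t)
        defer d.Stop(t)

        id := d.CreateConfig(t, swarm.ConfigSpec{
            Annotations: swarm.Annotations{Name: "demo-config"},
            Data:        []byte("payload"),
        })

        // Per the doc comment, only label updates are supported.
        d.UpdateConfig(t, id, func(c *swarm.Config) {
            c.Spec.Labels = map[string]string{"owner": "tests"}
        })

        d.DeleteConfig(t, id)
    }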
-func (d *Daemon) UpdateConfig(t assert.TestingT, id string, f ...ConfigConstructor) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - cli := d.NewClientT(t) - defer cli.Close() - - config := d.GetConfig(t, id) - for _, fn := range f { - fn(config) - } - - err := cli.ConfigUpdate(context.Background(), config.ID, config.Version, config.Spec) - assert.NilError(t, err) -} diff --git a/vendor/github.com/docker/docker/internal/test/daemon/container.go b/vendor/github.com/docker/docker/internal/test/daemon/container.go deleted file mode 100644 index 6a0ced944..000000000 --- a/vendor/github.com/docker/docker/internal/test/daemon/container.go +++ /dev/null @@ -1,40 +0,0 @@ -package daemon - -import ( - "context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/internal/test" - "github.com/gotestyourself/gotestyourself/assert" -) - -// ActiveContainers returns the list of ids of the currently running containers -func (d *Daemon) ActiveContainers(t assert.TestingT) []string { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - cli := d.NewClientT(t) - defer cli.Close() - - containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) - assert.NilError(t, err) - - ids := make([]string, len(containers)) - for i, c := range containers { - ids[i] = c.ID - } - return ids -} - -// FindContainerIP returns the ip of the specified container -func (d *Daemon) FindContainerIP(t assert.TestingT, id string) string { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - cli := d.NewClientT(t) - defer cli.Close() - - i, err := cli.ContainerInspect(context.Background(), id) - assert.NilError(t, err) - return i.NetworkSettings.IPAddress -} diff --git a/vendor/github.com/docker/docker/internal/test/daemon/daemon.go b/vendor/github.com/docker/docker/internal/test/daemon/daemon.go deleted file mode 100644 index a0d7ed485..000000000 --- a/vendor/github.com/docker/docker/internal/test/daemon/daemon.go +++ /dev/null @@ -1,681 +0,0 @@ -package daemon // import "github.com/docker/docker/internal/test/daemon" - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "os" - "os/exec" - "path/filepath" - "strconv" - "strings" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/events" - "github.com/docker/docker/client" - "github.com/docker/docker/internal/test" - "github.com/docker/docker/internal/test/request" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" - "github.com/gotestyourself/gotestyourself/assert" - "github.com/pkg/errors" -) - -type testingT interface { - assert.TestingT - logT - Fatalf(string, ...interface{}) -} - -type logT interface { - Logf(string, ...interface{}) -} - -const defaultDockerdBinary = "dockerd" - -var errDaemonNotStarted = errors.New("daemon not started") - -// SockRoot holds the path of the default docker integration daemon socket -var SockRoot = filepath.Join(os.TempDir(), "docker-integration") - -type clientConfig struct { - transport *http.Transport - scheme string - addr string -} - -// Daemon represents a Docker daemon for the testing framework -type Daemon struct { - GlobalFlags []string - Root string - Folder string - Wait chan error - UseDefaultHost bool - UseDefaultTLSHost bool - - id string - logFile *os.File - cmd *exec.Cmd - storageDriver string - userlandProxy bool - execRoot string - experimental bool 
- init bool
- dockerdBinary string
- log logT
-
- // swarm-related fields
- swarmListenAddr string
- SwarmPort int // FIXME(vdemeester) should probably not be exported
-
- // cached information
- CachedInfo types.Info
-}
-
-// New returns a Daemon instance to be used for testing.
-// This will create a directory such as d123456789 in the folder specified by $DOCKER_INTEGRATION_DAEMON_DEST or $DEST.
-// The daemon will not automatically start.
-func New(t testingT, ops ...func(*Daemon)) *Daemon {
- if ht, ok := t.(test.HelperT); ok {
- ht.Helper()
- }
- dest := os.Getenv("DOCKER_INTEGRATION_DAEMON_DEST")
- if dest == "" {
- dest = os.Getenv("DEST")
- }
- assert.Check(t, dest != "", "Please set the DOCKER_INTEGRATION_DAEMON_DEST or the DEST environment variable")
-
- storageDriver := os.Getenv("DOCKER_GRAPHDRIVER")
-
- assert.NilError(t, os.MkdirAll(SockRoot, 0700), "could not create daemon socket root")
-
- id := fmt.Sprintf("d%s", stringid.TruncateID(stringid.GenerateRandomID()))
- dir := filepath.Join(dest, id)
- daemonFolder, err := filepath.Abs(dir)
- assert.NilError(t, err, "Could not make %q an absolute path", dir)
- daemonRoot := filepath.Join(daemonFolder, "root")
-
- assert.NilError(t, os.MkdirAll(daemonRoot, 0755), "Could not create daemon root %q", dir)
-
- userlandProxy := true
- if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" {
- // Only honor the value when it actually parses as a boolean.
- if val, err := strconv.ParseBool(env); err == nil {
- userlandProxy = val
- }
- }
- d := &Daemon{
- id: id,
- Folder: daemonFolder,
- Root: daemonRoot,
- storageDriver: storageDriver,
- userlandProxy: userlandProxy,
- execRoot: filepath.Join(os.TempDir(), "docker-execroot", id),
- dockerdBinary: defaultDockerdBinary,
- swarmListenAddr: defaultSwarmListenAddr,
- SwarmPort: DefaultSwarmPort,
- log: t,
- }
-
- for _, op := range ops {
- op(d)
- }
-
- return d
-}
-
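The constructor above uses the functional-options pattern: each entry in ops is a plain func(*Daemon) mutator, applied in order after the defaults are filled in. For context, a typical call site in an integration test looked roughly like this sketch (the test name, binary name, and port are illustrative only; the options are the ones defined in ops.go further down this patch, and New requires DOCKER_INTEGRATION_DAEMON_DEST or DEST to be set):

    func TestDaemonLifecycle(t *testing.T) {
        // Options run in order, after New fills in the defaults.
        d := daemon.New(t,
            daemon.WithDockerdBinary("dockerd-dev"), // hypothetical binary name
            daemon.WithSwarmPort(2477),
        )
        d.Start(t)
        defer d.Cleanup(t) // runs last: removes raft dir and netns mounts
        defer d.Stop(t)    // runs first: SIGINT, then SIGKILL on timeout
    }
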
-// RootDir returns the root directory of the daemon.
-func (d *Daemon) RootDir() string {
- return d.Root
-}
-
-// ID returns the generated id of the daemon
-func (d *Daemon) ID() string {
- return d.id
-}
-
-// StorageDriver returns the configured storage driver of the daemon
-func (d *Daemon) StorageDriver() string {
- return d.storageDriver
-}
-
-// Sock returns the socket path of the daemon
-func (d *Daemon) Sock() string {
- return "unix://" + d.sockPath()
-}
-
-func (d *Daemon) sockPath() string {
- return filepath.Join(SockRoot, d.id+".sock")
-}
-
-// LogFileName returns the path of the daemon's log file
-func (d *Daemon) LogFileName() string {
- return d.logFile.Name()
-}
-
-// ReadLogFile returns the content of the daemon log file
-func (d *Daemon) ReadLogFile() ([]byte, error) {
- return ioutil.ReadFile(d.logFile.Name())
-}
-
-// NewClient creates a new client based on the daemon's socket path
-// FIXME(vdemeester): replace NewClient with NewClientT
-func (d *Daemon) NewClient() (*client.Client, error) {
- return client.NewClientWithOpts(
- client.FromEnv,
- client.WithHost(d.Sock()))
-}
-
-// NewClientT creates a new client based on the daemon's socket path
-// FIXME(vdemeester): replace NewClient with NewClientT
-func (d *Daemon) NewClientT(t assert.TestingT) *client.Client {
- if ht, ok := t.(test.HelperT); ok {
- ht.Helper()
- }
- c, err := client.NewClientWithOpts(
- client.FromEnv,
- client.WithHost(d.Sock()))
- assert.NilError(t, err, "cannot create daemon client")
- return c
-}
-
-// Cleanup cleans the daemon files: exec root (network namespaces, ...), swarmkit files
-func (d *Daemon) Cleanup(t testingT) {
- if ht, ok := t.(test.HelperT); ok {
- ht.Helper()
- }
- // Cleanup swarmkit wal files if present
- cleanupRaftDir(t, d.Root)
- cleanupNetworkNamespace(t, d.execRoot)
-}
-
-// Start starts the daemon and returns once it is ready to receive requests.
-func (d *Daemon) Start(t testingT, args ...string) {
- if ht, ok := t.(test.HelperT); ok {
- ht.Helper()
- }
- if err := d.StartWithError(args...); err != nil {
- t.Fatalf("Error starting daemon with arguments: %v", args)
- }
-}
-
-// StartWithError starts the daemon and returns once it is ready to receive requests.
-// It returns an error in case it couldn't start.
-func (d *Daemon) StartWithError(args ...string) error {
- logFile, err := os.OpenFile(filepath.Join(d.Folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600)
- if err != nil {
- return errors.Wrapf(err, "[%s] Could not create %s/docker.log", d.id, d.Folder)
- }
-
- return d.StartWithLogFile(logFile, args...)
-}
-
-// StartWithLogFile will start the daemon and attach its streams to a given file.
-func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error {
- d.handleUserns()
- dockerdBinary, err := exec.LookPath(d.dockerdBinary)
- if err != nil {
- return errors.Wrapf(err, "[%s] could not find docker binary in $PATH", d.id)
- }
- args := append(d.GlobalFlags,
- "--containerd", "/var/run/docker/containerd/docker-containerd.sock",
- "--data-root", d.Root,
- "--exec-root", d.execRoot,
- "--pidfile", fmt.Sprintf("%s/docker.pid", d.Folder),
- fmt.Sprintf("--userland-proxy=%t", d.userlandProxy),
- )
- if d.experimental {
- args = append(args, "--experimental")
- }
- if d.init {
- args = append(args, "--init")
- }
- if !(d.UseDefaultHost || d.UseDefaultTLSHost) {
- args = append(args, []string{"--host", d.Sock()}...)
- }
- if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
- args = append(args, []string{"--userns-remap", root}...)
- } - - // If we don't explicitly set the log-level or debug flag(-D) then - // turn on debug mode - foundLog := false - foundSd := false - for _, a := range providedArgs { - if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") || strings.Contains(a, "--debug") { - foundLog = true - } - if strings.Contains(a, "--storage-driver") { - foundSd = true - } - } - if !foundLog { - args = append(args, "--debug") - } - if d.storageDriver != "" && !foundSd { - args = append(args, "--storage-driver", d.storageDriver) - } - - args = append(args, providedArgs...) - d.cmd = exec.Command(dockerdBinary, args...) - d.cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1") - d.cmd.Stdout = out - d.cmd.Stderr = out - d.logFile = out - - if err := d.cmd.Start(); err != nil { - return errors.Errorf("[%s] could not start daemon container: %v", d.id, err) - } - - wait := make(chan error) - - go func() { - wait <- d.cmd.Wait() - d.log.Logf("[%s] exiting daemon", d.id) - close(wait) - }() - - d.Wait = wait - - tick := time.Tick(500 * time.Millisecond) - // make sure daemon is ready to receive requests - startTime := time.Now().Unix() - for { - d.log.Logf("[%s] waiting for daemon to start", d.id) - if time.Now().Unix()-startTime > 5 { - // After 5 seconds, give up - return errors.Errorf("[%s] Daemon exited and never started", d.id) - } - select { - case <-time.After(2 * time.Second): - return errors.Errorf("[%s] timeout: daemon does not respond", d.id) - case <-tick: - clientConfig, err := d.getClientConfig() - if err != nil { - return err - } - - client := &http.Client{ - Transport: clientConfig.transport, - } - - req, err := http.NewRequest("GET", "/_ping", nil) - if err != nil { - return errors.Wrapf(err, "[%s] could not create new request", d.id) - } - req.URL.Host = clientConfig.addr - req.URL.Scheme = clientConfig.scheme - resp, err := client.Do(req) - if err != nil { - continue - } - resp.Body.Close() - if resp.StatusCode != http.StatusOK { - d.log.Logf("[%s] received status != 200 OK: %s\n", d.id, resp.Status) - } - d.log.Logf("[%s] daemon started\n", d.id) - d.Root, err = d.queryRootDir() - if err != nil { - return errors.Errorf("[%s] error querying daemon for root directory: %v", d.id, err) - } - return nil - case <-d.Wait: - return errors.Errorf("[%s] Daemon exited during startup", d.id) - } - } -} - -// StartWithBusybox will first start the daemon with Daemon.Start() -// then save the busybox image from the main daemon and load it into this Daemon instance. -func (d *Daemon) StartWithBusybox(t testingT, arg ...string) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - d.Start(t, arg...) 
- d.LoadBusybox(t)
-}
-
-// Kill will send a SIGKILL to the daemon
-func (d *Daemon) Kill() error {
- if d.cmd == nil || d.Wait == nil {
- return errDaemonNotStarted
- }
-
- defer func() {
- d.logFile.Close()
- d.cmd = nil
- }()
-
- if err := d.cmd.Process.Kill(); err != nil {
- return err
- }
-
- return os.Remove(fmt.Sprintf("%s/docker.pid", d.Folder))
-}
-
-// Pid returns the pid of the daemon
-func (d *Daemon) Pid() int {
- return d.cmd.Process.Pid
-}
-
-// Interrupt stops the daemon by sending it an Interrupt signal
-func (d *Daemon) Interrupt() error {
- return d.Signal(os.Interrupt)
-}
-
-// Signal sends the specified signal to the daemon if running
-func (d *Daemon) Signal(signal os.Signal) error {
- if d.cmd == nil || d.Wait == nil {
- return errDaemonNotStarted
- }
- return d.cmd.Process.Signal(signal)
-}
-
-// DumpStackAndQuit sends SIGQUIT to the daemon, which triggers it to dump its
-// stack to its log file and exit
-// This is used primarily for gathering debug information on test timeout
-func (d *Daemon) DumpStackAndQuit() {
- if d.cmd == nil || d.cmd.Process == nil {
- return
- }
- SignalDaemonDump(d.cmd.Process.Pid)
-}
-
-// Stop will send a SIGINT every second and wait for the daemon to stop.
-// If it times out, a SIGKILL is sent.
-// Stop will not delete the daemon directory. If a purged daemon is needed,
-// instantiate a new one with NewDaemon.
-// If an error occurs while stopping the daemon, the test will fail.
-func (d *Daemon) Stop(t testingT) {
- if ht, ok := t.(test.HelperT); ok {
- ht.Helper()
- }
- err := d.StopWithError()
- if err != nil {
- if err != errDaemonNotStarted {
- t.Fatalf("Error while stopping the daemon %s: %v", d.id, err)
- } else {
- t.Logf("Daemon %s is not started", d.id)
- }
- }
-}
-
-// StopWithError will send a SIGINT every second and wait for the daemon to stop.
-// If it times out, a SIGKILL is sent.
-// Stop will not delete the daemon directory. If a purged daemon is needed,
-// instantiate a new one with NewDaemon.
-func (d *Daemon) StopWithError() error {
- if d.cmd == nil || d.Wait == nil {
- return errDaemonNotStarted
- }
-
- defer func() {
- d.logFile.Close()
- d.cmd = nil
- }()
-
- i := 1
- tick := time.Tick(time.Second)
-
- if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
- if strings.Contains(err.Error(), "os: process already finished") {
- return errDaemonNotStarted
- }
- return errors.Errorf("could not send signal: %v", err)
- }
-out1:
- for {
- select {
- case err := <-d.Wait:
- return err
- case <-time.After(20 * time.Second):
- // time for stopping jobs and running onShutdown hooks
- d.log.Logf("[%s] daemon did not stop within 20 seconds", d.id)
- break out1
- }
- }
-
-out2:
- for {
- select {
- case err := <-d.Wait:
- return err
- case <-tick:
- i++
- if i > 5 {
- d.log.Logf("tried to interrupt daemon %d times, now trying to kill it", i)
- break out2
- }
- d.log.Logf("Attempt #%d: daemon is still running with pid %d", i, d.cmd.Process.Pid)
- if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
- return errors.Errorf("could not send signal: %v", err)
- }
- }
- }
-
- if err := d.cmd.Process.Kill(); err != nil {
- d.log.Logf("Could not kill daemon: %v", err)
- return err
- }
-
- d.cmd.Wait()
-
- return os.Remove(fmt.Sprintf("%s/docker.pid", d.Folder))
-}
-
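The shutdown path above escalates deliberately: one SIGINT, a 20-second grace period for shutdown hooks, then one SIGINT per second, and finally SIGKILL after five attempts. Tests normally stay at the level of the wrappers; a minimal sketch of a stop/restart flow (the flag is illustrative, and busybox must be loadable from the host daemon):

    d.StartWithBusybox(t, "--iptables=false")
    // ... exercise the daemon through a client bound to d.Sock() ...
    d.Restart(t) // walks the SIGINT/SIGKILL ladder above, then starts again
    d.Stop(t)

-// Restart will restart the daemon by first stopping it and then starting it.
-// If an error occurs while starting the daemon, the test will fail.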
-func (d *Daemon) Restart(t testingT, args ...string) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - d.Stop(t) - d.Start(t, args...) -} - -// RestartWithError will restart the daemon by first stopping it and then starting it. -func (d *Daemon) RestartWithError(arg ...string) error { - if err := d.StopWithError(); err != nil { - return err - } - return d.StartWithError(arg...) -} - -func (d *Daemon) handleUserns() { - // in the case of tests running a user namespace-enabled daemon, we have resolved - // d.Root to be the actual final path of the graph dir after the "uid.gid" of - // remapped root is added--we need to subtract it from the path before calling - // start or else we will continue making subdirectories rather than truly restarting - // with the same location/root: - if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { - d.Root = filepath.Dir(d.Root) - } -} - -// ReloadConfig asks the daemon to reload its configuration -func (d *Daemon) ReloadConfig() error { - if d.cmd == nil || d.cmd.Process == nil { - return errors.New("daemon is not running") - } - - errCh := make(chan error) - started := make(chan struct{}) - go func() { - _, body, err := request.Get("/events", request.Host(d.Sock())) - close(started) - if err != nil { - errCh <- err - } - defer body.Close() - dec := json.NewDecoder(body) - for { - var e events.Message - if err := dec.Decode(&e); err != nil { - errCh <- err - return - } - if e.Type != events.DaemonEventType { - continue - } - if e.Action != "reload" { - continue - } - close(errCh) // notify that we are done - return - } - }() - - <-started - if err := signalDaemonReload(d.cmd.Process.Pid); err != nil { - return errors.Errorf("error signaling daemon reload: %v", err) - } - select { - case err := <-errCh: - if err != nil { - return errors.Errorf("error waiting for daemon reload event: %v", err) - } - case <-time.After(30 * time.Second): - return errors.New("timeout waiting for daemon reload event") - } - return nil -} - -// LoadBusybox image into the daemon -func (d *Daemon) LoadBusybox(t assert.TestingT) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - clientHost, err := client.NewEnvClient() - assert.NilError(t, err, "failed to create client") - defer clientHost.Close() - - ctx := context.Background() - reader, err := clientHost.ImageSave(ctx, []string{"busybox:latest"}) - assert.NilError(t, err, "failed to download busybox") - defer reader.Close() - - client, err := d.NewClient() - assert.NilError(t, err, "failed to create client") - defer client.Close() - - resp, err := client.ImageLoad(ctx, reader, true) - assert.NilError(t, err, "failed to load busybox") - defer resp.Body.Close() -} - -func (d *Daemon) getClientConfig() (*clientConfig, error) { - var ( - transport *http.Transport - scheme string - addr string - proto string - ) - if d.UseDefaultTLSHost { - option := &tlsconfig.Options{ - CAFile: "fixtures/https/ca.pem", - CertFile: "fixtures/https/client-cert.pem", - KeyFile: "fixtures/https/client-key.pem", - } - tlsConfig, err := tlsconfig.Client(*option) - if err != nil { - return nil, err - } - transport = &http.Transport{ - TLSClientConfig: tlsConfig, - } - addr = fmt.Sprintf("%s:%d", opts.DefaultHTTPHost, opts.DefaultTLSHTTPPort) - scheme = "https" - proto = "tcp" - } else if d.UseDefaultHost { - addr = opts.DefaultUnixSocket - proto = "unix" - scheme = "http" - transport = &http.Transport{} - } else { - addr = d.sockPath() - proto = "unix" - scheme = "http" - transport = &http.Transport{} - } - - if err := 
sockets.ConfigureTransport(transport, proto, addr); err != nil { - return nil, err - } - transport.DisableKeepAlives = true - - return &clientConfig{ - transport: transport, - scheme: scheme, - addr: addr, - }, nil -} - -func (d *Daemon) queryRootDir() (string, error) { - // update daemon root by asking /info endpoint (to support user - // namespaced daemon with root remapped uid.gid directory) - clientConfig, err := d.getClientConfig() - if err != nil { - return "", err - } - - client := &http.Client{ - Transport: clientConfig.transport, - } - - req, err := http.NewRequest("GET", "/info", nil) - if err != nil { - return "", err - } - req.Header.Set("Content-Type", "application/json") - req.URL.Host = clientConfig.addr - req.URL.Scheme = clientConfig.scheme - - resp, err := client.Do(req) - if err != nil { - return "", err - } - body := ioutils.NewReadCloserWrapper(resp.Body, func() error { - return resp.Body.Close() - }) - - type Info struct { - DockerRootDir string - } - var b []byte - var i Info - b, err = request.ReadBody(body) - if err == nil && resp.StatusCode == http.StatusOK { - // read the docker root dir - if err = json.Unmarshal(b, &i); err == nil { - return i.DockerRootDir, nil - } - } - return "", err -} - -// Info returns the info struct for this daemon -func (d *Daemon) Info(t assert.TestingT) types.Info { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - apiclient, err := d.NewClient() - assert.NilError(t, err) - info, err := apiclient.Info(context.Background()) - assert.NilError(t, err) - return info -} - -func cleanupRaftDir(t testingT, rootPath string) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - walDir := filepath.Join(rootPath, "swarm/raft/wal") - if err := os.RemoveAll(walDir); err != nil { - t.Logf("error removing %v: %v", walDir, err) - } -} diff --git a/vendor/github.com/docker/docker/internal/test/daemon/daemon_unix.go b/vendor/github.com/docker/docker/internal/test/daemon/daemon_unix.go deleted file mode 100644 index 9dd9e36f0..000000000 --- a/vendor/github.com/docker/docker/internal/test/daemon/daemon_unix.go +++ /dev/null @@ -1,39 +0,0 @@ -// +build !windows - -package daemon // import "github.com/docker/docker/internal/test/daemon" - -import ( - "os" - "path/filepath" - - "github.com/docker/docker/internal/test" - "golang.org/x/sys/unix" -) - -func cleanupNetworkNamespace(t testingT, execRoot string) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - // Cleanup network namespaces in the exec root of this - // daemon because this exec root is specific to this - // daemon instance and has no chance of getting - // cleaned up when a new daemon is instantiated with a - // new exec root. 
- netnsPath := filepath.Join(execRoot, "netns") - filepath.Walk(netnsPath, func(path string, info os.FileInfo, err error) error { - if err := unix.Unmount(path, unix.MNT_FORCE); err != nil { - t.Logf("unmount of %s failed: %v", path, err) - } - os.Remove(path) - return nil - }) -} - -// SignalDaemonDump sends a signal to the daemon to write a dump file -func SignalDaemonDump(pid int) { - unix.Kill(pid, unix.SIGQUIT) -} - -func signalDaemonReload(pid int) error { - return unix.Kill(pid, unix.SIGHUP) -} diff --git a/vendor/github.com/docker/docker/internal/test/daemon/daemon_windows.go b/vendor/github.com/docker/docker/internal/test/daemon/daemon_windows.go deleted file mode 100644 index cb6bb6a4c..000000000 --- a/vendor/github.com/docker/docker/internal/test/daemon/daemon_windows.go +++ /dev/null @@ -1,25 +0,0 @@ -package daemon // import "github.com/docker/docker/internal/test/daemon" - -import ( - "fmt" - "strconv" - - "golang.org/x/sys/windows" -) - -// SignalDaemonDump sends a signal to the daemon to write a dump file -func SignalDaemonDump(pid int) { - ev, _ := windows.UTF16PtrFromString("Global\\docker-daemon-" + strconv.Itoa(pid)) - h2, err := windows.OpenEvent(0x0002, false, ev) - if h2 == 0 || err != nil { - return - } - windows.PulseEvent(h2) -} - -func signalDaemonReload(pid int) error { - return fmt.Errorf("daemon reload not supported") -} - -func cleanupNetworkNamespace(t testingT, execRoot string) { -} diff --git a/vendor/github.com/docker/docker/internal/test/daemon/node.go b/vendor/github.com/docker/docker/internal/test/daemon/node.go deleted file mode 100644 index 5015c75eb..000000000 --- a/vendor/github.com/docker/docker/internal/test/daemon/node.go +++ /dev/null @@ -1,82 +0,0 @@ -package daemon - -import ( - "context" - "strings" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/internal/test" - "github.com/gotestyourself/gotestyourself/assert" -) - -// NodeConstructor defines a swarm node constructor -type NodeConstructor func(*swarm.Node) - -// GetNode returns a swarm node identified by the specified id -func (d *Daemon) GetNode(t assert.TestingT, id string) *swarm.Node { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - cli := d.NewClientT(t) - defer cli.Close() - - node, _, err := cli.NodeInspectWithRaw(context.Background(), id) - assert.NilError(t, err) - assert.Check(t, node.ID == id) - return &node -} - -// RemoveNode removes the specified node -func (d *Daemon) RemoveNode(t assert.TestingT, id string, force bool) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - cli := d.NewClientT(t) - defer cli.Close() - - options := types.NodeRemoveOptions{ - Force: force, - } - err := cli.NodeRemove(context.Background(), id, options) - assert.NilError(t, err) -} - -// UpdateNode updates a swarm node with the specified node constructor -func (d *Daemon) UpdateNode(t assert.TestingT, id string, f ...NodeConstructor) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - cli := d.NewClientT(t) - defer cli.Close() - - for i := 0; ; i++ { - node := d.GetNode(t, id) - for _, fn := range f { - fn(node) - } - - err := cli.NodeUpdate(context.Background(), node.ID, node.Version, node.Spec) - if i < 10 && err != nil && strings.Contains(err.Error(), "update out of sequence") { - time.Sleep(100 * time.Millisecond) - continue - } - assert.NilError(t, err) - return - } -} - -// ListNodes returns the list of the current swarm nodes -func (d *Daemon) ListNodes(t assert.TestingT) []swarm.Node { - if 
ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - cli := d.NewClientT(t) - defer cli.Close() - - nodes, err := cli.NodeList(context.Background(), types.NodeListOptions{}) - assert.NilError(t, err) - - return nodes -} diff --git a/vendor/github.com/docker/docker/internal/test/daemon/ops.go b/vendor/github.com/docker/docker/internal/test/daemon/ops.go deleted file mode 100644 index 34db073b5..000000000 --- a/vendor/github.com/docker/docker/internal/test/daemon/ops.go +++ /dev/null @@ -1,44 +0,0 @@ -package daemon - -import "github.com/docker/docker/internal/test/environment" - -// WithExperimental sets the daemon in experimental mode -func WithExperimental(d *Daemon) { - d.experimental = true - d.init = true -} - -// WithInit sets the daemon init -func WithInit(d *Daemon) { - d.init = true -} - -// WithDockerdBinary sets the dockerd binary to the specified one -func WithDockerdBinary(dockerdBinary string) func(*Daemon) { - return func(d *Daemon) { - d.dockerdBinary = dockerdBinary - } -} - -// WithSwarmPort sets the swarm port to use for swarm mode -func WithSwarmPort(port int) func(*Daemon) { - return func(d *Daemon) { - d.SwarmPort = port - } -} - -// WithSwarmListenAddr sets the swarm listen addr to use for swarm mode -func WithSwarmListenAddr(listenAddr string) func(*Daemon) { - return func(d *Daemon) { - d.swarmListenAddr = listenAddr - } -} - -// WithEnvironment sets options from internal/test/environment.Execution struct -func WithEnvironment(e environment.Execution) func(*Daemon) { - return func(d *Daemon) { - if e.DaemonInfo.ExperimentalBuild { - d.experimental = true - } - } -} diff --git a/vendor/github.com/docker/docker/internal/test/daemon/plugin.go b/vendor/github.com/docker/docker/internal/test/daemon/plugin.go deleted file mode 100644 index 9a7cc345e..000000000 --- a/vendor/github.com/docker/docker/internal/test/daemon/plugin.go +++ /dev/null @@ -1,77 +0,0 @@ -package daemon - -import ( - "context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/client" - "github.com/gotestyourself/gotestyourself/poll" -) - -// PluginIsRunning provides a poller to check if the specified plugin is running -func (d *Daemon) PluginIsRunning(name string) func(poll.LogT) poll.Result { - return withClient(d, withPluginInspect(name, func(plugin *types.Plugin, t poll.LogT) poll.Result { - if plugin.Enabled { - return poll.Success() - } - return poll.Continue("plugin %q is not enabled", name) - })) -} - -// PluginIsNotRunning provides a poller to check if the specified plugin is not running -func (d *Daemon) PluginIsNotRunning(name string) func(poll.LogT) poll.Result { - return withClient(d, withPluginInspect(name, func(plugin *types.Plugin, t poll.LogT) poll.Result { - if !plugin.Enabled { - return poll.Success() - } - return poll.Continue("plugin %q is enabled", name) - })) -} - -// PluginIsNotPresent provides a poller to check if the specified plugin is not present -func (d *Daemon) PluginIsNotPresent(name string) func(poll.LogT) poll.Result { - return withClient(d, func(c client.APIClient, t poll.LogT) poll.Result { - _, _, err := c.PluginInspectWithRaw(context.Background(), name) - if client.IsErrNotFound(err) { - return poll.Success() - } - if err != nil { - return poll.Error(err) - } - return poll.Continue("plugin %q exists", name) - }) -} - -// PluginReferenceIs provides a poller to check if the specified plugin has the specified reference -func (d *Daemon) PluginReferenceIs(name, expectedRef string) func(poll.LogT) poll.Result { - return withClient(d, 
withPluginInspect(name, func(plugin *types.Plugin, t poll.LogT) poll.Result {
- if plugin.PluginReference == expectedRef {
- return poll.Success()
- }
- return poll.Continue("plugin %q reference is not %q", name, expectedRef)
- }))
-}
-
-func withPluginInspect(name string, f func(*types.Plugin, poll.LogT) poll.Result) func(client.APIClient, poll.LogT) poll.Result {
- return func(c client.APIClient, t poll.LogT) poll.Result {
- plugin, _, err := c.PluginInspectWithRaw(context.Background(), name)
- if client.IsErrNotFound(err) {
- return poll.Continue("plugin %q not found", name)
- }
- if err != nil {
- return poll.Error(err)
- }
- return f(plugin, t)
- }
-}
-
-func withClient(d *Daemon, f func(client.APIClient, poll.LogT) poll.Result) func(poll.LogT) poll.Result {
- return func(t poll.LogT) poll.Result {
- c, err := d.NewClient()
- if err != nil {
- return poll.Error(err)
- }
- return f(c, t)
- }
-}
diff --git a/vendor/github.com/docker/docker/internal/test/daemon/secret.go b/vendor/github.com/docker/docker/internal/test/daemon/secret.go
deleted file mode 100644
index 615489bfd..000000000
--- a/vendor/github.com/docker/docker/internal/test/daemon/secret.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package daemon
-
-import (
- "context"
-
- "github.com/docker/docker/api/types"
- "github.com/docker/docker/api/types/swarm"
- "github.com/docker/docker/internal/test"
- "github.com/gotestyourself/gotestyourself/assert"
-)
-
-// SecretConstructor defines a swarm secret constructor
-type SecretConstructor func(*swarm.Secret)
-
-// CreateSecret creates a secret given the specified spec
-func (d *Daemon) CreateSecret(t assert.TestingT, secretSpec swarm.SecretSpec) string {
- if ht, ok := t.(test.HelperT); ok {
- ht.Helper()
- }
- cli := d.NewClientT(t)
- defer cli.Close()
-
- scr, err := cli.SecretCreate(context.Background(), secretSpec)
- assert.NilError(t, err)
-
- return scr.ID
-}
-
-// ListSecrets returns the list of the current swarm secrets
-func (d *Daemon) ListSecrets(t assert.TestingT) []swarm.Secret {
- if ht, ok := t.(test.HelperT); ok {
- ht.Helper()
- }
- cli := d.NewClientT(t)
- defer cli.Close()
-
- secrets, err := cli.SecretList(context.Background(), types.SecretListOptions{})
- assert.NilError(t, err)
- return secrets
-}
-
-// GetSecret returns a swarm secret identified by the specified id
-func (d *Daemon) GetSecret(t assert.TestingT, id string) *swarm.Secret {
- if ht, ok := t.(test.HelperT); ok {
- ht.Helper()
- }
- cli := d.NewClientT(t)
- defer cli.Close()
-
- secret, _, err := cli.SecretInspectWithRaw(context.Background(), id)
- assert.NilError(t, err)
- return &secret
-}
-
-// DeleteSecret removes the swarm secret identified by the specified id
-func (d *Daemon) DeleteSecret(t assert.TestingT, id string) {
- if ht, ok := t.(test.HelperT); ok {
- ht.Helper()
- }
- cli := d.NewClientT(t)
- defer cli.Close()
-
- err := cli.SecretRemove(context.Background(), id)
- assert.NilError(t, err)
-}
-
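UpdateSecret below shares one idiom with UpdateNode and UpdateSwarm elsewhere in this package: inspect the object, apply caller-supplied constructor functions to mutate it, then send the update together with the last-read Version so the cluster can reject stale writes. A caller-side sketch (the label key and value are illustrative only):

    // Mutate a secret through the constructor-function pattern.
    d.UpdateSecret(t, id, func(s *swarm.Secret) {
        if s.Spec.Labels == nil {
            s.Spec.Labels = map[string]string{}
        }
        s.Spec.Labels["updated"] = "true" // hypothetical label
    })

-// UpdateSecret updates the swarm secret identified by the specified id
-// Currently, only label update is supported.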
-func (d *Daemon) UpdateSecret(t assert.TestingT, id string, f ...SecretConstructor) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - cli := d.NewClientT(t) - defer cli.Close() - - secret := d.GetSecret(t, id) - for _, fn := range f { - fn(secret) - } - - err := cli.SecretUpdate(context.Background(), secret.ID, secret.Version, secret.Spec) - - assert.NilError(t, err) -} diff --git a/vendor/github.com/docker/docker/internal/test/daemon/service.go b/vendor/github.com/docker/docker/internal/test/daemon/service.go deleted file mode 100644 index 77614d0da..000000000 --- a/vendor/github.com/docker/docker/internal/test/daemon/service.go +++ /dev/null @@ -1,131 +0,0 @@ -package daemon - -import ( - "context" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/internal/test" - "github.com/gotestyourself/gotestyourself/assert" -) - -// ServiceConstructor defines a swarm service constructor function -type ServiceConstructor func(*swarm.Service) - -func (d *Daemon) createServiceWithOptions(t assert.TestingT, opts types.ServiceCreateOptions, f ...ServiceConstructor) string { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - var service swarm.Service - for _, fn := range f { - fn(&service) - } - - cli := d.NewClientT(t) - defer cli.Close() - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - res, err := cli.ServiceCreate(ctx, service.Spec, opts) - assert.NilError(t, err) - return res.ID -} - -// CreateService creates a swarm service given the specified service constructor -func (d *Daemon) CreateService(t assert.TestingT, f ...ServiceConstructor) string { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - return d.createServiceWithOptions(t, types.ServiceCreateOptions{}, f...) 
-} - -// GetService returns the swarm service corresponding to the specified id -func (d *Daemon) GetService(t assert.TestingT, id string) *swarm.Service { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - cli := d.NewClientT(t) - defer cli.Close() - - service, _, err := cli.ServiceInspectWithRaw(context.Background(), id, types.ServiceInspectOptions{}) - assert.NilError(t, err) - return &service -} - -// GetServiceTasks returns the swarm tasks for the specified service -func (d *Daemon) GetServiceTasks(t assert.TestingT, service string) []swarm.Task { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - cli := d.NewClientT(t) - defer cli.Close() - - filterArgs := filters.NewArgs() - filterArgs.Add("desired-state", "running") - filterArgs.Add("service", service) - - options := types.TaskListOptions{ - Filters: filterArgs, - } - - tasks, err := cli.TaskList(context.Background(), options) - assert.NilError(t, err) - return tasks -} - -// UpdateService updates a swarm service with the specified service constructor -func (d *Daemon) UpdateService(t assert.TestingT, service *swarm.Service, f ...ServiceConstructor) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - cli := d.NewClientT(t) - defer cli.Close() - - for _, fn := range f { - fn(service) - } - - _, err := cli.ServiceUpdate(context.Background(), service.ID, service.Version, service.Spec, types.ServiceUpdateOptions{}) - assert.NilError(t, err) -} - -// RemoveService removes the specified service -func (d *Daemon) RemoveService(t assert.TestingT, id string) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - cli := d.NewClientT(t) - defer cli.Close() - - err := cli.ServiceRemove(context.Background(), id) - assert.NilError(t, err) -} - -// ListServices returns the list of the current swarm services -func (d *Daemon) ListServices(t assert.TestingT) []swarm.Service { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - cli := d.NewClientT(t) - defer cli.Close() - - services, err := cli.ServiceList(context.Background(), types.ServiceListOptions{}) - assert.NilError(t, err) - return services -} - -// GetTask returns the swarm task identified by the specified id -func (d *Daemon) GetTask(t assert.TestingT, id string) swarm.Task { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - cli := d.NewClientT(t) - defer cli.Close() - - task, _, err := cli.TaskInspectWithRaw(context.Background(), id) - assert.NilError(t, err) - return task -} diff --git a/vendor/github.com/docker/docker/internal/test/daemon/swarm.go b/vendor/github.com/docker/docker/internal/test/daemon/swarm.go deleted file mode 100644 index 3e803eeeb..000000000 --- a/vendor/github.com/docker/docker/internal/test/daemon/swarm.go +++ /dev/null @@ -1,194 +0,0 @@ -package daemon - -import ( - "context" - "fmt" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/internal/test" - "github.com/gotestyourself/gotestyourself/assert" - "github.com/pkg/errors" -) - -const ( - // DefaultSwarmPort is the default port use for swarm in the tests - DefaultSwarmPort = 2477 - defaultSwarmListenAddr = "0.0.0.0" -) - -// StartAndSwarmInit starts the daemon (with busybox) and init the swarm -func (d *Daemon) StartAndSwarmInit(t testingT) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - // avoid networking conflicts - args := []string{"--iptables=false", "--swarm-default-advertise-addr=lo"} - d.StartWithBusybox(t, args...) 
-
- d.SwarmInit(t, swarm.InitRequest{})
-}
-
-// StartAndSwarmJoin starts the daemon (with busybox) and joins the specified swarm as worker or manager
-func (d *Daemon) StartAndSwarmJoin(t testingT, leader *Daemon, manager bool) {
- if ht, ok := t.(test.HelperT); ok {
- ht.Helper()
- }
- // avoid networking conflicts
- args := []string{"--iptables=false", "--swarm-default-advertise-addr=lo"}
- d.StartWithBusybox(t, args...)
-
- tokens := leader.JoinTokens(t)
- token := tokens.Worker
- if manager {
- token = tokens.Manager
- }
- d.SwarmJoin(t, swarm.JoinRequest{
- RemoteAddrs: []string{leader.SwarmListenAddr()},
- JoinToken: token,
- })
-}
-
-// SpecConstructor defines a swarm spec constructor
-type SpecConstructor func(*swarm.Spec)
-
-// SwarmListenAddr returns the listen-addr used for the daemon
-func (d *Daemon) SwarmListenAddr() string {
- return fmt.Sprintf("%s:%d", d.swarmListenAddr, d.SwarmPort)
-}
-
-// NodeID returns the swarm mode node ID
-func (d *Daemon) NodeID() string {
- return d.CachedInfo.Swarm.NodeID
-}
-
-// SwarmInit initializes a new swarm cluster.
-func (d *Daemon) SwarmInit(t assert.TestingT, req swarm.InitRequest) {
- if ht, ok := t.(test.HelperT); ok {
- ht.Helper()
- }
- if req.ListenAddr == "" {
- req.ListenAddr = fmt.Sprintf("%s:%d", d.swarmListenAddr, d.SwarmPort)
- }
- cli := d.NewClientT(t)
- defer cli.Close()
- _, err := cli.SwarmInit(context.Background(), req)
- assert.NilError(t, err, "initializing swarm")
- d.CachedInfo = d.Info(t)
-}
-
-// SwarmJoin joins a daemon to an existing cluster.
-func (d *Daemon) SwarmJoin(t assert.TestingT, req swarm.JoinRequest) {
- if ht, ok := t.(test.HelperT); ok {
- ht.Helper()
- }
- if req.ListenAddr == "" {
- req.ListenAddr = fmt.Sprintf("%s:%d", d.swarmListenAddr, d.SwarmPort)
- }
- cli := d.NewClientT(t)
- defer cli.Close()
- err := cli.SwarmJoin(context.Background(), req)
- assert.NilError(t, err, "joining swarm")
- d.CachedInfo = d.Info(t)
-}
-
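With SwarmInit and SwarmJoin in place, standing up a small cluster for a test takes two daemons plus the composite helpers above; a minimal sketch (the ports are illustrative, and each daemon gets its own state directory):

    leader := daemon.New(t, daemon.WithSwarmPort(2477))
    worker := daemon.New(t, daemon.WithSwarmPort(2478))

    leader.StartAndSwarmInit(t) // init the swarm on the leader
    defer leader.Stop(t)
    worker.StartAndSwarmJoin(t, leader, false) // false joins as a worker
    defer worker.Stop(t)

-// SwarmLeave forces the daemon to leave the current cluster.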
-func (d *Daemon) SwarmLeave(force bool) error { - cli, err := d.NewClient() - if err != nil { - return fmt.Errorf("leaving swarm: failed to create client %v", err) - } - defer cli.Close() - err = cli.SwarmLeave(context.Background(), force) - if err != nil { - err = fmt.Errorf("leaving swarm: %v", err) - } - return err -} - -// SwarmInfo returns the swarm information of the daemon -func (d *Daemon) SwarmInfo(t assert.TestingT) swarm.Info { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - cli := d.NewClientT(t) - info, err := cli.Info(context.Background()) - assert.NilError(t, err, "get swarm info") - return info.Swarm -} - -// SwarmUnlock tries to unlock a locked swarm -func (d *Daemon) SwarmUnlock(req swarm.UnlockRequest) error { - cli, err := d.NewClient() - if err != nil { - return fmt.Errorf("unlocking swarm: failed to create client %v", err) - } - defer cli.Close() - err = cli.SwarmUnlock(context.Background(), req) - if err != nil { - err = errors.Wrap(err, "unlocking swarm") - } - return err -} - -// GetSwarm returns the current swarm object -func (d *Daemon) GetSwarm(t assert.TestingT) swarm.Swarm { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - cli := d.NewClientT(t) - defer cli.Close() - - sw, err := cli.SwarmInspect(context.Background()) - assert.NilError(t, err) - return sw -} - -// UpdateSwarm updates the current swarm object with the specified spec constructors -func (d *Daemon) UpdateSwarm(t assert.TestingT, f ...SpecConstructor) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - cli := d.NewClientT(t) - defer cli.Close() - - sw := d.GetSwarm(t) - for _, fn := range f { - fn(&sw.Spec) - } - - err := cli.SwarmUpdate(context.Background(), sw.Version, sw.Spec, swarm.UpdateFlags{}) - assert.NilError(t, err) -} - -// RotateTokens update the swarm to rotate tokens -func (d *Daemon) RotateTokens(t assert.TestingT) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - cli := d.NewClientT(t) - defer cli.Close() - - sw, err := cli.SwarmInspect(context.Background()) - assert.NilError(t, err) - - flags := swarm.UpdateFlags{ - RotateManagerToken: true, - RotateWorkerToken: true, - } - - err = cli.SwarmUpdate(context.Background(), sw.Version, sw.Spec, flags) - assert.NilError(t, err) -} - -// JoinTokens returns the current swarm join tokens -func (d *Daemon) JoinTokens(t assert.TestingT) swarm.JoinTokens { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - cli := d.NewClientT(t) - defer cli.Close() - - sw, err := cli.SwarmInspect(context.Background()) - assert.NilError(t, err) - return sw.JoinTokens -} diff --git a/vendor/github.com/docker/docker/internal/test/environment/clean.go b/vendor/github.com/docker/docker/internal/test/environment/clean.go deleted file mode 100644 index e92006fc4..000000000 --- a/vendor/github.com/docker/docker/internal/test/environment/clean.go +++ /dev/null @@ -1,217 +0,0 @@ -package environment // import "github.com/docker/docker/internal/test/environment" - -import ( - "context" - "regexp" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/client" - "github.com/docker/docker/internal/test" - "github.com/gotestyourself/gotestyourself/assert" -) - -type testingT interface { - assert.TestingT - logT - Fatalf(string, ...interface{}) -} - -type logT interface { - Logf(string, ...interface{}) -} - -// Clean the environment, preserving protected objects (images, containers, ...) -// and removing everything else. 
It's meant to run after any tests so that they don't
-// depend on each other.
-func (e *Execution) Clean(t assert.TestingT) {
- if ht, ok := t.(test.HelperT); ok {
- ht.Helper()
- }
- client := e.APIClient()
-
- platform := e.OSType
- if platform != "windows" || e.DaemonInfo.Isolation == "hyperv" {
- unpauseAllContainers(t, client)
- }
- deleteAllContainers(t, client, e.protectedElements.containers)
- deleteAllImages(t, client, e.protectedElements.images)
- deleteAllVolumes(t, client, e.protectedElements.volumes)
- deleteAllNetworks(t, client, platform, e.protectedElements.networks)
- if platform == "linux" {
- deleteAllPlugins(t, client, e.protectedElements.plugins)
- }
-}
-
-func unpauseAllContainers(t assert.TestingT, client client.ContainerAPIClient) {
- if ht, ok := t.(test.HelperT); ok {
- ht.Helper()
- }
- ctx := context.Background()
- containers := getPausedContainers(ctx, t, client)
- if len(containers) > 0 {
- for _, container := range containers {
- err := client.ContainerUnpause(ctx, container.ID)
- assert.Check(t, err, "failed to unpause container %s", container.ID)
- }
- }
-}
-
-func getPausedContainers(ctx context.Context, t assert.TestingT, client client.ContainerAPIClient) []types.Container {
- if ht, ok := t.(test.HelperT); ok {
- ht.Helper()
- }
- filter := filters.NewArgs()
- filter.Add("status", "paused")
- containers, err := client.ContainerList(ctx, types.ContainerListOptions{
- Filters: filter,
- Quiet: true,
- All: true,
- })
- assert.Check(t, err, "failed to list containers")
- return containers
-}
-
-var alreadyExists = regexp.MustCompile(`Error response from daemon: removal of container (\w+) is already in progress`)
-
-func deleteAllContainers(t assert.TestingT, apiclient client.ContainerAPIClient, protectedContainers map[string]struct{}) {
- if ht, ok := t.(test.HelperT); ok {
- ht.Helper()
- }
- ctx := context.Background()
- containers := getAllContainers(ctx, t, apiclient)
- if len(containers) == 0 {
- return
- }
-
- for _, container := range containers {
- if _, ok := protectedContainers[container.ID]; ok {
- continue
- }
- err := apiclient.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{
- Force: true,
- RemoveVolumes: true,
- })
- if err == nil || client.IsErrNotFound(err) || alreadyExists.MatchString(err.Error()) || isErrNotFoundSwarmClassic(err) {
- continue
- }
- assert.Check(t, err, "failed to remove %s", container.ID)
- }
-}
-
-func getAllContainers(ctx context.Context, t assert.TestingT, client client.ContainerAPIClient) []types.Container {
- if ht, ok := t.(test.HelperT); ok {
- ht.Helper()
- }
- containers, err := client.ContainerList(ctx, types.ContainerListOptions{
- Quiet: true,
- All: true,
- })
- assert.Check(t, err, "failed to list containers")
- return containers
-}
-
-func deleteAllImages(t assert.TestingT, apiclient client.ImageAPIClient, protectedImages map[string]struct{}) {
- if ht, ok := t.(test.HelperT); ok {
- ht.Helper()
- }
- images, err := apiclient.ImageList(context.Background(), types.ImageListOptions{})
- assert.Check(t, err, "failed to list images")
-
- ctx := context.Background()
- for _, image := range images {
- tags := tagsFromImageSummary(image)
- if len(tags) == 0 {
- removeImage(ctx, t, apiclient, image.ID)
- continue
- }
- for _, tag := range tags {
- if _, ok := protectedImages[tag]; !ok {
- removeImage(ctx, t, apiclient, tag)
- }
- }
- }
-}
-
-func removeImage(ctx context.Context, t assert.TestingT, apiclient client.ImageAPIClient, ref string) {
- if ht, ok := 
t.(test.HelperT); ok { - ht.Helper() - } - _, err := apiclient.ImageRemove(ctx, ref, types.ImageRemoveOptions{ - Force: true, - }) - if client.IsErrNotFound(err) { - return - } - assert.Check(t, err, "failed to remove image %s", ref) -} - -func deleteAllVolumes(t assert.TestingT, c client.VolumeAPIClient, protectedVolumes map[string]struct{}) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - volumes, err := c.VolumeList(context.Background(), filters.Args{}) - assert.Check(t, err, "failed to list volumes") - - for _, v := range volumes.Volumes { - if _, ok := protectedVolumes[v.Name]; ok { - continue - } - err := c.VolumeRemove(context.Background(), v.Name, true) - // Docker EE may list volumes that no longer exist. - if isErrNotFoundSwarmClassic(err) { - continue - } - assert.Check(t, err, "failed to remove volume %s", v.Name) - } -} - -func deleteAllNetworks(t assert.TestingT, c client.NetworkAPIClient, daemonPlatform string, protectedNetworks map[string]struct{}) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - networks, err := c.NetworkList(context.Background(), types.NetworkListOptions{}) - assert.Check(t, err, "failed to list networks") - - for _, n := range networks { - if n.Name == "bridge" || n.Name == "none" || n.Name == "host" { - continue - } - if _, ok := protectedNetworks[n.ID]; ok { - continue - } - if daemonPlatform == "windows" && strings.ToLower(n.Name) == "nat" { - // nat is a pre-defined network on Windows and cannot be removed - continue - } - err := c.NetworkRemove(context.Background(), n.ID) - assert.Check(t, err, "failed to remove network %s", n.ID) - } -} - -func deleteAllPlugins(t assert.TestingT, c client.PluginAPIClient, protectedPlugins map[string]struct{}) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - plugins, err := c.PluginList(context.Background(), filters.Args{}) - // Docker EE does not allow cluster-wide plugin management. - if client.IsErrNotImplemented(err) { - return - } - assert.Check(t, err, "failed to list plugins") - - for _, p := range plugins { - if _, ok := protectedPlugins[p.Name]; ok { - continue - } - err := c.PluginRemove(context.Background(), p.Name, types.PluginRemoveOptions{Force: true}) - assert.Check(t, err, "failed to remove plugin %s", p.ID) - } -} - -// Swarm classic aggregates node errors and returns a 500 so we need to check -// the error string instead of just IsErrNotFound(). 
-func isErrNotFoundSwarmClassic(err error) bool { - return err != nil && strings.Contains(strings.ToLower(err.Error()), "no such") -} diff --git a/vendor/github.com/docker/docker/internal/test/environment/environment.go b/vendor/github.com/docker/docker/internal/test/environment/environment.go deleted file mode 100644 index 74c8e2ce0..000000000 --- a/vendor/github.com/docker/docker/internal/test/environment/environment.go +++ /dev/null @@ -1,158 +0,0 @@ -package environment // import "github.com/docker/docker/internal/test/environment" - -import ( - "context" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/client" - "github.com/docker/docker/internal/test/fixtures/load" - "github.com/pkg/errors" -) - -// Execution contains information about the current test execution and daemon -// under test -type Execution struct { - client client.APIClient - DaemonInfo types.Info - OSType string - PlatformDefaults PlatformDefaults - protectedElements protectedElements -} - -// PlatformDefaults are defaults values for the platform of the daemon under test -type PlatformDefaults struct { - BaseImage string - VolumesConfigPath string - ContainerStoragePath string -} - -// New creates a new Execution struct -func New() (*Execution, error) { - client, err := client.NewClientWithOpts(client.FromEnv) - if err != nil { - return nil, errors.Wrapf(err, "failed to create client") - } - - info, err := client.Info(context.Background()) - if err != nil { - return nil, errors.Wrapf(err, "failed to get info from daemon") - } - - osType := getOSType(info) - - return &Execution{ - client: client, - DaemonInfo: info, - OSType: osType, - PlatformDefaults: getPlatformDefaults(info, osType), - protectedElements: newProtectedElements(), - }, nil -} - -func getOSType(info types.Info) string { - // Docker EE does not set the OSType so allow the user to override this value. - userOsType := os.Getenv("TEST_OSTYPE") - if userOsType != "" { - return userOsType - } - return info.OSType -} - -func getPlatformDefaults(info types.Info, osType string) PlatformDefaults { - volumesPath := filepath.Join(info.DockerRootDir, "volumes") - containersPath := filepath.Join(info.DockerRootDir, "containers") - - switch osType { - case "linux": - return PlatformDefaults{ - BaseImage: "scratch", - VolumesConfigPath: toSlash(volumesPath), - ContainerStoragePath: toSlash(containersPath), - } - case "windows": - baseImage := "microsoft/windowsservercore" - if override := os.Getenv("WINDOWS_BASE_IMAGE"); override != "" { - baseImage = override - fmt.Println("INFO: Windows Base image is ", baseImage) - } - return PlatformDefaults{ - BaseImage: baseImage, - VolumesConfigPath: filepath.FromSlash(volumesPath), - ContainerStoragePath: filepath.FromSlash(containersPath), - } - default: - panic(fmt.Sprintf("unknown OSType for daemon: %s", osType)) - } -} - -// Make sure in context of daemon, not the local platform. Note we can't -// use filepath.FromSlash or ToSlash here as they are a no-op on Unix. -func toSlash(path string) string { - return strings.Replace(path, `\`, `/`, -1) -} - -// IsLocalDaemon is true if the daemon under test is on the same -// host as the test process. -// -// Deterministically working out the environment in which CI is running -// to evaluate whether the daemon is local or remote is not possible through -// a build tag. 
-// -// For example Windows to Linux CI under Jenkins tests the 64-bit -// Windows binary build with the daemon build tag, but calls a remote -// Linux daemon. -// -// We can't just say if Windows then assume the daemon is local as at -// some point, we will be testing the Windows CLI against a Windows daemon. -// -// Similarly, it will be perfectly valid to also run CLI tests from -// a Linux CLI (built with the daemon tag) against a Windows daemon. -func (e *Execution) IsLocalDaemon() bool { - return os.Getenv("DOCKER_REMOTE_DAEMON") == "" -} - -// IsRemoteDaemon is true if the daemon under test is on different host -// as the test process. -func (e *Execution) IsRemoteDaemon() bool { - return !e.IsLocalDaemon() -} - -// DaemonAPIVersion returns the negotiated daemon api version -func (e *Execution) DaemonAPIVersion() string { - version, err := e.APIClient().ServerVersion(context.TODO()) - if err != nil { - return "" - } - return version.APIVersion -} - -// Print the execution details to stdout -// TODO: print everything -func (e *Execution) Print() { - if e.IsLocalDaemon() { - fmt.Println("INFO: Testing against a local daemon") - } else { - fmt.Println("INFO: Testing against a remote daemon") - } -} - -// APIClient returns an APIClient connected to the daemon under test -func (e *Execution) APIClient() client.APIClient { - return e.client -} - -// EnsureFrozenImagesLinux loads frozen test images into the daemon -// if they aren't already loaded -func EnsureFrozenImagesLinux(testEnv *Execution) error { - if testEnv.OSType == "linux" { - err := load.FrozenImagesLinux(testEnv.APIClient(), frozenImages...) - if err != nil { - return errors.Wrap(err, "error loading frozen images") - } - } - return nil -} diff --git a/vendor/github.com/docker/docker/internal/test/environment/protect.go b/vendor/github.com/docker/docker/internal/test/environment/protect.go deleted file mode 100644 index 6d57dedb1..000000000 --- a/vendor/github.com/docker/docker/internal/test/environment/protect.go +++ /dev/null @@ -1,254 +0,0 @@ -package environment // import "github.com/docker/docker/internal/test/environment" - -import ( - "context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - dclient "github.com/docker/docker/client" - "github.com/docker/docker/internal/test" - "github.com/gotestyourself/gotestyourself/assert" -) - -var frozenImages = []string{"busybox:latest", "busybox:glibc", "hello-world:frozen", "debian:jessie"} - -type protectedElements struct { - containers map[string]struct{} - images map[string]struct{} - networks map[string]struct{} - plugins map[string]struct{} - volumes map[string]struct{} -} - -func newProtectedElements() protectedElements { - return protectedElements{ - containers: map[string]struct{}{}, - images: map[string]struct{}{}, - networks: map[string]struct{}{}, - plugins: map[string]struct{}{}, - volumes: map[string]struct{}{}, - } -} - -// ProtectAll protects the existing environment (containers, images, networks, -// volumes, and, on Linux, plugins) from being cleaned up at the end of test -// runs -func ProtectAll(t testingT, testEnv *Execution) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - ProtectContainers(t, testEnv) - ProtectImages(t, testEnv) - ProtectNetworks(t, testEnv) - ProtectVolumes(t, testEnv) - if testEnv.OSType == "linux" { - ProtectPlugins(t, testEnv) - } -} - -// ProtectContainer adds the specified container(s) to be protected in case of -// clean -func (e *Execution) ProtectContainer(t testingT, containers 
...string) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - for _, container := range containers { - e.protectedElements.containers[container] = struct{}{} - } -} - -// ProtectContainers protects existing containers from being cleaned up at the -// end of test runs -func ProtectContainers(t testingT, testEnv *Execution) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - containers := getExistingContainers(t, testEnv) - testEnv.ProtectContainer(t, containers...) -} - -func getExistingContainers(t assert.TestingT, testEnv *Execution) []string { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - client := testEnv.APIClient() - containerList, err := client.ContainerList(context.Background(), types.ContainerListOptions{ - All: true, - }) - assert.NilError(t, err, "failed to list containers") - - var containers []string - for _, container := range containerList { - containers = append(containers, container.ID) - } - return containers -} - -// ProtectImage adds the specified image(s) to be protected in case of clean -func (e *Execution) ProtectImage(t testingT, images ...string) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - for _, image := range images { - e.protectedElements.images[image] = struct{}{} - } -} - -// ProtectImages protects existing images and on linux frozen images from being -// cleaned up at the end of test runs -func ProtectImages(t testingT, testEnv *Execution) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - images := getExistingImages(t, testEnv) - - if testEnv.OSType == "linux" { - images = append(images, frozenImages...) - } - testEnv.ProtectImage(t, images...) -} - -func getExistingImages(t assert.TestingT, testEnv *Execution) []string { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - client := testEnv.APIClient() - filter := filters.NewArgs() - filter.Add("dangling", "false") - imageList, err := client.ImageList(context.Background(), types.ImageListOptions{ - All: true, - Filters: filter, - }) - assert.NilError(t, err, "failed to list images") - - var images []string - for _, image := range imageList { - images = append(images, tagsFromImageSummary(image)...) - } - return images -} - -func tagsFromImageSummary(image types.ImageSummary) []string { - var result []string - for _, tag := range image.RepoTags { - if tag != ":" { - result = append(result, tag) - } - } - for _, digest := range image.RepoDigests { - if digest != "@" { - result = append(result, digest) - } - } - return result -} - -// ProtectNetwork adds the specified network(s) to be protected in case of -// clean -func (e *Execution) ProtectNetwork(t testingT, networks ...string) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - for _, network := range networks { - e.protectedElements.networks[network] = struct{}{} - } -} - -// ProtectNetworks protects existing networks from being cleaned up at the end -// of test runs -func ProtectNetworks(t testingT, testEnv *Execution) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - networks := getExistingNetworks(t, testEnv) - testEnv.ProtectNetwork(t, networks...) 
-} - -func getExistingNetworks(t assert.TestingT, testEnv *Execution) []string { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - client := testEnv.APIClient() - networkList, err := client.NetworkList(context.Background(), types.NetworkListOptions{}) - assert.NilError(t, err, "failed to list networks") - - var networks []string - for _, network := range networkList { - networks = append(networks, network.ID) - } - return networks -} - -// ProtectPlugin adds the specified plugin(s) to be protected in case of clean -func (e *Execution) ProtectPlugin(t testingT, plugins ...string) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - for _, plugin := range plugins { - e.protectedElements.plugins[plugin] = struct{}{} - } -} - -// ProtectPlugins protects existing plugins from being cleaned up at the end of -// test runs -func ProtectPlugins(t testingT, testEnv *Execution) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - plugins := getExistingPlugins(t, testEnv) - testEnv.ProtectPlugin(t, plugins...) -} - -func getExistingPlugins(t assert.TestingT, testEnv *Execution) []string { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - client := testEnv.APIClient() - pluginList, err := client.PluginList(context.Background(), filters.Args{}) - // Docker EE does not allow cluster-wide plugin management. - if dclient.IsErrNotImplemented(err) { - return []string{} - } - assert.NilError(t, err, "failed to list plugins") - - var plugins []string - for _, plugin := range pluginList { - plugins = append(plugins, plugin.Name) - } - return plugins -} - -// ProtectVolume adds the specified volume(s) to be protected in case of clean -func (e *Execution) ProtectVolume(t testingT, volumes ...string) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - for _, volume := range volumes { - e.protectedElements.volumes[volume] = struct{}{} - } -} - -// ProtectVolumes protects existing volumes from being cleaned up at the end of -// test runs -func ProtectVolumes(t testingT, testEnv *Execution) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - volumes := getExistingVolumes(t, testEnv) - testEnv.ProtectVolume(t, volumes...) 
-} - -func getExistingVolumes(t assert.TestingT, testEnv *Execution) []string { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - client := testEnv.APIClient() - volumeList, err := client.VolumeList(context.Background(), filters.Args{}) - assert.NilError(t, err, "failed to list volumes") - - var volumes []string - for _, volume := range volumeList.Volumes { - volumes = append(volumes, volume.Name) - } - return volumes -} diff --git a/vendor/github.com/docker/docker/internal/test/fakecontext/context.go b/vendor/github.com/docker/docker/internal/test/fakecontext/context.go deleted file mode 100644 index 8b11da207..000000000 --- a/vendor/github.com/docker/docker/internal/test/fakecontext/context.go +++ /dev/null @@ -1,131 +0,0 @@ -package fakecontext // import "github.com/docker/docker/internal/test/fakecontext" - -import ( - "bytes" - "io" - "io/ioutil" - "os" - "path/filepath" - - "github.com/docker/docker/internal/test" - "github.com/docker/docker/pkg/archive" -) - -type testingT interface { - Fatal(args ...interface{}) - Fatalf(string, ...interface{}) -} - -// New creates a fake build context -func New(t testingT, dir string, modifiers ...func(*Fake) error) *Fake { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - fakeContext := &Fake{Dir: dir} - if dir == "" { - if err := newDir(fakeContext); err != nil { - t.Fatal(err) - } - } - - for _, modifier := range modifiers { - if err := modifier(fakeContext); err != nil { - t.Fatal(err) - } - } - - return fakeContext -} - -func newDir(fake *Fake) error { - tmp, err := ioutil.TempDir("", "fake-context") - if err != nil { - return err - } - if err := os.Chmod(tmp, 0755); err != nil { - return err - } - fake.Dir = tmp - return nil -} - -// WithFile adds the specified file (with content) in the build context -func WithFile(name, content string) func(*Fake) error { - return func(ctx *Fake) error { - return ctx.Add(name, content) - } -} - -// WithDockerfile adds the specified content as Dockerfile in the build context -func WithDockerfile(content string) func(*Fake) error { - return WithFile("Dockerfile", content) -} - -// WithFiles adds the specified files in the build context, content is a string -func WithFiles(files map[string]string) func(*Fake) error { - return func(fakeContext *Fake) error { - for file, content := range files { - if err := fakeContext.Add(file, content); err != nil { - return err - } - } - return nil - } -} - -// WithBinaryFiles adds the specified files in the build context, content is binary -func WithBinaryFiles(files map[string]*bytes.Buffer) func(*Fake) error { - return func(fakeContext *Fake) error { - for file, content := range files { - if err := fakeContext.Add(file, content.String()); err != nil { - return err - } - } - return nil - } -} - -// Fake creates directories that can be used as a build context -type Fake struct { - Dir string -} - -// Add a file at a path, creating directories where necessary -func (f *Fake) Add(file, content string) error { - return f.addFile(file, []byte(content)) -} - -func (f *Fake) addFile(file string, content []byte) error { - fp := filepath.Join(f.Dir, filepath.FromSlash(file)) - dirpath := filepath.Dir(fp) - if dirpath != "." 
{ - if err := os.MkdirAll(dirpath, 0755); err != nil { - return err - } - } - return ioutil.WriteFile(fp, content, 0644) - -} - -// Delete a file at a path -func (f *Fake) Delete(file string) error { - fp := filepath.Join(f.Dir, filepath.FromSlash(file)) - return os.RemoveAll(fp) -} - -// Close deletes the context -func (f *Fake) Close() error { - return os.RemoveAll(f.Dir) -} - -// AsTarReader returns a ReadCloser with the contents of Dir as a tar archive. -func (f *Fake) AsTarReader(t testingT) io.ReadCloser { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - reader, err := archive.TarWithOptions(f.Dir, &archive.TarOptions{}) - if err != nil { - t.Fatalf("Failed to create tar from %s: %s", f.Dir, err) - } - return reader -} diff --git a/vendor/github.com/docker/docker/internal/test/fakegit/fakegit.go b/vendor/github.com/docker/docker/internal/test/fakegit/fakegit.go deleted file mode 100644 index 59f4bcb05..000000000 --- a/vendor/github.com/docker/docker/internal/test/fakegit/fakegit.go +++ /dev/null @@ -1,136 +0,0 @@ -package fakegit // import "github.com/docker/docker/internal/test/fakegit" - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "os" - "os/exec" - "path/filepath" - - "github.com/docker/docker/internal/test" - "github.com/docker/docker/internal/test/fakecontext" - "github.com/docker/docker/internal/test/fakestorage" - "github.com/gotestyourself/gotestyourself/assert" -) - -type testingT interface { - assert.TestingT - logT - skipT - Fatal(args ...interface{}) - Fatalf(string, ...interface{}) -} - -type logT interface { - Logf(string, ...interface{}) -} - -type skipT interface { - Skip(reason string) -} - -type gitServer interface { - URL() string - Close() error -} - -type localGitServer struct { - *httptest.Server -} - -func (r *localGitServer) Close() error { - r.Server.Close() - return nil -} - -func (r *localGitServer) URL() string { - return r.Server.URL -} - -// FakeGit is a fake git server -type FakeGit struct { - root string - server gitServer - RepoURL string -} - -// Close closes the server, implements Closer interface -func (g *FakeGit) Close() { - g.server.Close() - os.RemoveAll(g.root) -} - -// New create a fake git server that can be used for git related tests -func New(c testingT, name string, files map[string]string, enforceLocalServer bool) *FakeGit { - if ht, ok := c.(test.HelperT); ok { - ht.Helper() - } - ctx := fakecontext.New(c, "", fakecontext.WithFiles(files)) - defer ctx.Close() - curdir, err := os.Getwd() - if err != nil { - c.Fatal(err) - } - defer os.Chdir(curdir) - - if output, err := exec.Command("git", "init", ctx.Dir).CombinedOutput(); err != nil { - c.Fatalf("error trying to init repo: %s (%s)", err, output) - } - err = os.Chdir(ctx.Dir) - if err != nil { - c.Fatal(err) - } - if output, err := exec.Command("git", "config", "user.name", "Fake User").CombinedOutput(); err != nil { - c.Fatalf("error trying to set 'user.name': %s (%s)", err, output) - } - if output, err := exec.Command("git", "config", "user.email", "fake.user@example.com").CombinedOutput(); err != nil { - c.Fatalf("error trying to set 'user.email': %s (%s)", err, output) - } - if output, err := exec.Command("git", "add", "*").CombinedOutput(); err != nil { - c.Fatalf("error trying to add files to repo: %s (%s)", err, output) - } - if output, err := exec.Command("git", "commit", "-a", "-m", "Initial commit").CombinedOutput(); err != nil { - c.Fatalf("error trying to commit to repo: %s (%s)", err, output) - } - - root, err := ioutil.TempDir("", 
"docker-test-git-repo") - if err != nil { - c.Fatal(err) - } - repoPath := filepath.Join(root, name+".git") - if output, err := exec.Command("git", "clone", "--bare", ctx.Dir, repoPath).CombinedOutput(); err != nil { - os.RemoveAll(root) - c.Fatalf("error trying to clone --bare: %s (%s)", err, output) - } - err = os.Chdir(repoPath) - if err != nil { - os.RemoveAll(root) - c.Fatal(err) - } - if output, err := exec.Command("git", "update-server-info").CombinedOutput(); err != nil { - os.RemoveAll(root) - c.Fatalf("error trying to git update-server-info: %s (%s)", err, output) - } - err = os.Chdir(curdir) - if err != nil { - os.RemoveAll(root) - c.Fatal(err) - } - - var server gitServer - if !enforceLocalServer { - // use fakeStorage server, which might be local or remote (at test daemon) - server = fakestorage.New(c, root) - } else { - // always start a local http server on CLI test machine - httpServer := httptest.NewServer(http.FileServer(http.Dir(root))) - server = &localGitServer{httpServer} - } - return &FakeGit{ - root: root, - server: server, - RepoURL: fmt.Sprintf("%s/%s.git", server.URL(), name), - } -} diff --git a/vendor/github.com/docker/docker/internal/test/fakestorage/fixtures.go b/vendor/github.com/docker/docker/internal/test/fakestorage/fixtures.go deleted file mode 100644 index a694834f7..000000000 --- a/vendor/github.com/docker/docker/internal/test/fakestorage/fixtures.go +++ /dev/null @@ -1,92 +0,0 @@ -package fakestorage // import "github.com/docker/docker/internal/test/fakestorage" - -import ( - "context" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "sync" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/internal/test" - "github.com/docker/docker/pkg/archive" - "github.com/gotestyourself/gotestyourself/assert" -) - -var ensureHTTPServerOnce sync.Once - -func ensureHTTPServerImage(t testingT) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - var doIt bool - ensureHTTPServerOnce.Do(func() { - doIt = true - }) - - if !doIt { - return - } - - defer testEnv.ProtectImage(t, "httpserver:latest") - - tmp, err := ioutil.TempDir("", "docker-http-server-test") - if err != nil { - t.Fatalf("could not build http server: %v", err) - } - defer os.RemoveAll(tmp) - - goos := testEnv.OSType - if goos == "" { - goos = "linux" - } - goarch := os.Getenv("DOCKER_ENGINE_GOARCH") - if goarch == "" { - goarch = "amd64" - } - - cpCmd, lookErr := exec.LookPath("cp") - if lookErr != nil { - t.Fatalf("could not build http server: %v", lookErr) - } - - if _, err = os.Stat("../contrib/httpserver/httpserver"); os.IsNotExist(err) { - goCmd, lookErr := exec.LookPath("go") - if lookErr != nil { - t.Fatalf("could not build http server: %v", lookErr) - } - - cmd := exec.Command(goCmd, "build", "-o", filepath.Join(tmp, "httpserver"), "github.com/docker/docker/contrib/httpserver") - cmd.Env = append(os.Environ(), []string{ - "CGO_ENABLED=0", - "GOOS=" + goos, - "GOARCH=" + goarch, - }...) 
- var out []byte - if out, err = cmd.CombinedOutput(); err != nil { - t.Fatalf("could not build http server: %s", string(out)) - } - } else { - if out, err := exec.Command(cpCmd, "../contrib/httpserver/httpserver", filepath.Join(tmp, "httpserver")).CombinedOutput(); err != nil { - t.Fatalf("could not copy http server: %v", string(out)) - } - } - - if out, err := exec.Command(cpCmd, "../contrib/httpserver/Dockerfile", filepath.Join(tmp, "Dockerfile")).CombinedOutput(); err != nil { - t.Fatalf("could not build http server: %v", string(out)) - } - - c := testEnv.APIClient() - reader, err := archive.TarWithOptions(tmp, &archive.TarOptions{}) - assert.NilError(t, err) - resp, err := c.ImageBuild(context.Background(), reader, types.ImageBuildOptions{ - Remove: true, - ForceRemove: true, - Tags: []string{"httpserver"}, - }) - assert.NilError(t, err) - _, err = io.Copy(ioutil.Discard, resp.Body) - assert.NilError(t, err) -} diff --git a/vendor/github.com/docker/docker/internal/test/fakestorage/storage.go b/vendor/github.com/docker/docker/internal/test/fakestorage/storage.go deleted file mode 100644 index adce3512c..000000000 --- a/vendor/github.com/docker/docker/internal/test/fakestorage/storage.go +++ /dev/null @@ -1,200 +0,0 @@ -package fakestorage // import "github.com/docker/docker/internal/test/fakestorage" - -import ( - "context" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "net/url" - "os" - "strings" - - "github.com/docker/docker/api/types" - containertypes "github.com/docker/docker/api/types/container" - "github.com/docker/docker/client" - "github.com/docker/docker/internal/test" - "github.com/docker/docker/internal/test/environment" - "github.com/docker/docker/internal/test/fakecontext" - "github.com/docker/docker/internal/test/request" - "github.com/docker/docker/internal/testutil" - "github.com/docker/go-connections/nat" - "github.com/gotestyourself/gotestyourself/assert" -) - -var testEnv *environment.Execution - -type testingT interface { - assert.TestingT - logT - skipT - Fatal(args ...interface{}) - Fatalf(string, ...interface{}) -} - -type logT interface { - Logf(string, ...interface{}) -} - -type skipT interface { - Skip(reason string) -} - -// Fake is a static file server. It might be running locally or remotely -// on test host. -type Fake interface { - Close() error - URL() string - CtxDir() string -} - -// SetTestEnvironment sets a static test environment -// TODO: decouple this package from environment -func SetTestEnvironment(env *environment.Execution) { - testEnv = env -} - -// New returns a static file server that will be use as build context. -func New(t testingT, dir string, modifiers ...func(*fakecontext.Fake) error) Fake { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - if testEnv == nil { - t.Fatal("fakstorage package requires SetTestEnvironment() to be called before use.") - } - ctx := fakecontext.New(t, dir, modifiers...) 
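Two paths follow in New below: a local daemon gets newLocalFakeStorage, which is nothing more than the standard library's file server wrapped in httptest. A minimal sketch of that building block, independent of the fixture types (file name and contents are made up):

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"os"
	"path/filepath"
)

// Serve a throwaway directory over HTTP using the same building blocks
// newLocalFakeStorage composes: http.FileServer + httptest.NewServer.
func main() {
	dir, err := ioutil.TempDir("", "fake-storage")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	if err := ioutil.WriteFile(filepath.Join(dir, "index.html"), []byte("hello"), 0644); err != nil {
		panic(err)
	}

	srv := httptest.NewServer(http.FileServer(http.Dir(dir)))
	defer srv.Close()

	resp, err := http.Get(srv.URL + "/index.html")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // hello
}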
- switch { - case testEnv.IsRemoteDaemon() && strings.HasPrefix(request.DaemonHost(), "unix:///"): - t.Skip(fmt.Sprintf("e2e run : daemon is remote but docker host points to a unix socket")) - case testEnv.IsLocalDaemon(): - return newLocalFakeStorage(ctx) - default: - return newRemoteFileServer(t, ctx, testEnv.APIClient()) - } - return nil -} - -// localFileStorage is a file storage on the running machine -type localFileStorage struct { - *fakecontext.Fake - *httptest.Server -} - -func (s *localFileStorage) URL() string { - return s.Server.URL -} - -func (s *localFileStorage) CtxDir() string { - return s.Fake.Dir -} - -func (s *localFileStorage) Close() error { - defer s.Server.Close() - return s.Fake.Close() -} - -func newLocalFakeStorage(ctx *fakecontext.Fake) *localFileStorage { - handler := http.FileServer(http.Dir(ctx.Dir)) - server := httptest.NewServer(handler) - return &localFileStorage{ - Fake: ctx, - Server: server, - } -} - -// remoteFileServer is a containerized static file server started on the remote -// testing machine to be used in URL-accepting docker build functionality. -type remoteFileServer struct { - host string // hostname/port web server is listening to on docker host e.g. 0.0.0.0:43712 - container string - image string - client client.APIClient - ctx *fakecontext.Fake -} - -func (f *remoteFileServer) URL() string { - u := url.URL{ - Scheme: "http", - Host: f.host} - return u.String() -} - -func (f *remoteFileServer) CtxDir() string { - return f.ctx.Dir -} - -func (f *remoteFileServer) Close() error { - defer func() { - if f.ctx != nil { - f.ctx.Close() - } - if f.image != "" { - if _, err := f.client.ImageRemove(context.Background(), f.image, types.ImageRemoveOptions{ - Force: true, - }); err != nil { - fmt.Fprintf(os.Stderr, "Error closing remote file server : %v\n", err) - } - } - if err := f.client.Close(); err != nil { - fmt.Fprintf(os.Stderr, "Error closing remote file server : %v\n", err) - } - }() - if f.container == "" { - return nil - } - return f.client.ContainerRemove(context.Background(), f.container, types.ContainerRemoveOptions{ - Force: true, - RemoveVolumes: true, - }) -} - -func newRemoteFileServer(t testingT, ctx *fakecontext.Fake, c client.APIClient) *remoteFileServer { - var ( - image = fmt.Sprintf("fileserver-img-%s", strings.ToLower(testutil.GenerateRandomAlphaOnlyString(10))) - container = fmt.Sprintf("fileserver-cnt-%s", strings.ToLower(testutil.GenerateRandomAlphaOnlyString(10))) - ) - - ensureHTTPServerImage(t) - - // Build the image - if err := ctx.Add("Dockerfile", `FROM httpserver -COPY . 
/static`); err != nil { - t.Fatal(err) - } - resp, err := c.ImageBuild(context.Background(), ctx.AsTarReader(t), types.ImageBuildOptions{ - NoCache: true, - Tags: []string{image}, - }) - assert.NilError(t, err) - _, err = io.Copy(ioutil.Discard, resp.Body) - assert.NilError(t, err) - - // Start the container - b, err := c.ContainerCreate(context.Background(), &containertypes.Config{ - Image: image, - }, &containertypes.HostConfig{}, nil, container) - assert.NilError(t, err) - err = c.ContainerStart(context.Background(), b.ID, types.ContainerStartOptions{}) - assert.NilError(t, err) - - // Find out the system assigned port - i, err := c.ContainerInspect(context.Background(), b.ID) - assert.NilError(t, err) - newP, err := nat.NewPort("tcp", "80") - assert.NilError(t, err) - ports, exists := i.NetworkSettings.Ports[newP] - if !exists || len(ports) != 1 { - t.Fatalf("unable to find port 80/tcp for %s", container) - } - host := ports[0].HostIP - port := ports[0].HostPort - - return &remoteFileServer{ - container: container, - image: image, - host: fmt.Sprintf("%s:%s", host, port), - ctx: ctx, - client: c, - } -} diff --git a/vendor/github.com/docker/docker/internal/test/fixtures/load/frozen.go b/vendor/github.com/docker/docker/internal/test/fixtures/load/frozen.go deleted file mode 100644 index 94f3680f9..000000000 --- a/vendor/github.com/docker/docker/internal/test/fixtures/load/frozen.go +++ /dev/null @@ -1,196 +0,0 @@ -package load // import "github.com/docker/docker/internal/test/fixtures/load" - -import ( - "bufio" - "bytes" - "context" - "os" - "os/exec" - "path/filepath" - "strings" - "sync" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/client" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/pkg/term" - "github.com/pkg/errors" -) - -const frozenImgDir = "/docker-frozen-images" - -// FrozenImagesLinux loads the frozen image set for the integration suite -// If the images are not available locally it will download them -// TODO: This loads whatever is in the frozen image dir, regardless of what -// images were passed in. 
If the images need to be downloaded, then it will respect -// the passed in images -func FrozenImagesLinux(client client.APIClient, images ...string) error { - var loadImages []struct{ srcName, destName string } - for _, img := range images { - if !imageExists(client, img) { - srcName := img - // hello-world:latest gets re-tagged as hello-world:frozen - // there are some tests that use hello-world:latest specifically so it pulls - // the image and hello-world:frozen is used for when we just want a super - // small image - if img == "hello-world:frozen" { - srcName = "hello-world:latest" - } - loadImages = append(loadImages, struct{ srcName, destName string }{ - srcName: srcName, - destName: img, - }) - } - } - if len(loadImages) == 0 { - // everything is loaded, we're done - return nil - } - - ctx := context.Background() - fi, err := os.Stat(frozenImgDir) - if err != nil || !fi.IsDir() { - srcImages := make([]string, 0, len(loadImages)) - for _, img := range loadImages { - srcImages = append(srcImages, img.srcName) - } - if err := pullImages(ctx, client, srcImages); err != nil { - return errors.Wrap(err, "error pulling image list") - } - } else { - if err := loadFrozenImages(ctx, client); err != nil { - return err - } - } - - for _, img := range loadImages { - if img.srcName != img.destName { - if err := client.ImageTag(ctx, img.srcName, img.destName); err != nil { - return errors.Wrapf(err, "failed to tag %s as %s", img.srcName, img.destName) - } - if _, err := client.ImageRemove(ctx, img.srcName, types.ImageRemoveOptions{}); err != nil { - return errors.Wrapf(err, "failed to remove %s", img.srcName) - } - } - } - return nil -} - -func imageExists(client client.APIClient, name string) bool { - _, _, err := client.ImageInspectWithRaw(context.Background(), name) - return err == nil -} - -func loadFrozenImages(ctx context.Context, client client.APIClient) error { - tar, err := exec.LookPath("tar") - if err != nil { - return errors.Wrap(err, "could not find tar binary") - } - tarCmd := exec.Command(tar, "-cC", frozenImgDir, ".") - out, err := tarCmd.StdoutPipe() - if err != nil { - return errors.Wrap(err, "error getting stdout pipe for tar command") - } - - errBuf := bytes.NewBuffer(nil) - tarCmd.Stderr = errBuf - tarCmd.Start() - defer tarCmd.Wait() - - resp, err := client.ImageLoad(ctx, out, true) - if err != nil { - return errors.Wrap(err, "failed to load frozen images") - } - defer resp.Body.Close() - fd, isTerminal := term.GetFdInfo(os.Stdout) - return jsonmessage.DisplayJSONMessagesStream(resp.Body, os.Stdout, fd, isTerminal, nil) -} - -func pullImages(ctx context.Context, client client.APIClient, images []string) error { - cwd, err := os.Getwd() - if err != nil { - return errors.Wrap(err, "error getting path to dockerfile") - } - dockerfile := os.Getenv("DOCKERFILE") - if dockerfile == "" { - dockerfile = "Dockerfile" - } - dockerfilePath := filepath.Join(filepath.Dir(filepath.Clean(cwd)), dockerfile) - pullRefs, err := readFrozenImageList(dockerfilePath, images) - if err != nil { - return errors.Wrap(err, "error reading frozen image list") - } - - var wg sync.WaitGroup - chErr := make(chan error, len(images)) - for tag, ref := range pullRefs { - wg.Add(1) - go func(tag, ref string) { - defer wg.Done() - if err := pullTagAndRemove(ctx, client, ref, tag); err != nil { - chErr <- err - return - } - }(tag, ref) - } - wg.Wait() - close(chErr) - return <-chErr -} - -func pullTagAndRemove(ctx context.Context, client client.APIClient, ref string, tag string) error { - resp, err := 
client.ImagePull(ctx, ref, types.ImagePullOptions{})
-	if err != nil {
-		return errors.Wrapf(err, "failed to pull %s", ref)
-	}
-	defer resp.Close()
-	fd, isTerminal := term.GetFdInfo(os.Stdout)
-	if err := jsonmessage.DisplayJSONMessagesStream(resp, os.Stdout, fd, isTerminal, nil); err != nil {
-		return err
-	}
-
-	if err := client.ImageTag(ctx, ref, tag); err != nil {
-		return errors.Wrapf(err, "failed to tag %s as %s", ref, tag)
-	}
-	_, err = client.ImageRemove(ctx, ref, types.ImageRemoveOptions{})
-	return errors.Wrapf(err, "failed to remove %s", ref)
-
-}
-
-func readFrozenImageList(dockerfilePath string, images []string) (map[string]string, error) {
-	f, err := os.Open(dockerfilePath)
-	if err != nil {
-		return nil, errors.Wrap(err, "error reading dockerfile")
-	}
-	defer f.Close()
-	ls := make(map[string]string)
-
-	scanner := bufio.NewScanner(f)
-	for scanner.Scan() {
-		line := strings.Fields(scanner.Text())
-		if len(line) < 3 {
-			continue
-		}
-		if !(line[0] == "RUN" && line[1] == "./contrib/download-frozen-image-v2.sh") {
-			continue
-		}
-
-		for scanner.Scan() {
-			img := strings.TrimSpace(scanner.Text())
-			img = strings.TrimSuffix(img, "\\")
-			img = strings.TrimSpace(img)
-			split := strings.Split(img, "@")
-			if len(split) < 2 {
-				break
-			}
-
-			for _, i := range images {
-				if split[0] == i {
-					ls[i] = img
-					break
-				}
-			}
-		}
-	}
-	return ls, nil
-}
diff --git a/vendor/github.com/docker/docker/internal/test/fixtures/plugin/basic/basic.go b/vendor/github.com/docker/docker/internal/test/fixtures/plugin/basic/basic.go
deleted file mode 100644
index 892272826..000000000
--- a/vendor/github.com/docker/docker/internal/test/fixtures/plugin/basic/basic.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package main
-
-import (
-	"fmt"
-	"net"
-	"net/http"
-	"os"
-	"path/filepath"
-)
-
-func main() {
-	p, err := filepath.Abs(filepath.Join("run", "docker", "plugins"))
-	if err != nil {
-		panic(err)
-	}
-	if err := os.MkdirAll(p, 0755); err != nil {
-		panic(err)
-	}
-	l, err := net.Listen("unix", filepath.Join(p, "basic.sock"))
-	if err != nil {
-		panic(err)
-	}
-
-	mux := http.NewServeMux()
-	server := http.Server{
-		Addr:    l.Addr().String(),
-		Handler: mux,
-	}
-	mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) {
-		w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1.1+json")
-		fmt.Fprintln(w, `{"Implements": ["dummy"]}`)
-	})
-	server.Serve(l)
-}
diff --git a/vendor/github.com/docker/docker/internal/test/fixtures/plugin/plugin.go b/vendor/github.com/docker/docker/internal/test/fixtures/plugin/plugin.go
deleted file mode 100644
index 523a261ad..000000000
--- a/vendor/github.com/docker/docker/internal/test/fixtures/plugin/plugin.go
+++ /dev/null
@@ -1,216 +0,0 @@
-package plugin // import "github.com/docker/docker/internal/test/fixtures/plugin"
-
-import (
-	"context"
-	"encoding/json"
-	"io"
-	"io/ioutil"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"time"
-
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/plugin"
-	"github.com/docker/docker/registry"
-	"github.com/pkg/errors"
-)
-
-// CreateOpt is used to change the default plugin config before
-// creating it
-type CreateOpt func(*Config)
-
-// Config wraps types.PluginConfig to provide some extra state for options,
-// allowing extra customizations on the plugin details, such as using a
-// custom binary to create the plugin with.
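The CreateOpt type above, together with the Config struct that continues right after this aside, is Go's functional-options pattern: Create applies defaults first, then lets each option mutate the config in order. A standalone sketch of the shape, with illustrative field names only:

package main

import "fmt"

// Config is a stand-in for the fixture's Config wrapper; binPath is the
// extra state beyond the embedded plugin config.
type Config struct {
	RepoName string
	binPath  string
}

// CreateOpt mutates a Config before the plugin is created.
type CreateOpt func(*Config)

// WithBinary overrides the binary the plugin is built from.
func WithBinary(bin string) CreateOpt {
	return func(cfg *Config) { cfg.binPath = bin }
}

// create applies defaults, then the caller's options, in order.
func create(name string, opts ...CreateOpt) Config {
	cfg := Config{RepoName: name, binPath: "/basic"}
	for _, o := range opts {
		o(&cfg)
	}
	return cfg
}

func main() {
	fmt.Printf("%+v\n", create("sample/plugin", WithBinary("/custom/bin")))
}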
-type Config struct { - *types.PluginConfig - binPath string -} - -// WithBinary is a CreateOpt to set an custom binary to create the plugin with. -// This binary must be statically compiled. -func WithBinary(bin string) CreateOpt { - return func(cfg *Config) { - cfg.binPath = bin - } -} - -// CreateClient is the interface used for `BuildPlugin` to interact with the -// daemon. -type CreateClient interface { - PluginCreate(context.Context, io.Reader, types.PluginCreateOptions) error -} - -// Create creates a new plugin with the specified name -func Create(ctx context.Context, c CreateClient, name string, opts ...CreateOpt) error { - tmpDir, err := ioutil.TempDir("", "create-test-plugin") - if err != nil { - return err - } - defer os.RemoveAll(tmpDir) - - tar, err := makePluginBundle(tmpDir, opts...) - if err != nil { - return err - } - defer tar.Close() - - ctx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - - return c.PluginCreate(ctx, tar, types.PluginCreateOptions{RepoName: name}) -} - -// CreateInRegistry makes a plugin (locally) and pushes it to a registry. -// This does not use a dockerd instance to create or push the plugin. -// If you just want to create a plugin in some daemon, use `Create`. -// -// This can be useful when testing plugins on swarm where you don't really want -// the plugin to exist on any of the daemons (immediately) and there needs to be -// some way to distribute the plugin. -func CreateInRegistry(ctx context.Context, repo string, auth *types.AuthConfig, opts ...CreateOpt) error { - tmpDir, err := ioutil.TempDir("", "create-test-plugin-local") - if err != nil { - return err - } - defer os.RemoveAll(tmpDir) - - inPath := filepath.Join(tmpDir, "plugin") - if err := os.MkdirAll(inPath, 0755); err != nil { - return errors.Wrap(err, "error creating plugin root") - } - - tar, err := makePluginBundle(inPath, opts...) 
- if err != nil { - return err - } - defer tar.Close() - - dummyExec := func(m *plugin.Manager) (plugin.Executor, error) { - return nil, nil - } - - regService, err := registry.NewService(registry.ServiceOptions{V2Only: true}) - if err != nil { - return err - } - - managerConfig := plugin.ManagerConfig{ - Store: plugin.NewStore(), - RegistryService: regService, - Root: filepath.Join(tmpDir, "root"), - ExecRoot: "/run/docker", // manager init fails if not set - CreateExecutor: dummyExec, - LogPluginEvent: func(id, name, action string) {}, // panics when not set - } - manager, err := plugin.NewManager(managerConfig) - if err != nil { - return errors.Wrap(err, "error creating plugin manager") - } - - ctx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - if err := manager.CreateFromContext(ctx, tar, &types.PluginCreateOptions{RepoName: repo}); err != nil { - return err - } - - if auth == nil { - auth = &types.AuthConfig{} - } - err = manager.Push(ctx, repo, nil, auth, ioutil.Discard) - return errors.Wrap(err, "error pushing plugin") -} - -func makePluginBundle(inPath string, opts ...CreateOpt) (io.ReadCloser, error) { - p := &types.PluginConfig{ - Interface: types.PluginConfigInterface{ - Socket: "basic.sock", - Types: []types.PluginInterfaceType{{Capability: "docker.dummy/1.0"}}, - }, - Entrypoint: []string{"/basic"}, - } - cfg := &Config{ - PluginConfig: p, - } - for _, o := range opts { - o(cfg) - } - if cfg.binPath == "" { - binPath, err := ensureBasicPluginBin() - if err != nil { - return nil, err - } - cfg.binPath = binPath - } - - configJSON, err := json.Marshal(p) - if err != nil { - return nil, err - } - if err := ioutil.WriteFile(filepath.Join(inPath, "config.json"), configJSON, 0644); err != nil { - return nil, err - } - if err := os.MkdirAll(filepath.Join(inPath, "rootfs", filepath.Dir(p.Entrypoint[0])), 0755); err != nil { - return nil, errors.Wrap(err, "error creating plugin rootfs dir") - } - - // Ensure the mount target paths exist - for _, m := range p.Mounts { - var stat os.FileInfo - if m.Source != nil { - stat, err = os.Stat(*m.Source) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - } - - if stat == nil || stat.IsDir() { - var mode os.FileMode = 0755 - if stat != nil { - mode = stat.Mode() - } - if err := os.MkdirAll(filepath.Join(inPath, "rootfs", m.Destination), mode); err != nil { - return nil, errors.Wrap(err, "error preparing plugin mount destination path") - } - } else { - if err := os.MkdirAll(filepath.Join(inPath, "rootfs", filepath.Dir(m.Destination)), 0755); err != nil { - return nil, errors.Wrap(err, "error preparing plugin mount destination dir") - } - f, err := os.Create(filepath.Join(inPath, "rootfs", m.Destination)) - if err != nil && !os.IsExist(err) { - return nil, errors.Wrap(err, "error preparing plugin mount destination file") - } - if f != nil { - f.Close() - } - } - } - if err := archive.NewDefaultArchiver().CopyFileWithTar(cfg.binPath, filepath.Join(inPath, "rootfs", p.Entrypoint[0])); err != nil { - return nil, errors.Wrap(err, "error copying plugin binary to rootfs path") - } - tar, err := archive.Tar(inPath, archive.Uncompressed) - return tar, errors.Wrap(err, "error making plugin archive") -} - -func ensureBasicPluginBin() (string, error) { - name := "docker-basic-plugin" - p, err := exec.LookPath(name) - if err == nil { - return p, nil - } - - goBin, err := exec.LookPath("go") - if err != nil { - return "", err - } - installPath := filepath.Join(os.Getenv("GOPATH"), "bin", name) - sourcePath := 
filepath.Join("github.com", "docker", "docker", "internal", "test", "fixtures", "plugin", "basic") - cmd := exec.Command(goBin, "build", "-o", installPath, sourcePath) - cmd.Env = append(cmd.Env, "GOPATH="+os.Getenv("GOPATH"), "CGO_ENABLED=0") - if out, err := cmd.CombinedOutput(); err != nil { - return "", errors.Wrapf(err, "error building basic plugin bin: %s", string(out)) - } - return installPath, nil -} diff --git a/vendor/github.com/docker/docker/internal/test/helper.go b/vendor/github.com/docker/docker/internal/test/helper.go deleted file mode 100644 index 1b9fd7509..000000000 --- a/vendor/github.com/docker/docker/internal/test/helper.go +++ /dev/null @@ -1,6 +0,0 @@ -package test - -// HelperT is a subset of testing.T that implements the Helper function -type HelperT interface { - Helper() -} diff --git a/vendor/github.com/docker/docker/internal/test/registry/ops.go b/vendor/github.com/docker/docker/internal/test/registry/ops.go deleted file mode 100644 index c004f3742..000000000 --- a/vendor/github.com/docker/docker/internal/test/registry/ops.go +++ /dev/null @@ -1,26 +0,0 @@ -package registry - -// Schema1 sets the registry to serve v1 api -func Schema1(c *Config) { - c.schema1 = true -} - -// Htpasswd sets the auth method with htpasswd -func Htpasswd(c *Config) { - c.auth = "htpasswd" -} - -// Token sets the auth method to token, with the specified token url -func Token(tokenURL string) func(*Config) { - return func(c *Config) { - c.auth = "token" - c.tokenURL = tokenURL - } -} - -// URL sets the registry url -func URL(registryURL string) func(*Config) { - return func(c *Config) { - c.registryURL = registryURL - } -} diff --git a/vendor/github.com/docker/docker/internal/test/registry/registry.go b/vendor/github.com/docker/docker/internal/test/registry/registry.go deleted file mode 100644 index 2e89c32e5..000000000 --- a/vendor/github.com/docker/docker/internal/test/registry/registry.go +++ /dev/null @@ -1,255 +0,0 @@ -package registry // import "github.com/docker/docker/internal/test/registry" - -import ( - "fmt" - "io/ioutil" - "net/http" - "os" - "os/exec" - "path/filepath" - "time" - - "github.com/docker/docker/internal/test" - "github.com/gotestyourself/gotestyourself/assert" - "github.com/opencontainers/go-digest" -) - -const ( - // V2binary is the name of the registry v2 binary - V2binary = "registry-v2" - // V2binarySchema1 is the name of the registry that serve schema1 - V2binarySchema1 = "registry-v2-schema1" - // DefaultURL is the default url that will be used by the registry (if not specified otherwise) - DefaultURL = "127.0.0.1:5000" -) - -type testingT interface { - assert.TestingT - logT - Fatal(...interface{}) - Fatalf(string, ...interface{}) -} - -type logT interface { - Logf(string, ...interface{}) -} - -// V2 represent a registry version 2 -type V2 struct { - cmd *exec.Cmd - registryURL string - dir string - auth string - username string - password string - email string -} - -// Config contains the test registry configuration -type Config struct { - schema1 bool - auth string - tokenURL string - registryURL string -} - -// NewV2 creates a v2 registry server -func NewV2(t testingT, ops ...func(*Config)) *V2 { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - c := &Config{ - registryURL: DefaultURL, - } - for _, op := range ops { - op(c) - } - tmp, err := ioutil.TempDir("", "registry-test-") - assert.NilError(t, err) - template := `version: 0.1 -loglevel: debug -storage: - filesystem: - rootdirectory: %s -http: - addr: %s -%s` - var ( - authTemplate 
string - username string - password string - email string - ) - switch c.auth { - case "htpasswd": - htpasswdPath := filepath.Join(tmp, "htpasswd") - // generated with: htpasswd -Bbn testuser testpassword - userpasswd := "testuser:$2y$05$sBsSqk0OpSD1uTZkHXc4FeJ0Z70wLQdAX/82UiHuQOKbNbBrzs63m" - username = "testuser" - password = "testpassword" - email = "test@test.org" - err := ioutil.WriteFile(htpasswdPath, []byte(userpasswd), os.FileMode(0644)) - assert.NilError(t, err) - authTemplate = fmt.Sprintf(`auth: - htpasswd: - realm: basic-realm - path: %s -`, htpasswdPath) - case "token": - authTemplate = fmt.Sprintf(`auth: - token: - realm: %s - service: "registry" - issuer: "auth-registry" - rootcertbundle: "fixtures/registry/cert.pem" -`, c.tokenURL) - } - - confPath := filepath.Join(tmp, "config.yaml") - config, err := os.Create(confPath) - assert.NilError(t, err) - defer config.Close() - - if _, err := fmt.Fprintf(config, template, tmp, c.registryURL, authTemplate); err != nil { - // FIXME(vdemeester) use a defer/clean func - os.RemoveAll(tmp) - t.Fatal(err) - } - - binary := V2binary - if c.schema1 { - binary = V2binarySchema1 - } - cmd := exec.Command(binary, confPath) - if err := cmd.Start(); err != nil { - // FIXME(vdemeester) use a defer/clean func - os.RemoveAll(tmp) - t.Fatal(err) - } - return &V2{ - cmd: cmd, - dir: tmp, - auth: c.auth, - username: username, - password: password, - email: email, - registryURL: c.registryURL, - } -} - -// WaitReady waits for the registry to be ready to serve requests (or fail after a while) -func (r *V2) WaitReady(t testingT) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - var err error - for i := 0; i != 50; i++ { - if err = r.Ping(); err == nil { - return - } - time.Sleep(100 * time.Millisecond) - } - t.Fatalf("timeout waiting for test registry to become available: %v", err) -} - -// Ping sends an http request to the current registry, and fail if it doesn't respond correctly -func (r *V2) Ping() error { - // We always ping through HTTP for our test registry. - resp, err := http.Get(fmt.Sprintf("http://%s/v2/", r.registryURL)) - if err != nil { - return err - } - resp.Body.Close() - - fail := resp.StatusCode != http.StatusOK - if r.auth != "" { - // unauthorized is a _good_ status when pinging v2/ and it needs auth - fail = fail && resp.StatusCode != http.StatusUnauthorized - } - if fail { - return fmt.Errorf("registry ping replied with an unexpected status code %d", resp.StatusCode) - } - return nil -} - -// Close kills the registry server -func (r *V2) Close() { - r.cmd.Process.Kill() - r.cmd.Process.Wait() - os.RemoveAll(r.dir) -} - -func (r *V2) getBlobFilename(blobDigest digest.Digest) string { - // Split the digest into its algorithm and hex components. - dgstAlg, dgstHex := blobDigest.Algorithm(), blobDigest.Hex() - - // The path to the target blob data looks something like: - // baseDir + "docker/registry/v2/blobs/sha256/a3/a3ed...46d4/data" - return fmt.Sprintf("%s/docker/registry/v2/blobs/%s/%s/%s/data", r.dir, dgstAlg, dgstHex[:2], dgstHex) -} - -// ReadBlobContents read the file corresponding to the specified digest -func (r *V2) ReadBlobContents(t assert.TestingT, blobDigest digest.Digest) []byte { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - // Load the target manifest blob. 
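WaitReady and Ping above implement a common fixture idiom: bounded polling, where an unauthorized response still counts as liveness once auth is configured, since it proves the server is answering. A reduced standalone sketch of that loop (waitReady is a hypothetical helper, standard library only; this simplified version accepts 401 unconditionally):

package main

import (
	"fmt"
	"net/http"
	"time"
)

// waitReady polls url a bounded number of times, mirroring the
// WaitReady/Ping loop above, and reports the last error on timeout.
func waitReady(url string, attempts int, delay time.Duration) error {
	var lastErr error
	for i := 0; i < attempts; i++ {
		resp, err := http.Get(url)
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusUnauthorized {
				return nil
			}
			err = fmt.Errorf("unexpected status %d", resp.StatusCode)
		}
		lastErr = err
		time.Sleep(delay)
	}
	return fmt.Errorf("service not ready: %v", lastErr)
}

func main() {
	fmt.Println(waitReady("http://127.0.0.1:5000/v2/", 3, 100*time.Millisecond))
}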
- manifestBlob, err := ioutil.ReadFile(r.getBlobFilename(blobDigest)) - assert.NilError(t, err, "unable to read blob") - return manifestBlob -} - -// WriteBlobContents write the file corresponding to the specified digest with the given content -func (r *V2) WriteBlobContents(t assert.TestingT, blobDigest digest.Digest, data []byte) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - err := ioutil.WriteFile(r.getBlobFilename(blobDigest), data, os.FileMode(0644)) - assert.NilError(t, err, "unable to write malicious data blob") -} - -// TempMoveBlobData moves the existing data file aside, so that we can replace it with a -// malicious blob of data for example. -func (r *V2) TempMoveBlobData(t testingT, blobDigest digest.Digest) (undo func()) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - tempFile, err := ioutil.TempFile("", "registry-temp-blob-") - assert.NilError(t, err, "unable to get temporary blob file") - tempFile.Close() - - blobFilename := r.getBlobFilename(blobDigest) - - // Move the existing data file aside, so that we can replace it with a - // another blob of data. - if err := os.Rename(blobFilename, tempFile.Name()); err != nil { - // FIXME(vdemeester) use a defer/clean func - os.Remove(tempFile.Name()) - t.Fatalf("unable to move data blob: %s", err) - } - - return func() { - os.Rename(tempFile.Name(), blobFilename) - os.Remove(tempFile.Name()) - } -} - -// Username returns the configured user name of the server -func (r *V2) Username() string { - return r.username -} - -// Password returns the configured password of the server -func (r *V2) Password() string { - return r.password -} - -// Email returns the configured email of the server -func (r *V2) Email() string { - return r.email -} - -// Path returns the path where the registry write data -func (r *V2) Path() string { - return filepath.Join(r.dir, "docker", "registry", "v2") -} diff --git a/vendor/github.com/docker/docker/internal/test/registry/registry_mock.go b/vendor/github.com/docker/docker/internal/test/registry/registry_mock.go deleted file mode 100644 index d139401a6..000000000 --- a/vendor/github.com/docker/docker/internal/test/registry/registry_mock.go +++ /dev/null @@ -1,71 +0,0 @@ -package registry // import "github.com/docker/docker/internal/test/registry" - -import ( - "net/http" - "net/http/httptest" - "regexp" - "strings" - "sync" - - "github.com/docker/docker/internal/test" -) - -type handlerFunc func(w http.ResponseWriter, r *http.Request) - -// Mock represent a registry mock -type Mock struct { - server *httptest.Server - hostport string - handlers map[string]handlerFunc - mu sync.Mutex -} - -// RegisterHandler register the specified handler for the registry mock -func (tr *Mock) RegisterHandler(path string, h handlerFunc) { - tr.mu.Lock() - defer tr.mu.Unlock() - tr.handlers[path] = h -} - -// NewMock creates a registry mock -func NewMock(t testingT) (*Mock, error) { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - testReg := &Mock{handlers: make(map[string]handlerFunc)} - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - url := r.URL.String() - - var matched bool - var err error - for re, function := range testReg.handlers { - matched, err = regexp.MatchString(re, url) - if err != nil { - t.Fatal("Error with handler regexp") - } - if matched { - function(w, r) - break - } - } - - if !matched { - t.Fatalf("Unable to match %s with regexp", url) - } - })) - - testReg.server = ts - testReg.hostport = strings.Replace(ts.URL, "http://", 
"", 1) - return testReg, nil -} - -// URL returns the url of the registry -func (tr *Mock) URL() string { - return tr.hostport -} - -// Close closes mock and releases resources -func (tr *Mock) Close() { - tr.server.Close() -} diff --git a/vendor/github.com/docker/docker/internal/test/request/npipe.go b/vendor/github.com/docker/docker/internal/test/request/npipe.go deleted file mode 100644 index e6ab03945..000000000 --- a/vendor/github.com/docker/docker/internal/test/request/npipe.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !windows - -package request - -import ( - "net" - "time" -) - -func npipeDial(path string, timeout time.Duration) (net.Conn, error) { - panic("npipe protocol only supported on Windows") -} diff --git a/vendor/github.com/docker/docker/internal/test/request/npipe_windows.go b/vendor/github.com/docker/docker/internal/test/request/npipe_windows.go deleted file mode 100644 index a268aac92..000000000 --- a/vendor/github.com/docker/docker/internal/test/request/npipe_windows.go +++ /dev/null @@ -1,12 +0,0 @@ -package request - -import ( - "net" - "time" - - "github.com/Microsoft/go-winio" -) - -func npipeDial(path string, timeout time.Duration) (net.Conn, error) { - return winio.DialPipe(path, &timeout) -} diff --git a/vendor/github.com/docker/docker/internal/test/request/ops.go b/vendor/github.com/docker/docker/internal/test/request/ops.go deleted file mode 100644 index c85308c47..000000000 --- a/vendor/github.com/docker/docker/internal/test/request/ops.go +++ /dev/null @@ -1,78 +0,0 @@ -package request - -import ( - "bytes" - "encoding/json" - "io" - "io/ioutil" - "net/http" - "strings" -) - -// Options defines request options, like request modifiers and which host to target -type Options struct { - host string - requestModifiers []func(*http.Request) error -} - -// Host creates a modifier that sets the specified host as the request URL host -func Host(host string) func(*Options) { - return func(o *Options) { - o.host = host - } -} - -// With adds a request modifier to the options -func With(f func(*http.Request) error) func(*Options) { - return func(o *Options) { - o.requestModifiers = append(o.requestModifiers, f) - } -} - -// Method creates a modifier that sets the specified string as the request method -func Method(method string) func(*Options) { - return With(func(req *http.Request) error { - req.Method = method - return nil - }) -} - -// RawString sets the specified string as body for the request -func RawString(content string) func(*Options) { - return RawContent(ioutil.NopCloser(strings.NewReader(content))) -} - -// RawContent sets the specified reader as body for the request -func RawContent(reader io.ReadCloser) func(*Options) { - return With(func(req *http.Request) error { - req.Body = reader - return nil - }) -} - -// ContentType sets the specified Content-Type request header -func ContentType(contentType string) func(*Options) { - return With(func(req *http.Request) error { - req.Header.Set("Content-Type", contentType) - return nil - }) -} - -// JSON sets the Content-Type request header to json -func JSON(o *Options) { - ContentType("application/json")(o) -} - -// JSONBody creates a modifier that encodes the specified data to a JSON string and set it as request body. It also sets -// the Content-Type header of the request. 
-func JSONBody(data interface{}) func(*Options) { - return With(func(req *http.Request) error { - jsonData := bytes.NewBuffer(nil) - if err := json.NewEncoder(jsonData).Encode(data); err != nil { - return err - } - req.Body = ioutil.NopCloser(jsonData) - req.Header.Set("Content-Type", "application/json") - return nil - }) -} diff --git a/vendor/github.com/docker/docker/internal/test/request/request.go b/vendor/github.com/docker/docker/internal/test/request/request.go deleted file mode 100644 index 00450d94a..000000000 --- a/vendor/github.com/docker/docker/internal/test/request/request.go +++ /dev/null @@ -1,218 +0,0 @@ -package request // import "github.com/docker/docker/internal/test/request" - -import ( - "context" - "crypto/tls" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/url" - "os" - "path/filepath" - "time" - - "github.com/docker/docker/client" - "github.com/docker/docker/internal/test" - "github.com/docker/docker/internal/test/environment" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" - "github.com/gotestyourself/gotestyourself/assert" - "github.com/pkg/errors" -) - -// NewAPIClient returns a docker API client configured from environment variables -func NewAPIClient(t assert.TestingT, ops ...func(*client.Client) error) client.APIClient { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - ops = append([]func(*client.Client) error{client.FromEnv}, ops...) - clt, err := client.NewClientWithOpts(ops...) - assert.NilError(t, err) - return clt -} - -// DaemonTime provides the current time on the daemon host -func DaemonTime(ctx context.Context, t assert.TestingT, client client.APIClient, testEnv *environment.Execution) time.Time { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - if testEnv.IsLocalDaemon() { - return time.Now() - } - - info, err := client.Info(ctx) - assert.NilError(t, err) - - dt, err := time.Parse(time.RFC3339Nano, info.SystemTime) - assert.NilError(t, err, "invalid time format in GET /info response") - return dt -} - -// DaemonUnixTime returns the current time on the daemon host with nanoseconds precision. -// It return the time formatted how the client sends timestamps to the server. -func DaemonUnixTime(ctx context.Context, t assert.TestingT, client client.APIClient, testEnv *environment.Execution) string { - if ht, ok := t.(test.HelperT); ok { - ht.Helper() - } - dt := DaemonTime(ctx, t, client, testEnv) - return fmt.Sprintf("%d.%09d", dt.Unix(), int64(dt.Nanosecond())) -} - -// Post creates and execute a POST request on the specified host and endpoint, with the specified request modifiers -func Post(endpoint string, modifiers ...func(*Options)) (*http.Response, io.ReadCloser, error) { - return Do(endpoint, append(modifiers, Method(http.MethodPost))...) -} - -// Delete creates and execute a DELETE request on the specified host and endpoint, with the specified request modifiers -func Delete(endpoint string, modifiers ...func(*Options)) (*http.Response, io.ReadCloser, error) { - return Do(endpoint, append(modifiers, Method(http.MethodDelete))...) -} - -// Get creates and execute a GET request on the specified host and endpoint, with the specified request modifiers -func Get(endpoint string, modifiers ...func(*Options)) (*http.Response, io.ReadCloser, error) { - return Do(endpoint, modifiers...) 
-} - -// Do creates and execute a request on the specified endpoint, with the specified request modifiers -func Do(endpoint string, modifiers ...func(*Options)) (*http.Response, io.ReadCloser, error) { - opts := &Options{ - host: DaemonHost(), - } - for _, mod := range modifiers { - mod(opts) - } - req, err := newRequest(endpoint, opts) - if err != nil { - return nil, nil, err - } - client, err := newHTTPClient(opts.host) - if err != nil { - return nil, nil, err - } - resp, err := client.Do(req) - var body io.ReadCloser - if resp != nil { - body = ioutils.NewReadCloserWrapper(resp.Body, func() error { - defer resp.Body.Close() - return nil - }) - } - return resp, body, err -} - -// ReadBody read the specified ReadCloser content and returns it -func ReadBody(b io.ReadCloser) ([]byte, error) { - defer b.Close() - return ioutil.ReadAll(b) -} - -// newRequest creates a new http Request to the specified host and endpoint, with the specified request modifiers -func newRequest(endpoint string, opts *Options) (*http.Request, error) { - hostURL, err := client.ParseHostURL(opts.host) - if err != nil { - return nil, errors.Wrapf(err, "failed parsing url %q", opts.host) - } - req, err := http.NewRequest("GET", endpoint, nil) - if err != nil { - return nil, errors.Wrap(err, "failed to create request") - } - - if os.Getenv("DOCKER_TLS_VERIFY") != "" { - req.URL.Scheme = "https" - } else { - req.URL.Scheme = "http" - } - req.URL.Host = hostURL.Host - - for _, config := range opts.requestModifiers { - if err := config(req); err != nil { - return nil, err - } - } - - return req, nil -} - -// newHTTPClient creates an http client for the specific host -// TODO: Share more code with client.defaultHTTPClient -func newHTTPClient(host string) (*http.Client, error) { - // FIXME(vdemeester) 10*time.Second timeout of SockRequest… ? - hostURL, err := client.ParseHostURL(host) - if err != nil { - return nil, err - } - transport := new(http.Transport) - if hostURL.Scheme == "tcp" && os.Getenv("DOCKER_TLS_VERIFY") != "" { - // Setup the socket TLS configuration. 
- tlsConfig, err := getTLSConfig() - if err != nil { - return nil, err - } - transport = &http.Transport{TLSClientConfig: tlsConfig} - } - transport.DisableKeepAlives = true - err = sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host) - return &http.Client{Transport: transport}, err -} - -func getTLSConfig() (*tls.Config, error) { - dockerCertPath := os.Getenv("DOCKER_CERT_PATH") - - if dockerCertPath == "" { - return nil, errors.New("DOCKER_TLS_VERIFY specified, but no DOCKER_CERT_PATH environment variable") - } - - option := &tlsconfig.Options{ - CAFile: filepath.Join(dockerCertPath, "ca.pem"), - CertFile: filepath.Join(dockerCertPath, "cert.pem"), - KeyFile: filepath.Join(dockerCertPath, "key.pem"), - } - tlsConfig, err := tlsconfig.Client(*option) - if err != nil { - return nil, err - } - - return tlsConfig, nil -} - -// DaemonHost return the daemon host string for this test execution -func DaemonHost() string { - daemonURLStr := "unix://" + opts.DefaultUnixSocket - if daemonHostVar := os.Getenv("DOCKER_HOST"); daemonHostVar != "" { - daemonURLStr = daemonHostVar - } - return daemonURLStr -} - -// SockConn opens a connection on the specified socket -func SockConn(timeout time.Duration, daemon string) (net.Conn, error) { - daemonURL, err := url.Parse(daemon) - if err != nil { - return nil, errors.Wrapf(err, "could not parse url %q", daemon) - } - - var c net.Conn - switch daemonURL.Scheme { - case "npipe": - return npipeDial(daemonURL.Path, timeout) - case "unix": - return net.DialTimeout(daemonURL.Scheme, daemonURL.Path, timeout) - case "tcp": - if os.Getenv("DOCKER_TLS_VERIFY") != "" { - // Setup the socket TLS configuration. - tlsConfig, err := getTLSConfig() - if err != nil { - return nil, err - } - dialer := &net.Dialer{Timeout: timeout} - return tls.DialWithDialer(dialer, daemonURL.Scheme, daemonURL.Host, tlsConfig) - } - return net.DialTimeout(daemonURL.Scheme, daemonURL.Host, timeout) - default: - return c, errors.Errorf("unknown scheme %v (%s)", daemonURL.Scheme, daemon) - } -} diff --git a/vendor/github.com/docker/docker/internal/testutil/helpers.go b/vendor/github.com/docker/docker/internal/testutil/helpers.go deleted file mode 100644 index 38cd1693f..000000000 --- a/vendor/github.com/docker/docker/internal/testutil/helpers.go +++ /dev/null @@ -1,17 +0,0 @@ -package testutil // import "github.com/docker/docker/internal/testutil" - -import ( - "io" -) - -// DevZero acts like /dev/zero but in an OS-independent fashion. -var DevZero io.Reader = devZero{} - -type devZero struct{} - -func (d devZero) Read(p []byte) (n int, err error) { - for i := range p { - p[i] = 0 - } - return len(p), nil -} diff --git a/vendor/github.com/docker/docker/internal/testutil/stringutils.go b/vendor/github.com/docker/docker/internal/testutil/stringutils.go deleted file mode 100644 index 574aeb51f..000000000 --- a/vendor/github.com/docker/docker/internal/testutil/stringutils.go +++ /dev/null @@ -1,14 +0,0 @@ -package testutil // import "github.com/docker/docker/internal/testutil" - -import "math/rand" - -// GenerateRandomAlphaOnlyString generates an alphabetical random string with length n. 
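Looking back at the request helpers that ended just above: a hypothetical caller composes them like this (assumes a reachable daemon and the import path from the package header; the endpoint and payload are made up):

package request_test

import (
	"net/http"
	"testing"

	"github.com/docker/docker/internal/test/request"
)

// TestCreateContainer sketches how Post and JSONBody compose: each
// modifier edits the outgoing *http.Request before Do sends it to
// the host resolved by DaemonHost().
func TestCreateContainer(t *testing.T) {
	res, body, err := request.Post("/containers/create",
		request.JSONBody(map[string]interface{}{"Image": "busybox"}),
	)
	if err != nil {
		t.Fatal(err)
	}
	defer body.Close()
	if res.StatusCode != http.StatusCreated {
		t.Fatalf("expected %d, got %d", http.StatusCreated, res.StatusCode)
	}
}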
-func GenerateRandomAlphaOnlyString(n int) string { - // make a really long string - letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") - b := make([]byte, n) - for i := range b { - b[i] = letters[rand.Intn(len(letters))] - } - return string(b) -} diff --git a/vendor/github.com/docker/docker/layer/empty.go b/vendor/github.com/docker/docker/layer/empty.go deleted file mode 100644 index c81c70214..000000000 --- a/vendor/github.com/docker/docker/layer/empty.go +++ /dev/null @@ -1,61 +0,0 @@ -package layer // import "github.com/docker/docker/layer" - -import ( - "archive/tar" - "bytes" - "fmt" - "io" - "io/ioutil" -) - -// DigestSHA256EmptyTar is the canonical sha256 digest of empty tar file - -// (1024 NULL bytes) -const DigestSHA256EmptyTar = DiffID("sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef") - -type emptyLayer struct{} - -// EmptyLayer is a layer that corresponds to empty tar. -var EmptyLayer = &emptyLayer{} - -func (el *emptyLayer) TarStream() (io.ReadCloser, error) { - buf := new(bytes.Buffer) - tarWriter := tar.NewWriter(buf) - tarWriter.Close() - return ioutil.NopCloser(buf), nil -} - -func (el *emptyLayer) TarStreamFrom(p ChainID) (io.ReadCloser, error) { - if p == "" { - return el.TarStream() - } - return nil, fmt.Errorf("can't get parent tar stream of an empty layer") -} - -func (el *emptyLayer) ChainID() ChainID { - return ChainID(DigestSHA256EmptyTar) -} - -func (el *emptyLayer) DiffID() DiffID { - return DigestSHA256EmptyTar -} - -func (el *emptyLayer) Parent() Layer { - return nil -} - -func (el *emptyLayer) Size() (size int64, err error) { - return 0, nil -} - -func (el *emptyLayer) DiffSize() (size int64, err error) { - return 0, nil -} - -func (el *emptyLayer) Metadata() (map[string]string, error) { - return make(map[string]string), nil -} - -// IsEmpty returns true if the layer is an EmptyLayer -func IsEmpty(diffID DiffID) bool { - return diffID == DigestSHA256EmptyTar -} diff --git a/vendor/github.com/docker/docker/layer/filestore.go b/vendor/github.com/docker/docker/layer/filestore.go deleted file mode 100644 index b1cbb8016..000000000 --- a/vendor/github.com/docker/docker/layer/filestore.go +++ /dev/null @@ -1,355 +0,0 @@ -package layer // import "github.com/docker/docker/layer" - -import ( - "compress/gzip" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "regexp" - "strconv" - "strings" - - "github.com/docker/distribution" - "github.com/docker/docker/pkg/ioutils" - "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" -) - -var ( - stringIDRegexp = regexp.MustCompile(`^[a-f0-9]{64}(-init)?$`) - supportedAlgorithms = []digest.Algorithm{ - digest.SHA256, - // digest.SHA384, // Currently not used - // digest.SHA512, // Currently not used - } -) - -type fileMetadataStore struct { - root string -} - -type fileMetadataTransaction struct { - store *fileMetadataStore - ws *ioutils.AtomicWriteSet -} - -// newFSMetadataStore returns an instance of a metadata store -// which is backed by files on disk using the provided root -// as the root of metadata files. 
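From empty.go above: DigestSHA256EmptyTar is pinned to the hash of 1024 NUL bytes because closing an entry-less tar writer emits exactly the end-of-archive marker, two 512-byte zero blocks. A quick standalone check of that claim:

package main

import (
	"archive/tar"
	"bytes"
	"crypto/sha256"
	"fmt"
)

func main() {
	// An empty tar archive is just the end-of-archive marker:
	// two 512-byte blocks of NUL bytes.
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	tw.Close()

	fmt.Println(buf.Len()) // 1024
	// Prints the digest empty.go pins as DigestSHA256EmptyTar.
	fmt.Printf("sha256:%x\n", sha256.Sum256(buf.Bytes()))
}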
-func newFSMetadataStore(root string) (*fileMetadataStore, error) { - if err := os.MkdirAll(root, 0700); err != nil { - return nil, err - } - return &fileMetadataStore{ - root: root, - }, nil -} - -func (fms *fileMetadataStore) getLayerDirectory(layer ChainID) string { - dgst := digest.Digest(layer) - return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Hex()) -} - -func (fms *fileMetadataStore) getLayerFilename(layer ChainID, filename string) string { - return filepath.Join(fms.getLayerDirectory(layer), filename) -} - -func (fms *fileMetadataStore) getMountDirectory(mount string) string { - return filepath.Join(fms.root, "mounts", mount) -} - -func (fms *fileMetadataStore) getMountFilename(mount, filename string) string { - return filepath.Join(fms.getMountDirectory(mount), filename) -} - -func (fms *fileMetadataStore) StartTransaction() (*fileMetadataTransaction, error) { - tmpDir := filepath.Join(fms.root, "tmp") - if err := os.MkdirAll(tmpDir, 0755); err != nil { - return nil, err - } - ws, err := ioutils.NewAtomicWriteSet(tmpDir) - if err != nil { - return nil, err - } - - return &fileMetadataTransaction{ - store: fms, - ws: ws, - }, nil -} - -func (fm *fileMetadataTransaction) SetSize(size int64) error { - content := fmt.Sprintf("%d", size) - return fm.ws.WriteFile("size", []byte(content), 0644) -} - -func (fm *fileMetadataTransaction) SetParent(parent ChainID) error { - return fm.ws.WriteFile("parent", []byte(digest.Digest(parent).String()), 0644) -} - -func (fm *fileMetadataTransaction) SetDiffID(diff DiffID) error { - return fm.ws.WriteFile("diff", []byte(digest.Digest(diff).String()), 0644) -} - -func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error { - return fm.ws.WriteFile("cache-id", []byte(cacheID), 0644) -} - -func (fm *fileMetadataTransaction) SetDescriptor(ref distribution.Descriptor) error { - jsonRef, err := json.Marshal(ref) - if err != nil { - return err - } - return fm.ws.WriteFile("descriptor.json", jsonRef, 0644) -} - -func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) { - f, err := fm.ws.FileWriter("tar-split.json.gz", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - return nil, err - } - var wc io.WriteCloser - if compressInput { - wc = gzip.NewWriter(f) - } else { - wc = f - } - - return ioutils.NewWriteCloserWrapper(wc, func() error { - wc.Close() - return f.Close() - }), nil -} - -func (fm *fileMetadataTransaction) Commit(layer ChainID) error { - finalDir := fm.store.getLayerDirectory(layer) - if err := os.MkdirAll(filepath.Dir(finalDir), 0755); err != nil { - return err - } - - return fm.ws.Commit(finalDir) -} - -func (fm *fileMetadataTransaction) Cancel() error { - return fm.ws.Cancel() -} - -func (fm *fileMetadataTransaction) String() string { - return fm.ws.String() -} - -func (fms *fileMetadataStore) GetSize(layer ChainID) (int64, error) { - content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "size")) - if err != nil { - return 0, err - } - - size, err := strconv.ParseInt(string(content), 10, 64) - if err != nil { - return 0, err - } - - return size, nil -} - -func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) { - content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "parent")) - if err != nil { - if os.IsNotExist(err) { - return "", nil - } - return "", err - } - - dgst, err := digest.Parse(strings.TrimSpace(string(content))) - if err != nil { - return "", err - } - - return ChainID(dgst), nil -} - -func (fms *fileMetadataStore) 
GetDiffID(layer ChainID) (DiffID, error) { - content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "diff")) - if err != nil { - return "", err - } - - dgst, err := digest.Parse(strings.TrimSpace(string(content))) - if err != nil { - return "", err - } - - return DiffID(dgst), nil -} - -func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) { - contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "cache-id")) - if err != nil { - return "", err - } - content := strings.TrimSpace(string(contentBytes)) - - if !stringIDRegexp.MatchString(content) { - return "", errors.New("invalid cache id value") - } - - return content, nil -} - -func (fms *fileMetadataStore) GetDescriptor(layer ChainID) (distribution.Descriptor, error) { - content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "descriptor.json")) - if err != nil { - if os.IsNotExist(err) { - // only return empty descriptor to represent what is stored - return distribution.Descriptor{}, nil - } - return distribution.Descriptor{}, err - } - - var ref distribution.Descriptor - err = json.Unmarshal(content, &ref) - if err != nil { - return distribution.Descriptor{}, err - } - return ref, err -} - -func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) { - fz, err := os.Open(fms.getLayerFilename(layer, "tar-split.json.gz")) - if err != nil { - return nil, err - } - f, err := gzip.NewReader(fz) - if err != nil { - fz.Close() - return nil, err - } - - return ioutils.NewReadCloserWrapper(f, func() error { - f.Close() - return fz.Close() - }), nil -} - -func (fms *fileMetadataStore) SetMountID(mount string, mountID string) error { - if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { - return err - } - return ioutil.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0644) -} - -func (fms *fileMetadataStore) SetInitID(mount string, init string) error { - if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { - return err - } - return ioutil.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0644) -} - -func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error { - if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { - return err - } - return ioutil.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0644) -} - -func (fms *fileMetadataStore) GetMountID(mount string) (string, error) { - contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "mount-id")) - if err != nil { - return "", err - } - content := strings.TrimSpace(string(contentBytes)) - - if !stringIDRegexp.MatchString(content) { - return "", errors.New("invalid mount id value") - } - - return content, nil -} - -func (fms *fileMetadataStore) GetInitID(mount string) (string, error) { - contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "init-id")) - if err != nil { - if os.IsNotExist(err) { - return "", nil - } - return "", err - } - content := strings.TrimSpace(string(contentBytes)) - - if !stringIDRegexp.MatchString(content) { - return "", errors.New("invalid init id value") - } - - return content, nil -} - -func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) { - content, err := ioutil.ReadFile(fms.getMountFilename(mount, "parent")) - if err != nil { - if os.IsNotExist(err) { - return "", nil - } - return "", err - } - - dgst, err := digest.Parse(strings.TrimSpace(string(content))) - if err != nil { - return "", err - } - - return 
ChainID(dgst), nil -} - -func (fms *fileMetadataStore) List() ([]ChainID, []string, error) { - var ids []ChainID - for _, algorithm := range supportedAlgorithms { - fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, string(algorithm))) - if err != nil { - if os.IsNotExist(err) { - continue - } - return nil, nil, err - } - - for _, fi := range fileInfos { - if fi.IsDir() && fi.Name() != "mounts" { - dgst := digest.NewDigestFromHex(string(algorithm), fi.Name()) - if err := dgst.Validate(); err != nil { - logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name()) - } else { - ids = append(ids, ChainID(dgst)) - } - } - } - } - - fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, "mounts")) - if err != nil { - if os.IsNotExist(err) { - return ids, []string{}, nil - } - return nil, nil, err - } - - var mounts []string - for _, fi := range fileInfos { - if fi.IsDir() { - mounts = append(mounts, fi.Name()) - } - } - - return ids, mounts, nil -} - -func (fms *fileMetadataStore) Remove(layer ChainID) error { - return os.RemoveAll(fms.getLayerDirectory(layer)) -} - -func (fms *fileMetadataStore) RemoveMount(mount string) error { - return os.RemoveAll(fms.getMountDirectory(mount)) -} diff --git a/vendor/github.com/docker/docker/layer/filestore_unix.go b/vendor/github.com/docker/docker/layer/filestore_unix.go deleted file mode 100644 index 68e7f9077..000000000 --- a/vendor/github.com/docker/docker/layer/filestore_unix.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !windows - -package layer // import "github.com/docker/docker/layer" - -import "runtime" - -// setOS writes the "os" file to the layer filestore -func (fm *fileMetadataTransaction) setOS(os string) error { - return nil -} - -// getOS reads the "os" file from the layer filestore -func (fms *fileMetadataStore) getOS(layer ChainID) (string, error) { - return runtime.GOOS, nil -} diff --git a/vendor/github.com/docker/docker/layer/filestore_windows.go b/vendor/github.com/docker/docker/layer/filestore_windows.go deleted file mode 100644 index cecad426c..000000000 --- a/vendor/github.com/docker/docker/layer/filestore_windows.go +++ /dev/null @@ -1,35 +0,0 @@ -package layer // import "github.com/docker/docker/layer" - -import ( - "fmt" - "io/ioutil" - "os" - "strings" -) - -// setOS writes the "os" file to the layer filestore -func (fm *fileMetadataTransaction) setOS(os string) error { - if os == "" { - return nil - } - return fm.ws.WriteFile("os", []byte(os), 0644) -} - -// getOS reads the "os" file from the layer filestore -func (fms *fileMetadataStore) getOS(layer ChainID) (string, error) { - contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "os")) - if err != nil { - // For backwards compatibility, the os file may not exist. Default to "windows" if missing. - if os.IsNotExist(err) { - return "windows", nil - } - return "", err - } - content := strings.TrimSpace(string(contentBytes)) - - if content != "windows" && content != "linux" { - return "", fmt.Errorf("invalid operating system value: %s", content) - } - - return content, nil -} diff --git a/vendor/github.com/docker/docker/layer/layer.go b/vendor/github.com/docker/docker/layer/layer.go deleted file mode 100644 index d0c7fa860..000000000 --- a/vendor/github.com/docker/docker/layer/layer.go +++ /dev/null @@ -1,237 +0,0 @@ -// Package layer is package for managing read-only -// and read-write mounts on the union file system -// driver. Read-only mounts are referenced using a -// content hash and are protected from mutation in -// the exposed interface. 
The tar format is used -// to create read-only layers and export both -// read-only and writable layers. The exported -// tar data for a read-only layer should match -// the tar used to create the layer. -package layer // import "github.com/docker/docker/layer" - -import ( - "errors" - "io" - - "github.com/docker/distribution" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/containerfs" - "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" -) - -var ( - // ErrLayerDoesNotExist is used when an operation is - // attempted on a layer which does not exist. - ErrLayerDoesNotExist = errors.New("layer does not exist") - - // ErrLayerNotRetained is used when a release is - // attempted on a layer which is not retained. - ErrLayerNotRetained = errors.New("layer not retained") - - // ErrMountDoesNotExist is used when an operation is - // attempted on a mount layer which does not exist. - ErrMountDoesNotExist = errors.New("mount does not exist") - - // ErrMountNameConflict is used when a mount is attempted - // to be created but there is already a mount with the name - // used for creation. - ErrMountNameConflict = errors.New("mount already exists with name") - - // ErrActiveMount is used when an operation on a - // mount is attempted but the layer is still - // mounted and the operation cannot be performed. - ErrActiveMount = errors.New("mount still active") - - // ErrNotMounted is used when requesting an active - // mount but the layer is not mounted. - ErrNotMounted = errors.New("not mounted") - - // ErrMaxDepthExceeded is used when a layer is attempted - // to be created which would result in a layer depth - // greater than the 125 max. - ErrMaxDepthExceeded = errors.New("max depth exceeded") - - // ErrNotSupported is used when the action is not supported - // on the current host operating system. - ErrNotSupported = errors.New("not support on this host operating system") -) - -// ChainID is the content-addressable ID of a layer. -type ChainID digest.Digest - -// String returns a string rendition of a layer ID -func (id ChainID) String() string { - return string(id) -} - -// DiffID is the hash of an individual layer tar. -type DiffID digest.Digest - -// String returns a string rendition of a layer DiffID -func (diffID DiffID) String() string { - return string(diffID) -} - -// TarStreamer represents an object which may -// have its contents exported as a tar stream. -type TarStreamer interface { - // TarStream returns a tar archive stream - // for the contents of a layer. - TarStream() (io.ReadCloser, error) -} - -// Layer represents a read-only layer -type Layer interface { - TarStreamer - - // TarStreamFrom returns a tar archive stream for all the layer chain with - // arbitrary depth. - TarStreamFrom(ChainID) (io.ReadCloser, error) - - // ChainID returns the content hash of the entire layer chain. The hash - // chain is made up of DiffID of top layer and all of its parents. - ChainID() ChainID - - // DiffID returns the content hash of the layer - // tar stream used to create this layer. - DiffID() DiffID - - // Parent returns the next layer in the layer chain. - Parent() Layer - - // Size returns the size of the entire layer chain. The size - // is calculated from the total size of all files in the layers. - Size() (int64, error) - - // DiffSize returns the size difference of the top layer - // from parent layer. - DiffSize() (int64, error) - - // Metadata returns the low level storage metadata associated - // with layer. 
- Metadata() (map[string]string, error) -} - -// RWLayer represents a layer which is -// read and writable -type RWLayer interface { - TarStreamer - - // Name of mounted layer - Name() string - - // Parent returns the layer which the writable - // layer was created from. - Parent() Layer - - // Mount mounts the RWLayer and returns the filesystem path - // to the writable layer. - Mount(mountLabel string) (containerfs.ContainerFS, error) - - // Unmount unmounts the RWLayer. This should be called - // for every mount. If there are multiple mount calls - // this operation will only decrement the internal mount counter. - Unmount() error - - // Size represents the size of the writable layer - // as calculated by the total size of the files - // changed in the mutable layer. - Size() (int64, error) - - // Changes returns the set of changes for the mutable layer - // from the base layer. - Changes() ([]archive.Change, error) - - // Metadata returns the low level metadata for the mutable layer - Metadata() (map[string]string, error) -} - -// Metadata holds information about a -// read-only layer -type Metadata struct { - // ChainID is the content hash of the layer - ChainID ChainID - - // DiffID is the hash of the tar data used to - // create the layer - DiffID DiffID - - // Size is the size of the layer and all parents - Size int64 - - // DiffSize is the size of the top layer - DiffSize int64 -} - -// MountInit is a function to initialize a -// writable mount. Changes made here will -// not be included in the Tar stream of the -// RWLayer. -type MountInit func(root containerfs.ContainerFS) error - -// CreateRWLayerOpts contains optional arguments to be passed to CreateRWLayer -type CreateRWLayerOpts struct { - MountLabel string - InitFunc MountInit - StorageOpt map[string]string -} - -// Store represents a backend for managing both -// read-only and read-write layers. -type Store interface { - Register(io.Reader, ChainID) (Layer, error) - Get(ChainID) (Layer, error) - Map() map[ChainID]Layer - Release(Layer) ([]Metadata, error) - - CreateRWLayer(id string, parent ChainID, opts *CreateRWLayerOpts) (RWLayer, error) - GetRWLayer(id string) (RWLayer, error) - GetMountID(id string) (string, error) - ReleaseRWLayer(RWLayer) ([]Metadata, error) - - Cleanup() error - DriverStatus() [][2]string - DriverName() string -} - -// DescribableStore represents a layer store capable of storing -// descriptors for layers. -type DescribableStore interface { - RegisterWithDescriptor(io.Reader, ChainID, distribution.Descriptor) (Layer, error) -} - -// CreateChainID returns ID for a layerDigest slice -func CreateChainID(dgsts []DiffID) ChainID { - return createChainIDFromParent("", dgsts...) -} - -func createChainIDFromParent(parent ChainID, dgsts ...DiffID) ChainID { - if len(dgsts) == 0 { - return parent - } - if parent == "" { - return createChainIDFromParent(ChainID(dgsts[0]), dgsts[1:]...) - } - // H = "H(n-1) SHA256(n)" - dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0]))) - return createChainIDFromParent(ChainID(dgst), dgsts[1:]...)
-} - -// ReleaseAndLog releases the provided layer from the given layer -// store, logging any error and release metadata -func ReleaseAndLog(ls Store, l Layer) { - metadata, err := ls.Release(l) - if err != nil { - logrus.Errorf("Error releasing layer %s: %v", l.ChainID(), err) - } - LogReleaseMetadata(metadata) -} - -// LogReleaseMetadata logs a metadata array, uses this to -// ensure consistent logging for release metadata -func LogReleaseMetadata(metadatas []Metadata) { - for _, metadata := range metadatas { - logrus.Infof("Layer %s cleaned up", metadata.ChainID) - } -} diff --git a/vendor/github.com/docker/docker/layer/layer_store.go b/vendor/github.com/docker/docker/layer/layer_store.go deleted file mode 100644 index bf0705afc..000000000 --- a/vendor/github.com/docker/docker/layer/layer_store.go +++ /dev/null @@ -1,750 +0,0 @@ -package layer // import "github.com/docker/docker/layer" - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "sync" - - "github.com/docker/distribution" - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/plugingetter" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/system" - "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" - "github.com/vbatts/tar-split/tar/asm" - "github.com/vbatts/tar-split/tar/storage" -) - -// maxLayerDepth represents the maximum number of -// layers which can be chained together. 125 was -// chosen to account for the 127 max in some -// graphdrivers plus the 2 additional layers -// used to create a rwlayer. -const maxLayerDepth = 125 - -type layerStore struct { - store *fileMetadataStore - driver graphdriver.Driver - useTarSplit bool - - layerMap map[ChainID]*roLayer - layerL sync.Mutex - - mounts map[string]*mountedLayer - mountL sync.Mutex - os string -} - -// StoreOptions are the options used to create a new Store instance -type StoreOptions struct { - Root string - MetadataStorePathTemplate string - GraphDriver string - GraphDriverOptions []string - IDMappings *idtools.IDMappings - PluginGetter plugingetter.PluginGetter - ExperimentalEnabled bool - OS string -} - -// NewStoreFromOptions creates a new Store instance -func NewStoreFromOptions(options StoreOptions) (Store, error) { - driver, err := graphdriver.New(options.GraphDriver, options.PluginGetter, graphdriver.Options{ - Root: options.Root, - DriverOptions: options.GraphDriverOptions, - UIDMaps: options.IDMappings.UIDs(), - GIDMaps: options.IDMappings.GIDs(), - ExperimentalEnabled: options.ExperimentalEnabled, - }) - if err != nil { - return nil, fmt.Errorf("error initializing graphdriver: %v", err) - } - logrus.Debugf("Initialized graph driver %s", driver) - - root := fmt.Sprintf(options.MetadataStorePathTemplate, driver) - - return newStoreFromGraphDriver(root, driver, options.OS) -} - -// newStoreFromGraphDriver creates a new Store instance using the provided -// metadata store and graph driver. The metadata store will be used to restore -// the Store. 
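For the ChainID recursion defined in layer.go above, H(n) = SHA256(H(n-1) + " " + DiffID(n)), a standalone stdlib sketch makes the derivation concrete; the DiffIDs below are placeholders, and digest.FromBytes in the real code likewise uses canonical SHA-256:

package main

import (
	"crypto/sha256"
	"fmt"
)

// chainID mirrors createChainIDFromParent: the first DiffID is the ChainID of
// the bottom layer; each further layer hashes "<parent chainID> <diffID>".
func chainID(diffIDs []string) string {
	chain := diffIDs[0]
	for _, d := range diffIDs[1:] {
		chain = fmt.Sprintf("sha256:%x", sha256.Sum256([]byte(chain+" "+d)))
	}
	return chain
}

func main() {
	// Placeholder DiffIDs, not real image digests.
	fmt.Println(chainID([]string{
		"sha256:1111111111111111111111111111111111111111111111111111111111111111",
		"sha256:2222222222222222222222222222222222222222222222222222222222222222",
	}))
}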
-func newStoreFromGraphDriver(root string, driver graphdriver.Driver, os string) (Store, error) { - if !system.IsOSSupported(os) { - return nil, fmt.Errorf("failed to initialize layer store as operating system '%s' is not supported", os) - } - caps := graphdriver.Capabilities{} - if capDriver, ok := driver.(graphdriver.CapabilityDriver); ok { - caps = capDriver.Capabilities() - } - - ms, err := newFSMetadataStore(root) - if err != nil { - return nil, err - } - - ls := &layerStore{ - store: ms, - driver: driver, - layerMap: map[ChainID]*roLayer{}, - mounts: map[string]*mountedLayer{}, - useTarSplit: !caps.ReproducesExactDiffs, - os: os, - } - - ids, mounts, err := ms.List() - if err != nil { - return nil, err - } - - for _, id := range ids { - l, err := ls.loadLayer(id) - if err != nil { - logrus.Debugf("Failed to load layer %s: %s", id, err) - continue - } - if l.parent != nil { - l.parent.referenceCount++ - } - } - - for _, mount := range mounts { - if err := ls.loadMount(mount); err != nil { - logrus.Debugf("Failed to load mount %s: %s", mount, err) - } - } - - return ls, nil -} - -func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) { - cl, ok := ls.layerMap[layer] - if ok { - return cl, nil - } - - diff, err := ls.store.GetDiffID(layer) - if err != nil { - return nil, fmt.Errorf("failed to get diff id for %s: %s", layer, err) - } - - size, err := ls.store.GetSize(layer) - if err != nil { - return nil, fmt.Errorf("failed to get size for %s: %s", layer, err) - } - - cacheID, err := ls.store.GetCacheID(layer) - if err != nil { - return nil, fmt.Errorf("failed to get cache id for %s: %s", layer, err) - } - - parent, err := ls.store.GetParent(layer) - if err != nil { - return nil, fmt.Errorf("failed to get parent for %s: %s", layer, err) - } - - descriptor, err := ls.store.GetDescriptor(layer) - if err != nil { - return nil, fmt.Errorf("failed to get descriptor for %s: %s", layer, err) - } - - os, err := ls.store.getOS(layer) - if err != nil { - return nil, fmt.Errorf("failed to get operating system for %s: %s", layer, err) - } - - if os != ls.os { - return nil, fmt.Errorf("failed to load layer with os %s into layerstore for %s", os, ls.os) - } - - cl = &roLayer{ - chainID: layer, - diffID: diff, - size: size, - cacheID: cacheID, - layerStore: ls, - references: map[Layer]struct{}{}, - descriptor: descriptor, - } - - if parent != "" { - p, err := ls.loadLayer(parent) - if err != nil { - return nil, err - } - cl.parent = p - } - - ls.layerMap[cl.chainID] = cl - - return cl, nil -} - -func (ls *layerStore) loadMount(mount string) error { - if _, ok := ls.mounts[mount]; ok { - return nil - } - - mountID, err := ls.store.GetMountID(mount) - if err != nil { - return err - } - - initID, err := ls.store.GetInitID(mount) - if err != nil { - return err - } - - parent, err := ls.store.GetMountParent(mount) - if err != nil { - return err - } - - ml := &mountedLayer{ - name: mount, - mountID: mountID, - initID: initID, - layerStore: ls, - references: map[RWLayer]*referencedRWLayer{}, - } - - if parent != "" { - p, err := ls.loadLayer(parent) - if err != nil { - return err - } - ml.parent = p - - p.referenceCount++ - } - - ls.mounts[ml.name] = ml - - return nil -} - -func (ls *layerStore) applyTar(tx *fileMetadataTransaction, ts io.Reader, parent string, layer *roLayer) error { - digester := digest.Canonical.Digester() - tr := io.TeeReader(ts, digester.Hash()) - - rdr := tr - if ls.useTarSplit { - tsw, err := tx.TarSplitWriter(true) - if err != nil { - return err - } - metaPacker := 
storage.NewJSONPacker(tsw) - defer tsw.Close() - - // we're passing nil here for the file putter, because the ApplyDiff will - // handle the extraction of the archive - rdr, err = asm.NewInputTarStream(tr, metaPacker, nil) - if err != nil { - return err - } - } - - applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, rdr) - if err != nil { - return err - } - - // Discard trailing data but ensure metadata is picked up to reconstruct stream - io.Copy(ioutil.Discard, rdr) // ignore error as reader may be closed - - layer.size = applySize - layer.diffID = DiffID(digester.Digest()) - - logrus.Debugf("Applied tar %s to %s, size: %d", layer.diffID, layer.cacheID, applySize) - - return nil -} - -func (ls *layerStore) Register(ts io.Reader, parent ChainID) (Layer, error) { - return ls.registerWithDescriptor(ts, parent, distribution.Descriptor{}) -} - -func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) { - // err is used to hold the error which will always trigger - // cleanup of creates sources but may not be an error returned - // to the caller (already exists). - var err error - var pid string - var p *roLayer - - if string(parent) != "" { - p = ls.get(parent) - if p == nil { - return nil, ErrLayerDoesNotExist - } - pid = p.cacheID - // Release parent chain if error - defer func() { - if err != nil { - ls.layerL.Lock() - ls.releaseLayer(p) - ls.layerL.Unlock() - } - }() - if p.depth() >= maxLayerDepth { - err = ErrMaxDepthExceeded - return nil, err - } - } - - // Create new roLayer - layer := &roLayer{ - parent: p, - cacheID: stringid.GenerateRandomID(), - referenceCount: 1, - layerStore: ls, - references: map[Layer]struct{}{}, - descriptor: descriptor, - } - - if err = ls.driver.Create(layer.cacheID, pid, nil); err != nil { - return nil, err - } - - tx, err := ls.store.StartTransaction() - if err != nil { - return nil, err - } - - defer func() { - if err != nil { - logrus.Debugf("Cleaning up layer %s: %v", layer.cacheID, err) - if err := ls.driver.Remove(layer.cacheID); err != nil { - logrus.Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err) - } - if err := tx.Cancel(); err != nil { - logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) - } - } - }() - - if err = ls.applyTar(tx, ts, pid, layer); err != nil { - return nil, err - } - - if layer.parent == nil { - layer.chainID = ChainID(layer.diffID) - } else { - layer.chainID = createChainIDFromParent(layer.parent.chainID, layer.diffID) - } - - if err = storeLayer(tx, layer); err != nil { - return nil, err - } - - ls.layerL.Lock() - defer ls.layerL.Unlock() - - if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { - // Set error for cleanup, but do not return the error - err = errors.New("layer already exists") - return existingLayer.getReference(), nil - } - - if err = tx.Commit(layer.chainID); err != nil { - return nil, err - } - - ls.layerMap[layer.chainID] = layer - - return layer.getReference(), nil -} - -func (ls *layerStore) getWithoutLock(layer ChainID) *roLayer { - l, ok := ls.layerMap[layer] - if !ok { - return nil - } - - l.referenceCount++ - - return l -} - -func (ls *layerStore) get(l ChainID) *roLayer { - ls.layerL.Lock() - defer ls.layerL.Unlock() - return ls.getWithoutLock(l) -} - -func (ls *layerStore) Get(l ChainID) (Layer, error) { - ls.layerL.Lock() - defer ls.layerL.Unlock() - - layer := ls.getWithoutLock(l) - if layer == nil { - return nil, ErrLayerDoesNotExist - } - - return 
layer.getReference(), nil -} - -func (ls *layerStore) Map() map[ChainID]Layer { - ls.layerL.Lock() - defer ls.layerL.Unlock() - - layers := map[ChainID]Layer{} - - for k, v := range ls.layerMap { - layers[k] = v - } - - return layers -} - -func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error { - err := ls.driver.Remove(layer.cacheID) - if err != nil { - return err - } - err = ls.store.Remove(layer.chainID) - if err != nil { - return err - } - metadata.DiffID = layer.diffID - metadata.ChainID = layer.chainID - metadata.Size, err = layer.Size() - if err != nil { - return err - } - metadata.DiffSize = layer.size - - return nil -} - -func (ls *layerStore) releaseLayer(l *roLayer) ([]Metadata, error) { - depth := 0 - removed := []Metadata{} - for { - if l.referenceCount == 0 { - panic("layer not retained") - } - l.referenceCount-- - if l.referenceCount != 0 { - return removed, nil - } - - if len(removed) == 0 && depth > 0 { - panic("cannot remove layer with child") - } - if l.hasReferences() { - panic("cannot delete referenced layer") - } - var metadata Metadata - if err := ls.deleteLayer(l, &metadata); err != nil { - return nil, err - } - - delete(ls.layerMap, l.chainID) - removed = append(removed, metadata) - - if l.parent == nil { - return removed, nil - } - - depth++ - l = l.parent - } -} - -func (ls *layerStore) Release(l Layer) ([]Metadata, error) { - ls.layerL.Lock() - defer ls.layerL.Unlock() - layer, ok := ls.layerMap[l.ChainID()] - if !ok { - return []Metadata{}, nil - } - if !layer.hasReference(l) { - return nil, ErrLayerNotRetained - } - - layer.deleteReference(l) - - return ls.releaseLayer(layer) -} - -func (ls *layerStore) CreateRWLayer(name string, parent ChainID, opts *CreateRWLayerOpts) (RWLayer, error) { - var ( - storageOpt map[string]string - initFunc MountInit - mountLabel string - ) - - if opts != nil { - mountLabel = opts.MountLabel - storageOpt = opts.StorageOpt - initFunc = opts.InitFunc - } - - ls.mountL.Lock() - defer ls.mountL.Unlock() - m, ok := ls.mounts[name] - if ok { - return nil, ErrMountNameConflict - } - - var err error - var pid string - var p *roLayer - if string(parent) != "" { - p = ls.get(parent) - if p == nil { - return nil, ErrLayerDoesNotExist - } - pid = p.cacheID - - // Release parent chain if error - defer func() { - if err != nil { - ls.layerL.Lock() - ls.releaseLayer(p) - ls.layerL.Unlock() - } - }() - } - - m = &mountedLayer{ - name: name, - parent: p, - mountID: ls.mountID(name), - layerStore: ls, - references: map[RWLayer]*referencedRWLayer{}, - } - - if initFunc != nil { - pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc, storageOpt) - if err != nil { - return nil, err - } - m.initID = pid - } - - createOpts := &graphdriver.CreateOpts{ - StorageOpt: storageOpt, - } - - if err = ls.driver.CreateReadWrite(m.mountID, pid, createOpts); err != nil { - return nil, err - } - if err = ls.saveMount(m); err != nil { - return nil, err - } - - return m.getReference(), nil -} - -func (ls *layerStore) GetRWLayer(id string) (RWLayer, error) { - ls.mountL.Lock() - defer ls.mountL.Unlock() - mount, ok := ls.mounts[id] - if !ok { - return nil, ErrMountDoesNotExist - } - - return mount.getReference(), nil -} - -func (ls *layerStore) GetMountID(id string) (string, error) { - ls.mountL.Lock() - defer ls.mountL.Unlock() - mount, ok := ls.mounts[id] - if !ok { - return "", ErrMountDoesNotExist - } - logrus.Debugf("GetMountID id: %s -> mountID: %s", id, mount.mountID) - - return mount.mountID, nil -} - -func (ls *layerStore) 
ReleaseRWLayer(l RWLayer) ([]Metadata, error) { - ls.mountL.Lock() - defer ls.mountL.Unlock() - m, ok := ls.mounts[l.Name()] - if !ok { - return []Metadata{}, nil - } - - if err := m.deleteReference(l); err != nil { - return nil, err - } - - if m.hasReferences() { - return []Metadata{}, nil - } - - if err := ls.driver.Remove(m.mountID); err != nil { - logrus.Errorf("Error removing mounted layer %s: %s", m.name, err) - m.retakeReference(l) - return nil, err - } - - if m.initID != "" { - if err := ls.driver.Remove(m.initID); err != nil { - logrus.Errorf("Error removing init layer %s: %s", m.name, err) - m.retakeReference(l) - return nil, err - } - } - - if err := ls.store.RemoveMount(m.name); err != nil { - logrus.Errorf("Error removing mount metadata: %s: %s", m.name, err) - m.retakeReference(l) - return nil, err - } - - delete(ls.mounts, m.Name()) - - ls.layerL.Lock() - defer ls.layerL.Unlock() - if m.parent != nil { - return ls.releaseLayer(m.parent) - } - - return []Metadata{}, nil -} - -func (ls *layerStore) saveMount(mount *mountedLayer) error { - if err := ls.store.SetMountID(mount.name, mount.mountID); err != nil { - return err - } - - if mount.initID != "" { - if err := ls.store.SetInitID(mount.name, mount.initID); err != nil { - return err - } - } - - if mount.parent != nil { - if err := ls.store.SetMountParent(mount.name, mount.parent.chainID); err != nil { - return err - } - } - - ls.mounts[mount.name] = mount - - return nil -} - -func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit, storageOpt map[string]string) (string, error) { - // Use "-init" to maintain compatibility with graph drivers - // which are expecting this layer with this special name. If all - // graph drivers can be updated to not rely on knowing about this layer - // then the initID should be randomly generated. 
- initID := fmt.Sprintf("%s-init", graphID) - - createOpts := &graphdriver.CreateOpts{ - MountLabel: mountLabel, - StorageOpt: storageOpt, - } - - if err := ls.driver.CreateReadWrite(initID, parent, createOpts); err != nil { - return "", err - } - p, err := ls.driver.Get(initID, "") - if err != nil { - return "", err - } - - if err := initFunc(p); err != nil { - ls.driver.Put(initID) - return "", err - } - - if err := ls.driver.Put(initID); err != nil { - return "", err - } - - return initID, nil -} - -func (ls *layerStore) getTarStream(rl *roLayer) (io.ReadCloser, error) { - if !ls.useTarSplit { - var parentCacheID string - if rl.parent != nil { - parentCacheID = rl.parent.cacheID - } - - return ls.driver.Diff(rl.cacheID, parentCacheID) - } - - r, err := ls.store.TarSplitReader(rl.chainID) - if err != nil { - return nil, err - } - - pr, pw := io.Pipe() - go func() { - err := ls.assembleTarTo(rl.cacheID, r, nil, pw) - if err != nil { - pw.CloseWithError(err) - } else { - pw.Close() - } - }() - - return pr, nil -} - -func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error { - diffDriver, ok := ls.driver.(graphdriver.DiffGetterDriver) - if !ok { - diffDriver = &naiveDiffPathDriver{ls.driver} - } - - defer metadata.Close() - - // get our relative path to the container - fileGetCloser, err := diffDriver.DiffGetter(graphID) - if err != nil { - return err - } - defer fileGetCloser.Close() - - metaUnpacker := storage.NewJSONUnpacker(metadata) - upackerCounter := &unpackSizeCounter{metaUnpacker, size} - logrus.Debugf("Assembling tar data for %s", graphID) - return asm.WriteOutputTarStream(fileGetCloser, upackerCounter, w) -} - -func (ls *layerStore) Cleanup() error { - return ls.driver.Cleanup() -} - -func (ls *layerStore) DriverStatus() [][2]string { - return ls.driver.Status() -} - -func (ls *layerStore) DriverName() string { - return ls.driver.String() -} - -type naiveDiffPathDriver struct { - graphdriver.Driver -} - -type fileGetPutter struct { - storage.FileGetter - driver graphdriver.Driver - id string -} - -func (w *fileGetPutter) Close() error { - return w.driver.Put(w.id) -} - -func (n *naiveDiffPathDriver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { - p, err := n.Driver.Get(id, "") - if err != nil { - return nil, err - } - return &fileGetPutter{storage.NewPathFileGetter(p.Path()), n.Driver, id}, nil -} diff --git a/vendor/github.com/docker/docker/layer/layer_store_windows.go b/vendor/github.com/docker/docker/layer/layer_store_windows.go deleted file mode 100644 index eca1f6a83..000000000 --- a/vendor/github.com/docker/docker/layer/layer_store_windows.go +++ /dev/null @@ -1,11 +0,0 @@ -package layer // import "github.com/docker/docker/layer" - -import ( - "io" - - "github.com/docker/distribution" -) - -func (ls *layerStore) RegisterWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) { - return ls.registerWithDescriptor(ts, parent, descriptor) -} diff --git a/vendor/github.com/docker/docker/layer/layer_unix.go b/vendor/github.com/docker/docker/layer/layer_unix.go deleted file mode 100644 index 002c7ff83..000000000 --- a/vendor/github.com/docker/docker/layer/layer_unix.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build linux freebsd darwin openbsd - -package layer // import "github.com/docker/docker/layer" - -import "github.com/docker/docker/pkg/stringid" - -func (ls *layerStore) mountID(name string) string { - return stringid.GenerateRandomID() -} diff --git 
a/vendor/github.com/docker/docker/layer/layer_windows.go b/vendor/github.com/docker/docker/layer/layer_windows.go deleted file mode 100644 index 25ef26afc..000000000 --- a/vendor/github.com/docker/docker/layer/layer_windows.go +++ /dev/null @@ -1,46 +0,0 @@ -package layer // import "github.com/docker/docker/layer" - -import ( - "errors" -) - -// Getter is an interface to get the path to a layer on the host. -type Getter interface { - // GetLayerPath gets the path for the layer. This is different from Get() - // since that returns an interface to account for umountable layers. - GetLayerPath(id string) (string, error) -} - -// GetLayerPath returns the path to a layer -func GetLayerPath(s Store, layer ChainID) (string, error) { - ls, ok := s.(*layerStore) - if !ok { - return "", errors.New("unsupported layer store") - } - ls.layerL.Lock() - defer ls.layerL.Unlock() - - rl, ok := ls.layerMap[layer] - if !ok { - return "", ErrLayerDoesNotExist - } - - if layerGetter, ok := ls.driver.(Getter); ok { - return layerGetter.GetLayerPath(rl.cacheID) - } - path, err := ls.driver.Get(rl.cacheID, "") - if err != nil { - return "", err - } - - if err := ls.driver.Put(rl.cacheID); err != nil { - return "", err - } - - return path.Path(), nil -} - -func (ls *layerStore) mountID(name string) string { - // windows has issues if container ID doesn't match mount ID - return name -} diff --git a/vendor/github.com/docker/docker/layer/migration.go b/vendor/github.com/docker/docker/layer/migration.go deleted file mode 100644 index 2668ea96b..000000000 --- a/vendor/github.com/docker/docker/layer/migration.go +++ /dev/null @@ -1,252 +0,0 @@ -package layer // import "github.com/docker/docker/layer" - -import ( - "compress/gzip" - "errors" - "fmt" - "io" - "os" - - "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" - "github.com/vbatts/tar-split/tar/asm" - "github.com/vbatts/tar-split/tar/storage" -) - -// CreateRWLayerByGraphID creates a RWLayer in the layer store using -// the provided name with the given graphID. To get the RWLayer -// after migration the layer may be retrieved by the given name. 
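The migration helpers further down in this file recompute each layer's DiffID while writing a fresh tar-split copy. The basic I/O shape they rely on, hashing a stream once while a compressed copy is written to disk, can be sketched with the stdlib alone; the input and the temp-file path are placeholders:

package main

import (
	"compress/gzip"
	"crypto/sha256"
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	diff := strings.NewReader("pretend layer tar stream") // placeholder input

	f, err := os.CreateTemp("", "tar-split-*.gz")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	gz := gzip.NewWriter(f)
	defer gz.Close()

	// Hash the stream once while writing the compressed copy.
	h := sha256.New()
	if _, err := io.Copy(io.MultiWriter(gz, h), diff); err != nil {
		panic(err)
	}
	fmt.Printf("diffID: sha256:%x\n", h.Sum(nil))
}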
-func (ls *layerStore) CreateRWLayerByGraphID(name, graphID string, parent ChainID) (err error) { - ls.mountL.Lock() - defer ls.mountL.Unlock() - m, ok := ls.mounts[name] - if ok { - if m.parent.chainID != parent { - return errors.New("name conflict, mismatched parent") - } - if m.mountID != graphID { - return errors.New("mount already exists") - } - - return nil - } - - if !ls.driver.Exists(graphID) { - return fmt.Errorf("graph ID does not exist: %q", graphID) - } - - var p *roLayer - if string(parent) != "" { - p = ls.get(parent) - if p == nil { - return ErrLayerDoesNotExist - } - - // Release parent chain if error - defer func() { - if err != nil { - ls.layerL.Lock() - ls.releaseLayer(p) - ls.layerL.Unlock() - } - }() - } - - // TODO: Ensure graphID has correct parent - - m = &mountedLayer{ - name: name, - parent: p, - mountID: graphID, - layerStore: ls, - references: map[RWLayer]*referencedRWLayer{}, - } - - // Check for existing init layer - initID := fmt.Sprintf("%s-init", graphID) - if ls.driver.Exists(initID) { - m.initID = initID - } - - return ls.saveMount(m) -} - -func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID DiffID, size int64, err error) { - defer func() { - if err != nil { - logrus.Debugf("could not get checksum for %q with tar-split: %q", id, err) - diffID, size, err = ls.checksumForGraphIDNoTarsplit(id, parent, newTarDataPath) - } - }() - - if oldTarDataPath == "" { - err = errors.New("no tar-split file") - return - } - - tarDataFile, err := os.Open(oldTarDataPath) - if err != nil { - return - } - defer tarDataFile.Close() - uncompressed, err := gzip.NewReader(tarDataFile) - if err != nil { - return - } - - dgst := digest.Canonical.Digester() - err = ls.assembleTarTo(id, uncompressed, &size, dgst.Hash()) - if err != nil { - return - } - - diffID = DiffID(dgst.Digest()) - err = os.RemoveAll(newTarDataPath) - if err != nil { - return - } - err = os.Link(oldTarDataPath, newTarDataPath) - - return -} - -func (ls *layerStore) checksumForGraphIDNoTarsplit(id, parent, newTarDataPath string) (diffID DiffID, size int64, err error) { - rawarchive, err := ls.driver.Diff(id, parent) - if err != nil { - return - } - defer rawarchive.Close() - - f, err := os.Create(newTarDataPath) - if err != nil { - return - } - defer f.Close() - mfz := gzip.NewWriter(f) - defer mfz.Close() - metaPacker := storage.NewJSONPacker(mfz) - - packerCounter := &packSizeCounter{metaPacker, &size} - - archive, err := asm.NewInputTarStream(rawarchive, packerCounter, nil) - if err != nil { - return - } - dgst, err := digest.FromReader(archive) - if err != nil { - return - } - diffID = DiffID(dgst) - return -} - -func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, diffID DiffID, tarDataFile string, size int64) (Layer, error) { - // err is used to hold the error which will always trigger - // cleanup of creates sources but may not be an error returned - // to the caller (already exists). 
- var err error - var p *roLayer - if string(parent) != "" { - p = ls.get(parent) - if p == nil { - return nil, ErrLayerDoesNotExist - } - - // Release parent chain if error - defer func() { - if err != nil { - ls.layerL.Lock() - ls.releaseLayer(p) - ls.layerL.Unlock() - } - }() - } - - // Create new roLayer - layer := &roLayer{ - parent: p, - cacheID: graphID, - referenceCount: 1, - layerStore: ls, - references: map[Layer]struct{}{}, - diffID: diffID, - size: size, - chainID: createChainIDFromParent(parent, diffID), - } - - ls.layerL.Lock() - defer ls.layerL.Unlock() - - if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { - // Set error for cleanup, but do not return - err = errors.New("layer already exists") - return existingLayer.getReference(), nil - } - - tx, err := ls.store.StartTransaction() - if err != nil { - return nil, err - } - - defer func() { - if err != nil { - logrus.Debugf("Cleaning up transaction after failed migration for %s: %v", graphID, err) - if err := tx.Cancel(); err != nil { - logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) - } - } - }() - - tsw, err := tx.TarSplitWriter(false) - if err != nil { - return nil, err - } - defer tsw.Close() - tdf, err := os.Open(tarDataFile) - if err != nil { - return nil, err - } - defer tdf.Close() - _, err = io.Copy(tsw, tdf) - if err != nil { - return nil, err - } - - if err = storeLayer(tx, layer); err != nil { - return nil, err - } - - if err = tx.Commit(layer.chainID); err != nil { - return nil, err - } - - ls.layerMap[layer.chainID] = layer - - return layer.getReference(), nil -} - -type unpackSizeCounter struct { - unpacker storage.Unpacker - size *int64 -} - -func (u *unpackSizeCounter) Next() (*storage.Entry, error) { - e, err := u.unpacker.Next() - if err == nil && u.size != nil { - *u.size += e.Size - } - return e, err -} - -type packSizeCounter struct { - packer storage.Packer - size *int64 -} - -func (p *packSizeCounter) AddEntry(e storage.Entry) (int, error) { - n, err := p.packer.AddEntry(e) - if err == nil && p.size != nil { - *p.size += e.Size - } - return n, err -} diff --git a/vendor/github.com/docker/docker/layer/mounted_layer.go b/vendor/github.com/docker/docker/layer/mounted_layer.go deleted file mode 100644 index d6858c662..000000000 --- a/vendor/github.com/docker/docker/layer/mounted_layer.go +++ /dev/null @@ -1,100 +0,0 @@ -package layer // import "github.com/docker/docker/layer" - -import ( - "io" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/containerfs" -) - -type mountedLayer struct { - name string - mountID string - initID string - parent *roLayer - path string - layerStore *layerStore - - references map[RWLayer]*referencedRWLayer -} - -func (ml *mountedLayer) cacheParent() string { - if ml.initID != "" { - return ml.initID - } - if ml.parent != nil { - return ml.parent.cacheID - } - return "" -} - -func (ml *mountedLayer) TarStream() (io.ReadCloser, error) { - return ml.layerStore.driver.Diff(ml.mountID, ml.cacheParent()) -} - -func (ml *mountedLayer) Name() string { - return ml.name -} - -func (ml *mountedLayer) Parent() Layer { - if ml.parent != nil { - return ml.parent - } - - // Return a nil interface instead of an interface wrapping a nil - // pointer. 
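The comment above, in mountedLayer.Parent, avoids a classic Go pitfall: an interface value holding a typed nil pointer is itself non-nil. A self-contained illustration with hypothetical types, unrelated to the patch:

package main

import "fmt"

type Layer interface{ ID() string }

type roLayer struct{}

func (r *roLayer) ID() string { return "layer" }

func badParent() Layer {
	var p *roLayer // nil pointer
	return p       // the interface now wraps (*roLayer)(nil) and is not nil
}

func goodParent() Layer {
	return nil // plain nil interface, as the code below returns
}

func main() {
	fmt.Println(badParent() == nil)  // false
	fmt.Println(goodParent() == nil) // true
}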
- return nil -} - -func (ml *mountedLayer) Size() (int64, error) { - return ml.layerStore.driver.DiffSize(ml.mountID, ml.cacheParent()) -} - -func (ml *mountedLayer) Changes() ([]archive.Change, error) { - return ml.layerStore.driver.Changes(ml.mountID, ml.cacheParent()) -} - -func (ml *mountedLayer) Metadata() (map[string]string, error) { - return ml.layerStore.driver.GetMetadata(ml.mountID) -} - -func (ml *mountedLayer) getReference() RWLayer { - ref := &referencedRWLayer{ - mountedLayer: ml, - } - ml.references[ref] = ref - - return ref -} - -func (ml *mountedLayer) hasReferences() bool { - return len(ml.references) > 0 -} - -func (ml *mountedLayer) deleteReference(ref RWLayer) error { - if _, ok := ml.references[ref]; !ok { - return ErrLayerNotRetained - } - delete(ml.references, ref) - return nil -} - -func (ml *mountedLayer) retakeReference(r RWLayer) { - if ref, ok := r.(*referencedRWLayer); ok { - ml.references[ref] = ref - } -} - -type referencedRWLayer struct { - *mountedLayer -} - -func (rl *referencedRWLayer) Mount(mountLabel string) (containerfs.ContainerFS, error) { - return rl.layerStore.driver.Get(rl.mountedLayer.mountID, mountLabel) -} - -// Unmount decrements the activity count and unmounts the underlying layer -// Callers should only call `Unmount` once per call to `Mount`, even on error. -func (rl *referencedRWLayer) Unmount() error { - return rl.layerStore.driver.Put(rl.mountedLayer.mountID) -} diff --git a/vendor/github.com/docker/docker/layer/ro_layer.go b/vendor/github.com/docker/docker/layer/ro_layer.go deleted file mode 100644 index bc0fe1ddd..000000000 --- a/vendor/github.com/docker/docker/layer/ro_layer.go +++ /dev/null @@ -1,178 +0,0 @@ -package layer // import "github.com/docker/docker/layer" - -import ( - "fmt" - "io" - - "github.com/docker/distribution" - "github.com/opencontainers/go-digest" -) - -type roLayer struct { - chainID ChainID - diffID DiffID - parent *roLayer - cacheID string - size int64 - layerStore *layerStore - descriptor distribution.Descriptor - - referenceCount int - references map[Layer]struct{} -} - -// TarStream for roLayer guarantees that the data that is produced is the exact -// data that the layer was registered with. -func (rl *roLayer) TarStream() (io.ReadCloser, error) { - rc, err := rl.layerStore.getTarStream(rl) - if err != nil { - return nil, err - } - - vrc, err := newVerifiedReadCloser(rc, digest.Digest(rl.diffID)) - if err != nil { - return nil, err - } - return vrc, nil -} - -// TarStreamFrom does not make any guarantees to the correctness of the produced -// data. As such it should not be used when the layer content must be verified -// to be an exact match to the registered layer. 
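TarStream's guarantee is enforced by the verifiedReadCloser defined further down, which tees every read into a digest verifier and checks it at EOF. A standalone sketch of that mechanism using go-digest; the payload and expected digest are placeholders:

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/opencontainers/go-digest"
)

func main() {
	payload := "layer bytes"                // placeholder stream contents
	expected := digest.FromString(payload) // stands in for the layer DiffID

	verifier := expected.Verifier()
	r := io.TeeReader(strings.NewReader(payload), verifier)

	if _, err := io.Copy(io.Discard, r); err != nil {
		panic(err)
	}
	// A tampered stream would report false here; ro_layer.go turns that
	// into an error at EOF.
	fmt.Println("verified:", verifier.Verified())
}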
-func (rl *roLayer) TarStreamFrom(parent ChainID) (io.ReadCloser, error) { - var parentCacheID string - for pl := rl.parent; pl != nil; pl = pl.parent { - if pl.chainID == parent { - parentCacheID = pl.cacheID - break - } - } - - if parent != ChainID("") && parentCacheID == "" { - return nil, fmt.Errorf("layer ID '%s' is not a parent of the specified layer: cannot provide diff to non-parent", parent) - } - return rl.layerStore.driver.Diff(rl.cacheID, parentCacheID) -} - -func (rl *roLayer) ChainID() ChainID { - return rl.chainID -} - -func (rl *roLayer) DiffID() DiffID { - return rl.diffID -} - -func (rl *roLayer) Parent() Layer { - if rl.parent == nil { - return nil - } - return rl.parent -} - -func (rl *roLayer) Size() (size int64, err error) { - if rl.parent != nil { - size, err = rl.parent.Size() - if err != nil { - return - } - } - - return size + rl.size, nil -} - -func (rl *roLayer) DiffSize() (size int64, err error) { - return rl.size, nil -} - -func (rl *roLayer) Metadata() (map[string]string, error) { - return rl.layerStore.driver.GetMetadata(rl.cacheID) -} - -type referencedCacheLayer struct { - *roLayer -} - -func (rl *roLayer) getReference() Layer { - ref := &referencedCacheLayer{ - roLayer: rl, - } - rl.references[ref] = struct{}{} - - return ref -} - -func (rl *roLayer) hasReference(ref Layer) bool { - _, ok := rl.references[ref] - return ok -} - -func (rl *roLayer) hasReferences() bool { - return len(rl.references) > 0 -} - -func (rl *roLayer) deleteReference(ref Layer) { - delete(rl.references, ref) -} - -func (rl *roLayer) depth() int { - if rl.parent == nil { - return 1 - } - return rl.parent.depth() + 1 -} - -func storeLayer(tx *fileMetadataTransaction, layer *roLayer) error { - if err := tx.SetDiffID(layer.diffID); err != nil { - return err - } - if err := tx.SetSize(layer.size); err != nil { - return err - } - if err := tx.SetCacheID(layer.cacheID); err != nil { - return err - } - // Do not store empty descriptors - if layer.descriptor.Digest != "" { - if err := tx.SetDescriptor(layer.descriptor); err != nil { - return err - } - } - if layer.parent != nil { - if err := tx.SetParent(layer.parent.chainID); err != nil { - return err - } - } - return tx.setOS(layer.layerStore.os) -} - -func newVerifiedReadCloser(rc io.ReadCloser, dgst digest.Digest) (io.ReadCloser, error) { - return &verifiedReadCloser{ - rc: rc, - dgst: dgst, - verifier: dgst.Verifier(), - }, nil -} - -type verifiedReadCloser struct { - rc io.ReadCloser - dgst digest.Digest - verifier digest.Verifier -} - -func (vrc *verifiedReadCloser) Read(p []byte) (n int, err error) { - n, err = vrc.rc.Read(p) - if n > 0 { - if n, err := vrc.verifier.Write(p[:n]); err != nil { - return n, err - } - } - if err == io.EOF { - if !vrc.verifier.Verified() { - err = fmt.Errorf("could not verify layer data for: %s. This may be because internal files in the layer store were modified. 
Re-pulling or rebuilding this image may resolve the issue", vrc.dgst) - } - } - return -} -func (vrc *verifiedReadCloser) Close() error { - return vrc.rc.Close() -} diff --git a/vendor/github.com/docker/docker/layer/ro_layer_windows.go b/vendor/github.com/docker/docker/layer/ro_layer_windows.go deleted file mode 100644 index a4f0c8088..000000000 --- a/vendor/github.com/docker/docker/layer/ro_layer_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -package layer // import "github.com/docker/docker/layer" - -import "github.com/docker/distribution" - -var _ distribution.Describable = &roLayer{} - -func (rl *roLayer) Descriptor() distribution.Descriptor { - return rl.descriptor -} diff --git a/vendor/github.com/docker/docker/libcontainerd/client_daemon.go b/vendor/github.com/docker/docker/libcontainerd/client_daemon.go deleted file mode 100644 index 0706fa4da..000000000 --- a/vendor/github.com/docker/docker/libcontainerd/client_daemon.go +++ /dev/null @@ -1,894 +0,0 @@ -// +build !windows - -package libcontainerd // import "github.com/docker/docker/libcontainerd" - -import ( - "context" - "encoding/json" - "fmt" - "io" - "os" - "path/filepath" - "reflect" - "runtime" - "strings" - "sync" - "syscall" - "time" - - "github.com/containerd/containerd" - apievents "github.com/containerd/containerd/api/events" - "github.com/containerd/containerd/api/types" - "github.com/containerd/containerd/archive" - "github.com/containerd/containerd/cio" - "github.com/containerd/containerd/content" - containerderrors "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/events" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/runtime/linux/runctypes" - "github.com/containerd/typeurl" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/ioutils" - "github.com/opencontainers/image-spec/specs-go/v1" - specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// InitProcessName is the name given to the first process of a -// container -const InitProcessName = "init" - -type container struct { - mu sync.Mutex - - bundleDir string - ctr containerd.Container - task containerd.Task - execs map[string]containerd.Process - oomKilled bool -} - -func (c *container) setTask(t containerd.Task) { - c.mu.Lock() - c.task = t - c.mu.Unlock() -} - -func (c *container) getTask() containerd.Task { - c.mu.Lock() - t := c.task - c.mu.Unlock() - return t -} - -func (c *container) addProcess(id string, p containerd.Process) { - c.mu.Lock() - if c.execs == nil { - c.execs = make(map[string]containerd.Process) - } - c.execs[id] = p - c.mu.Unlock() -} - -func (c *container) deleteProcess(id string) { - c.mu.Lock() - delete(c.execs, id) - c.mu.Unlock() -} - -func (c *container) getProcess(id string) containerd.Process { - c.mu.Lock() - p := c.execs[id] - c.mu.Unlock() - return p -} - -func (c *container) setOOMKilled(killed bool) { - c.mu.Lock() - c.oomKilled = killed - c.mu.Unlock() -} - -func (c *container) getOOMKilled() bool { - c.mu.Lock() - killed := c.oomKilled - c.mu.Unlock() - return killed -} - -type client struct { - sync.RWMutex // protects containers map - - remote *containerd.Client - stateDir string - logger *logrus.Entry - - namespace string - backend Backend - eventQ queue - containers map[string]*container -} - -func (c *client) reconnect() error { - c.Lock() - err := c.remote.Reconnect() - c.Unlock() - return err -} - -func (c *client) 
setRemote(remote *containerd.Client) { - c.Lock() - c.remote = remote - c.Unlock() -} - -func (c *client) getRemote() *containerd.Client { - c.RLock() - remote := c.remote - c.RUnlock() - return remote -} - -func (c *client) Version(ctx context.Context) (containerd.Version, error) { - return c.getRemote().Version(ctx) -} - -// Restore loads the containerd container. -// It should not be called concurrently with any other operation for the given ID. -func (c *client) Restore(ctx context.Context, id string, attachStdio StdioCallback) (alive bool, pid int, err error) { - c.Lock() - _, ok := c.containers[id] - if ok { - c.Unlock() - return false, 0, errors.WithStack(newConflictError("id already in use")) - } - - cntr := &container{} - c.containers[id] = cntr - cntr.mu.Lock() - defer cntr.mu.Unlock() - - c.Unlock() - - defer func() { - if err != nil { - c.Lock() - delete(c.containers, id) - c.Unlock() - } - }() - - var dio *cio.DirectIO - defer func() { - if err != nil && dio != nil { - dio.Cancel() - dio.Close() - } - err = wrapError(err) - }() - - ctr, err := c.getRemote().LoadContainer(ctx, id) - if err != nil { - return false, -1, errors.WithStack(wrapError(err)) - } - - attachIO := func(fifos *cio.FIFOSet) (cio.IO, error) { - // dio must be assigned to the previously defined dio for the defer above - // to handle cleanup - dio, err = cio.NewDirectIO(ctx, fifos) - if err != nil { - return nil, err - } - return attachStdio(dio) - } - t, err := ctr.Task(ctx, attachIO) - if err != nil && !containerderrors.IsNotFound(err) { - return false, -1, errors.Wrap(wrapError(err), "error getting containerd task for container") - } - - if t != nil { - s, err := t.Status(ctx) - if err != nil { - return false, -1, errors.Wrap(wrapError(err), "error getting task status") - } - - alive = s.Status != containerd.Stopped - pid = int(t.Pid()) - } - - cntr.bundleDir = filepath.Join(c.stateDir, id) - cntr.ctr = ctr - cntr.task = t - // TODO(mlaventure): load execs - - c.logger.WithFields(logrus.Fields{ - "container": id, - "alive": alive, - "pid": pid, - }).Debug("restored container") - - return alive, pid, nil -} - -func (c *client) Create(ctx context.Context, id string, ociSpec *specs.Spec, runtimeOptions interface{}) error { - if ctr := c.getContainer(id); ctr != nil { - return errors.WithStack(newConflictError("id already in use")) - } - - bdir, err := prepareBundleDir(filepath.Join(c.stateDir, id), ociSpec) - if err != nil { - return errdefs.System(errors.Wrap(err, "prepare bundle dir failed")) - } - - c.logger.WithField("bundle", bdir).WithField("root", ociSpec.Root.Path).Debug("bundle dir created") - - cdCtr, err := c.getRemote().NewContainer(ctx, id, - containerd.WithSpec(ociSpec), - // TODO(mlaventure): when containerd supports lcow, revisit runtime value - containerd.WithRuntime(fmt.Sprintf("io.containerd.runtime.v1.%s", runtime.GOOS), runtimeOptions)) - if err != nil { - return wrapError(err) - } - - c.Lock() - c.containers[id] = &container{ - bundleDir: bdir, - ctr: cdCtr, - } - c.Unlock() - - return nil -} - -// Start creates and starts a task for the specified containerd id -func (c *client) Start(ctx context.Context, id, checkpointDir string, withStdin bool, attachStdio StdioCallback) (int, error) { - ctr := c.getContainer(id) - if ctr == nil { - return -1, errors.WithStack(newNotFoundError("no such container")) - } - if t := ctr.getTask(); t != nil { - return -1, errors.WithStack(newConflictError("container already started")) - } - - var ( - cp *types.Descriptor - t containerd.Task - rio cio.IO - err
error - stdinCloseSync = make(chan struct{}) - ) - - if checkpointDir != "" { - // write checkpoint to the content store - tar := archive.Diff(ctx, "", checkpointDir) - cp, err = c.writeContent(ctx, images.MediaTypeContainerd1Checkpoint, checkpointDir, tar) - // remove the checkpoint when we're done - defer func() { - if cp != nil { - err := c.getRemote().ContentStore().Delete(context.Background(), cp.Digest) - if err != nil { - c.logger.WithError(err).WithFields(logrus.Fields{ - "ref": checkpointDir, - "digest": cp.Digest, - }).Warnf("failed to delete temporary checkpoint entry") - } - } - }() - if err := tar.Close(); err != nil { - return -1, errors.Wrap(err, "failed to close checkpoint tar stream") - } - if err != nil { - return -1, errors.Wrapf(err, "failed to upload checkpoint to containerd") - } - } - - spec, err := ctr.ctr.Spec(ctx) - if err != nil { - return -1, errors.Wrap(err, "failed to retrieve spec") - } - uid, gid := getSpecUser(spec) - t, err = ctr.ctr.NewTask(ctx, - func(id string) (cio.IO, error) { - fifos := newFIFOSet(ctr.bundleDir, InitProcessName, withStdin, spec.Process.Terminal) - - rio, err = c.createIO(fifos, id, InitProcessName, stdinCloseSync, attachStdio) - return rio, err - }, - func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error { - info.Checkpoint = cp - info.Options = &runctypes.CreateOptions{ - IoUid: uint32(uid), - IoGid: uint32(gid), - NoPivotRoot: os.Getenv("DOCKER_RAMDISK") != "", - } - return nil - }) - if err != nil { - close(stdinCloseSync) - if rio != nil { - rio.Cancel() - rio.Close() - } - return -1, wrapError(err) - } - - ctr.setTask(t) - - // Signal c.createIO that it can call CloseIO - close(stdinCloseSync) - - if err := t.Start(ctx); err != nil { - if _, err := t.Delete(ctx); err != nil { - c.logger.WithError(err).WithField("container", id). 
- Error("failed to delete task after fail start") - } - ctr.setTask(nil) - return -1, wrapError(err) - } - - return int(t.Pid()), nil -} - -func (c *client) Exec(ctx context.Context, containerID, processID string, spec *specs.Process, withStdin bool, attachStdio StdioCallback) (int, error) { - ctr := c.getContainer(containerID) - if ctr == nil { - return -1, errors.WithStack(newNotFoundError("no such container")) - } - t := ctr.getTask() - if t == nil { - return -1, errors.WithStack(newInvalidParameterError("container is not running")) - } - - if p := ctr.getProcess(processID); p != nil { - return -1, errors.WithStack(newConflictError("id already in use")) - } - - var ( - p containerd.Process - rio cio.IO - err error - stdinCloseSync = make(chan struct{}) - ) - - fifos := newFIFOSet(ctr.bundleDir, processID, withStdin, spec.Terminal) - - defer func() { - if err != nil { - if rio != nil { - rio.Cancel() - rio.Close() - } - } - }() - - p, err = t.Exec(ctx, processID, spec, func(id string) (cio.IO, error) { - rio, err = c.createIO(fifos, containerID, processID, stdinCloseSync, attachStdio) - return rio, err - }) - if err != nil { - close(stdinCloseSync) - return -1, wrapError(err) - } - - ctr.addProcess(processID, p) - - // Signal c.createIO that it can call CloseIO - close(stdinCloseSync) - - if err = p.Start(ctx); err != nil { - p.Delete(context.Background()) - ctr.deleteProcess(processID) - return -1, wrapError(err) - } - - return int(p.Pid()), nil -} - -func (c *client) SignalProcess(ctx context.Context, containerID, processID string, signal int) error { - p, err := c.getProcess(containerID, processID) - if err != nil { - return err - } - return wrapError(p.Kill(ctx, syscall.Signal(signal))) -} - -func (c *client) ResizeTerminal(ctx context.Context, containerID, processID string, width, height int) error { - p, err := c.getProcess(containerID, processID) - if err != nil { - return err - } - - return p.Resize(ctx, uint32(width), uint32(height)) -} - -func (c *client) CloseStdin(ctx context.Context, containerID, processID string) error { - p, err := c.getProcess(containerID, processID) - if err != nil { - return err - } - - return p.CloseIO(ctx, containerd.WithStdinCloser) -} - -func (c *client) Pause(ctx context.Context, containerID string) error { - p, err := c.getProcess(containerID, InitProcessName) - if err != nil { - return err - } - - return wrapError(p.(containerd.Task).Pause(ctx)) -} - -func (c *client) Resume(ctx context.Context, containerID string) error { - p, err := c.getProcess(containerID, InitProcessName) - if err != nil { - return err - } - - return p.(containerd.Task).Resume(ctx) -} - -func (c *client) Stats(ctx context.Context, containerID string) (*Stats, error) { - p, err := c.getProcess(containerID, InitProcessName) - if err != nil { - return nil, err - } - - m, err := p.(containerd.Task).Metrics(ctx) - if err != nil { - return nil, err - } - - v, err := typeurl.UnmarshalAny(m.Data) - if err != nil { - return nil, err - } - return interfaceToStats(m.Timestamp, v), nil -} - -func (c *client) ListPids(ctx context.Context, containerID string) ([]uint32, error) { - p, err := c.getProcess(containerID, InitProcessName) - if err != nil { - return nil, err - } - - pis, err := p.(containerd.Task).Pids(ctx) - if err != nil { - return nil, err - } - - var pids []uint32 - for _, i := range pis { - pids = append(pids, i.Pid) - } - - return pids, nil -} - -func (c *client) Summary(ctx context.Context, containerID string) ([]Summary, error) { - p, err := c.getProcess(containerID, 
InitProcessName) - if err != nil { - return nil, err - } - - pis, err := p.(containerd.Task).Pids(ctx) - if err != nil { - return nil, err - } - - var infos []Summary - for _, pi := range pis { - i, err := typeurl.UnmarshalAny(pi.Info) - if err != nil { - return nil, errors.Wrap(err, "unable to decode process details") - } - s, err := summaryFromInterface(i) - if err != nil { - return nil, err - } - infos = append(infos, *s) - } - - return infos, nil -} - -func (c *client) DeleteTask(ctx context.Context, containerID string) (uint32, time.Time, error) { - p, err := c.getProcess(containerID, InitProcessName) - if err != nil { - return 255, time.Now(), nil - } - - status, err := p.(containerd.Task).Delete(ctx) - if err != nil { - return 255, time.Now(), nil - } - - if ctr := c.getContainer(containerID); ctr != nil { - ctr.setTask(nil) - } - return status.ExitCode(), status.ExitTime(), nil -} - -func (c *client) Delete(ctx context.Context, containerID string) error { - ctr := c.getContainer(containerID) - if ctr == nil { - return errors.WithStack(newNotFoundError("no such container")) - } - - if err := ctr.ctr.Delete(ctx); err != nil { - return wrapError(err) - } - - if os.Getenv("LIBCONTAINERD_NOCLEAN") != "1" { - if err := os.RemoveAll(ctr.bundleDir); err != nil { - c.logger.WithError(err).WithFields(logrus.Fields{ - "container": containerID, - "bundle": ctr.bundleDir, - }).Error("failed to remove state dir") - } - } - - c.removeContainer(containerID) - - return nil -} - -func (c *client) Status(ctx context.Context, containerID string) (Status, error) { - ctr := c.getContainer(containerID) - if ctr == nil { - return StatusUnknown, errors.WithStack(newNotFoundError("no such container")) - } - - t := ctr.getTask() - if t == nil { - return StatusUnknown, errors.WithStack(newNotFoundError("no such task")) - } - - s, err := t.Status(ctx) - if err != nil { - return StatusUnknown, wrapError(err) - } - - return Status(s.Status), nil -} - -func (c *client) CreateCheckpoint(ctx context.Context, containerID, checkpointDir string, exit bool) error { - p, err := c.getProcess(containerID, InitProcessName) - if err != nil { - return err - } - - img, err := p.(containerd.Task).Checkpoint(ctx) - if err != nil { - return wrapError(err) - } - // Whatever happens, delete the checkpoint from containerd - defer func() { - err := c.getRemote().ImageService().Delete(context.Background(), img.Name()) - if err != nil { - c.logger.WithError(err).WithField("digest", img.Target().Digest). 
- Warnf("failed to delete checkpoint image") - } - }() - - b, err := content.ReadBlob(ctx, c.getRemote().ContentStore(), img.Target()) - if err != nil { - return errdefs.System(errors.Wrapf(err, "failed to retrieve checkpoint data")) - } - var index v1.Index - if err := json.Unmarshal(b, &index); err != nil { - return errdefs.System(errors.Wrapf(err, "failed to decode checkpoint data")) - } - - var cpDesc *v1.Descriptor - for _, m := range index.Manifests { - if m.MediaType == images.MediaTypeContainerd1Checkpoint { - cpDesc = &m - break - } - } - if cpDesc == nil { - return errdefs.System(errors.Wrapf(err, "invalid checkpoint")) - } - - rat, err := c.getRemote().ContentStore().ReaderAt(ctx, *cpDesc) - if err != nil { - return errdefs.System(errors.Wrapf(err, "failed to get checkpoint reader")) - } - defer rat.Close() - _, err = archive.Apply(ctx, checkpointDir, content.NewReader(rat)) - if err != nil { - return errdefs.System(errors.Wrapf(err, "failed to read checkpoint reader")) - } - - return err -} - -func (c *client) getContainer(id string) *container { - c.RLock() - ctr := c.containers[id] - c.RUnlock() - - return ctr -} - -func (c *client) removeContainer(id string) { - c.Lock() - delete(c.containers, id) - c.Unlock() -} - -func (c *client) getProcess(containerID, processID string) (containerd.Process, error) { - ctr := c.getContainer(containerID) - if ctr == nil { - return nil, errors.WithStack(newNotFoundError("no such container")) - } - - t := ctr.getTask() - if t == nil { - return nil, errors.WithStack(newNotFoundError("container is not running")) - } - if processID == InitProcessName { - return t, nil - } - - p := ctr.getProcess(processID) - if p == nil { - return nil, errors.WithStack(newNotFoundError("no such exec")) - } - return p, nil -} - -// createIO creates the io to be used by a process -// This needs to get a pointer to interface as upon closure the process may not have yet been registered -func (c *client) createIO(fifos *cio.FIFOSet, containerID, processID string, stdinCloseSync chan struct{}, attachStdio StdioCallback) (cio.IO, error) { - var ( - io *cio.DirectIO - err error - ) - - io, err = cio.NewDirectIO(context.Background(), fifos) - if err != nil { - return nil, err - } - - if io.Stdin != nil { - var ( - err error - stdinOnce sync.Once - ) - pipe := io.Stdin - io.Stdin = ioutils.NewWriteCloserWrapper(pipe, func() error { - stdinOnce.Do(func() { - err = pipe.Close() - // Do the rest in a new routine to avoid a deadlock if the - // Exec/Start call failed. - go func() { - <-stdinCloseSync - p, err := c.getProcess(containerID, processID) - if err == nil { - err = p.CloseIO(context.Background(), containerd.WithStdinCloser) - if err != nil && strings.Contains(err.Error(), "transport is closing") { - err = nil - } - } - }() - }) - return err - }) - } - - rio, err := attachStdio(io) - if err != nil { - io.Cancel() - io.Close() - } - return rio, err -} - -func (c *client) processEvent(ctr *container, et EventType, ei EventInfo) { - c.eventQ.append(ei.ContainerID, func() { - err := c.backend.ProcessEvent(ei.ContainerID, et, ei) - if err != nil { - c.logger.WithError(err).WithFields(logrus.Fields{ - "container": ei.ContainerID, - "event": et, - "event-info": ei, - }).Error("failed to process event") - } - - if et == EventExit && ei.ProcessID != ei.ContainerID { - p := ctr.getProcess(ei.ProcessID) - if p == nil { - c.logger.WithError(errors.New("no such process")). 
- WithFields(logrus.Fields{ - "container": ei.ContainerID, - "process": ei.ProcessID, - }).Error("exit event") - return - } - _, err = p.Delete(context.Background()) - if err != nil { - c.logger.WithError(err).WithFields(logrus.Fields{ - "container": ei.ContainerID, - "process": ei.ProcessID, - }).Warn("failed to delete process") - } - ctr.deleteProcess(ei.ProcessID) - - ctr := c.getContainer(ei.ContainerID) - if ctr == nil { - c.logger.WithFields(logrus.Fields{ - "container": ei.ContainerID, - }).Error("failed to find container") - } else { - newFIFOSet(ctr.bundleDir, ei.ProcessID, true, false).Close() - } - } - }) -} - -func (c *client) processEventStream(ctx context.Context) { - var ( - err error - ev *events.Envelope - et EventType - ei EventInfo - ctr *container - ) - - // Filter on both namespace *and* topic. To create an "and" filter, - // this must be a single, comma-separated string - eventStream, errC := c.getRemote().EventService().Subscribe(ctx, "namespace=="+c.namespace+",topic~=|^/tasks/|") - - c.logger.WithField("namespace", c.namespace).Debug("processing event stream") - - var oomKilled bool - for { - select { - case err = <-errC: - if err != nil { - errStatus, ok := status.FromError(err) - if !ok || errStatus.Code() != codes.Canceled { - c.logger.WithError(err).Error("failed to get event") - go c.processEventStream(ctx) - } else { - c.logger.WithError(ctx.Err()).Info("stopping event stream following graceful shutdown") - } - } - return - case ev = <-eventStream: - if ev.Event == nil { - c.logger.WithField("event", ev).Warn("invalid event") - continue - } - - v, err := typeurl.UnmarshalAny(ev.Event) - if err != nil { - c.logger.WithError(err).WithField("event", ev).Warn("failed to unmarshal event") - continue - } - - c.logger.WithField("topic", ev.Topic).Debug("event") - - switch t := v.(type) { - case *apievents.TaskCreate: - et = EventCreate - ei = EventInfo{ - ContainerID: t.ContainerID, - ProcessID: t.ContainerID, - Pid: t.Pid, - } - case *apievents.TaskStart: - et = EventStart - ei = EventInfo{ - ContainerID: t.ContainerID, - ProcessID: t.ContainerID, - Pid: t.Pid, - } - case *apievents.TaskExit: - et = EventExit - ei = EventInfo{ - ContainerID: t.ContainerID, - ProcessID: t.ID, - Pid: t.Pid, - ExitCode: t.ExitStatus, - ExitedAt: t.ExitedAt, - } - case *apievents.TaskOOM: - et = EventOOM - ei = EventInfo{ - ContainerID: t.ContainerID, - OOMKilled: true, - } - oomKilled = true - case *apievents.TaskExecAdded: - et = EventExecAdded - ei = EventInfo{ - ContainerID: t.ContainerID, - ProcessID: t.ExecID, - } - case *apievents.TaskExecStarted: - et = EventExecStarted - ei = EventInfo{ - ContainerID: t.ContainerID, - ProcessID: t.ExecID, - Pid: t.Pid, - } - case *apievents.TaskPaused: - et = EventPaused - ei = EventInfo{ - ContainerID: t.ContainerID, - } - case *apievents.TaskResumed: - et = EventResumed - ei = EventInfo{ - ContainerID: t.ContainerID, - } - default: - c.logger.WithFields(logrus.Fields{ - "topic": ev.Topic, - "type": reflect.TypeOf(t)}, - ).Info("ignoring event") - continue - } - - ctr = c.getContainer(ei.ContainerID) - if ctr == nil { - c.logger.WithField("container", ei.ContainerID).Warn("unknown container") - continue - } - - if oomKilled { - ctr.setOOMKilled(true) - oomKilled = false - } - ei.OOMKilled = ctr.getOOMKilled() - - c.processEvent(ctr, et, ei) - } - } -} - -func (c *client) writeContent(ctx context.Context, mediaType, ref string, r io.Reader) (*types.Descriptor, error) { - writer, err := c.getRemote().ContentStore().Writer(ctx, 
content.WithRef(ref)) - if err != nil { - return nil, err - } - defer writer.Close() - size, err := io.Copy(writer, r) - if err != nil { - return nil, err - } - labels := map[string]string{ - "containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339), - } - if err := writer.Commit(ctx, 0, "", content.WithLabels(labels)); err != nil { - return nil, err - } - return &types.Descriptor{ - MediaType: mediaType, - Digest: writer.Digest(), - Size_: size, - }, nil -} - -func wrapError(err error) error { - switch { - case err == nil: - return nil - case containerderrors.IsNotFound(err): - return errdefs.NotFound(err) - } - - msg := err.Error() - for _, s := range []string{"container does not exist", "not found", "no such container"} { - if strings.Contains(msg, s) { - return errdefs.NotFound(err) - } - } - return err -} diff --git a/vendor/github.com/docker/docker/libcontainerd/client_daemon_linux.go b/vendor/github.com/docker/docker/libcontainerd/client_daemon_linux.go deleted file mode 100644 index b57c4d3c5..000000000 --- a/vendor/github.com/docker/docker/libcontainerd/client_daemon_linux.go +++ /dev/null @@ -1,108 +0,0 @@ -package libcontainerd // import "github.com/docker/docker/libcontainerd" - -import ( - "context" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/containerd/containerd" - "github.com/containerd/containerd/cio" - "github.com/docker/docker/pkg/idtools" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/sirupsen/logrus" -) - -func summaryFromInterface(i interface{}) (*Summary, error) { - return &Summary{}, nil -} - -func (c *client) UpdateResources(ctx context.Context, containerID string, resources *Resources) error { - p, err := c.getProcess(containerID, InitProcessName) - if err != nil { - return err - } - - // go doesn't like the alias in 1.8, this means this need to be - // platform specific - return p.(containerd.Task).Update(ctx, containerd.WithResources((*specs.LinuxResources)(resources))) -} - -func hostIDFromMap(id uint32, mp []specs.LinuxIDMapping) int { - for _, m := range mp { - if id >= m.ContainerID && id <= m.ContainerID+m.Size-1 { - return int(m.HostID + id - m.ContainerID) - } - } - return 0 -} - -func getSpecUser(ociSpec *specs.Spec) (int, int) { - var ( - uid int - gid int - ) - - for _, ns := range ociSpec.Linux.Namespaces { - if ns.Type == specs.UserNamespace { - uid = hostIDFromMap(0, ociSpec.Linux.UIDMappings) - gid = hostIDFromMap(0, ociSpec.Linux.GIDMappings) - break - } - } - - return uid, gid -} - -func prepareBundleDir(bundleDir string, ociSpec *specs.Spec) (string, error) { - uid, gid := getSpecUser(ociSpec) - if uid == 0 && gid == 0 { - return bundleDir, idtools.MkdirAllAndChownNew(bundleDir, 0755, idtools.IDPair{UID: 0, GID: 0}) - } - - p := string(filepath.Separator) - components := strings.Split(bundleDir, string(filepath.Separator)) - for _, d := range components[1:] { - p = filepath.Join(p, d) - fi, err := os.Stat(p) - if err != nil && !os.IsNotExist(err) { - return "", err - } - if os.IsNotExist(err) || fi.Mode()&1 == 0 { - p = fmt.Sprintf("%s.%d.%d", p, uid, gid) - if err := idtools.MkdirAndChown(p, 0700, idtools.IDPair{UID: uid, GID: gid}); err != nil && !os.IsExist(err) { - return "", err - } - } - } - - return p, nil -} - -func newFIFOSet(bundleDir, processID string, withStdin, withTerminal bool) *cio.FIFOSet { - config := cio.Config{ - Terminal: withTerminal, - Stdout: filepath.Join(bundleDir, processID+"-stdout"), - } - paths := []string{config.Stdout} - - if withStdin { - config.Stdin = 
filepath.Join(bundleDir, processID+"-stdin") - paths = append(paths, config.Stdin) - } - if !withTerminal { - config.Stderr = filepath.Join(bundleDir, processID+"-stderr") - paths = append(paths, config.Stderr) - } - closer := func() error { - for _, path := range paths { - if err := os.RemoveAll(path); err != nil { - logrus.Warnf("libcontainerd: failed to remove fifo %v: %v", path, err) - } - } - return nil - } - - return cio.NewFIFOSet(config, closer) -} diff --git a/vendor/github.com/docker/docker/libcontainerd/client_daemon_windows.go b/vendor/github.com/docker/docker/libcontainerd/client_daemon_windows.go deleted file mode 100644 index 4aba33e18..000000000 --- a/vendor/github.com/docker/docker/libcontainerd/client_daemon_windows.go +++ /dev/null @@ -1,55 +0,0 @@ -package libcontainerd // import "github.com/docker/docker/libcontainerd" - -import ( - "fmt" - "path/filepath" - - "github.com/containerd/containerd/cio" - "github.com/containerd/containerd/windows/hcsshimtypes" - specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" -) - -func summaryFromInterface(i interface{}) (*Summary, error) { - switch pd := i.(type) { - case *hcsshimtypes.ProcessDetails: - return &Summary{ - CreateTimestamp: pd.CreatedAt, - ImageName: pd.ImageName, - KernelTime100ns: pd.KernelTime_100Ns, - MemoryCommitBytes: pd.MemoryCommitBytes, - MemoryWorkingSetPrivateBytes: pd.MemoryWorkingSetPrivateBytes, - MemoryWorkingSetSharedBytes: pd.MemoryWorkingSetSharedBytes, - ProcessId: pd.ProcessID, - UserTime100ns: pd.UserTime_100Ns, - }, nil - default: - return nil, errors.Errorf("Unknown process details type %T", pd) - } -} - -func prepareBundleDir(bundleDir string, ociSpec *specs.Spec) (string, error) { - return bundleDir, nil -} - -func pipeName(containerID, processID, name string) string { - return fmt.Sprintf(`\\.\pipe\containerd-%s-%s-%s`, containerID, processID, name) -} - -func newFIFOSet(bundleDir, processID string, withStdin, withTerminal bool) *cio.FIFOSet { - containerID := filepath.Base(bundleDir) - config := cio.Config{ - Terminal: withTerminal, - Stdout: pipeName(containerID, processID, "stdout"), - } - - if withStdin { - config.Stdin = pipeName(containerID, processID, "stdin") - } - - if !config.Terminal { - config.Stderr = pipeName(containerID, processID, "stderr") - } - - return cio.NewFIFOSet(config, nil) -} diff --git a/vendor/github.com/docker/docker/libcontainerd/client_local_windows.go b/vendor/github.com/docker/docker/libcontainerd/client_local_windows.go deleted file mode 100644 index 6e3454e51..000000000 --- a/vendor/github.com/docker/docker/libcontainerd/client_local_windows.go +++ /dev/null @@ -1,1319 +0,0 @@ -package libcontainerd // import "github.com/docker/docker/libcontainerd" - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path" - "path/filepath" - "regexp" - "strings" - "sync" - "syscall" - "time" - - "github.com/Microsoft/hcsshim" - opengcs "github.com/Microsoft/opengcs/client" - "github.com/containerd/containerd" - "github.com/containerd/containerd/cio" - "github.com/docker/docker/pkg/sysinfo" - "github.com/docker/docker/pkg/system" - specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sys/windows" -) - -const InitProcessName = "init" - -type process struct { - id string - pid int - hcsProcess hcsshim.Process -} - -type container struct { - sync.Mutex - - // The ociSpec is required, as client.Create() needs a spec, but can - // be called from the 
RestartManager context which does not otherwise - // have access to the Spec - ociSpec *specs.Spec - - isWindows bool - manualStopRequested bool - hcsContainer hcsshim.Container - - id string - status Status - exitedAt time.Time - exitCode uint32 - waitCh chan struct{} - init *process - execs map[string]*process - updatePending bool -} - -// Win32 error codes that are used for various workarounds -// These really should be ALL_CAPS to match golangs syscall library and standard -// Win32 error conventions, but golint insists on CamelCase. -const ( - CoEClassstring = syscall.Errno(0x800401F3) // Invalid class string - ErrorNoNetwork = syscall.Errno(1222) // The network is not present or not started - ErrorBadPathname = syscall.Errno(161) // The specified path is invalid - ErrorInvalidObject = syscall.Errno(0x800710D8) // The object identifier does not represent a valid object -) - -// defaultOwner is a tag passed to HCS to allow it to differentiate between -// container creator management stacks. We hard code "docker" in the case -// of docker. -const defaultOwner = "docker" - -func (c *client) Version(ctx context.Context) (containerd.Version, error) { - return containerd.Version{}, errors.New("not implemented on Windows") -} - -// Create is the entrypoint to create a container from a spec. -// Table below shows the fields required for HCS JSON calling parameters, -// where if not populated, is omitted. -// +-----------------+--------------------------------------------+---------------------------------------------------+ -// | | Isolation=Process | Isolation=Hyper-V | -// +-----------------+--------------------------------------------+---------------------------------------------------+ -// | VolumePath | \\?\\Volume{GUIDa} | | -// | LayerFolderPath | %root%\windowsfilter\containerID | | -// | Layers[] | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID | -// | HvRuntime | | ImagePath=%root%\BaseLayerID\UtilityVM | -// +-----------------+--------------------------------------------+---------------------------------------------------+ -// -// Isolation=Process example: -// -// { -// "SystemType": "Container", -// "Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776", -// "Owner": "docker", -// "VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}", -// "IgnoreFlushesDuringBoot": true, -// "LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776", -// "Layers": [{ -// "ID": "18955d65-d45a-557b-bf1c-49d6dfefc526", -// "Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c" -// }], -// "HostName": "5e0055c814a6", -// "MappedDirectories": [], -// "HvPartition": false, -// "EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"], -//} -// -// Isolation=Hyper-V example: -// -//{ -// "SystemType": "Container", -// "Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d", -// "Owner": "docker", -// "IgnoreFlushesDuringBoot": true, -// "Layers": [{ -// "ID": "18955d65-d45a-557b-bf1c-49d6dfefc526", -// "Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c" -// }], -// "HostName": "475c2c58933b", -// "MappedDirectories": [], -// "HvPartition": true, -// "EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"], -// "DNSSearchList": "a.com,b.com,c.com", -// "HvRuntime": { -// "ImagePath": 
"C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM" -// }, -//} -func (c *client) Create(_ context.Context, id string, spec *specs.Spec, runtimeOptions interface{}) error { - if ctr := c.getContainer(id); ctr != nil { - return errors.WithStack(newConflictError("id already in use")) - } - - // spec.Linux must be nil for Windows containers, but spec.Windows - // will be filled in regardless of container platform. This is a - // temporary workaround due to LCOW requiring layer folder paths, - // which are stored under spec.Windows. - // - // TODO: @darrenstahlmsft fix this once the OCI spec is updated to - // support layer folder paths for LCOW - if spec.Linux == nil { - return c.createWindows(id, spec, runtimeOptions) - } - return c.createLinux(id, spec, runtimeOptions) -} - -func (c *client) createWindows(id string, spec *specs.Spec, runtimeOptions interface{}) error { - logger := c.logger.WithField("container", id) - configuration := &hcsshim.ContainerConfig{ - SystemType: "Container", - Name: id, - Owner: defaultOwner, - IgnoreFlushesDuringBoot: spec.Windows.IgnoreFlushesDuringBoot, - HostName: spec.Hostname, - HvPartition: false, - } - - if spec.Windows.Resources != nil { - if spec.Windows.Resources.CPU != nil { - if spec.Windows.Resources.CPU.Count != nil { - // This check is being done here rather than in adaptContainerSettings - // because we don't want to update the HostConfig in case this container - // is moved to a host with more CPUs than this one. - cpuCount := *spec.Windows.Resources.CPU.Count - hostCPUCount := uint64(sysinfo.NumCPU()) - if cpuCount > hostCPUCount { - c.logger.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount) - cpuCount = hostCPUCount - } - configuration.ProcessorCount = uint32(cpuCount) - } - if spec.Windows.Resources.CPU.Shares != nil { - configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares) - } - if spec.Windows.Resources.CPU.Maximum != nil { - configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Maximum) - } - } - if spec.Windows.Resources.Memory != nil { - if spec.Windows.Resources.Memory.Limit != nil { - configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024 - } - } - if spec.Windows.Resources.Storage != nil { - if spec.Windows.Resources.Storage.Bps != nil { - configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps - } - if spec.Windows.Resources.Storage.Iops != nil { - configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops - } - } - } - - if spec.Windows.HyperV != nil { - configuration.HvPartition = true - } - - if spec.Windows.Network != nil { - configuration.EndpointList = spec.Windows.Network.EndpointList - configuration.AllowUnqualifiedDNSQuery = spec.Windows.Network.AllowUnqualifiedDNSQuery - if spec.Windows.Network.DNSSearchList != nil { - configuration.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",") - } - configuration.NetworkSharedContainerName = spec.Windows.Network.NetworkSharedContainerName - } - - if cs, ok := spec.Windows.CredentialSpec.(string); ok { - configuration.Credentials = cs - } - - // We must have least two layers in the spec, the bottom one being a - // base image, the top one being the RW layer. 
- if spec.Windows.LayerFolders == nil || len(spec.Windows.LayerFolders) < 2 { - return fmt.Errorf("OCI spec is invalid - at least two LayerFolders must be supplied to the runtime") - } - - // Strip off the top-most layer as that's passed in separately to HCS - configuration.LayerFolderPath = spec.Windows.LayerFolders[len(spec.Windows.LayerFolders)-1] - layerFolders := spec.Windows.LayerFolders[:len(spec.Windows.LayerFolders)-1] - - if configuration.HvPartition { - // We don't currently support setting the utility VM image explicitly. - // TODO @swernli/jhowardmsft circa RS5, this may be re-locatable. - if spec.Windows.HyperV.UtilityVMPath != "" { - return errors.New("runtime does not support an explicit utility VM path for Hyper-V containers") - } - - // Find the upper-most utility VM image. - var uvmImagePath string - for _, path := range layerFolders { - fullPath := filepath.Join(path, "UtilityVM") - _, err := os.Stat(fullPath) - if err == nil { - uvmImagePath = fullPath - break - } - if !os.IsNotExist(err) { - return err - } - } - if uvmImagePath == "" { - return errors.New("utility VM image could not be found") - } - configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath} - - if spec.Root.Path != "" { - return errors.New("OCI spec is invalid - Root.Path must be omitted for a Hyper-V container") - } - } else { - const volumeGUIDRegex = `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}\\$` - if _, err := regexp.MatchString(volumeGUIDRegex, spec.Root.Path); err != nil { - return fmt.Errorf(`OCI spec is invalid - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, spec.Root.Path) - } - // HCS API requires the trailing backslash to be removed - configuration.VolumePath = spec.Root.Path[:len(spec.Root.Path)-1] - } - - if spec.Root.Readonly { - return errors.New(`OCI spec is invalid - Root.Readonly must not be set on Windows`) - } - - for _, layerPath := range layerFolders { - _, filename := filepath.Split(layerPath) - g, err := hcsshim.NameToGuid(filename) - if err != nil { - return err - } - configuration.Layers = append(configuration.Layers, hcsshim.Layer{ - ID: g.ToString(), - Path: layerPath, - }) - } - - // Add the mounts (volumes, bind mounts etc) to the structure - var mds []hcsshim.MappedDir - var mps []hcsshim.MappedPipe - for _, mount := range spec.Mounts { - const pipePrefix = `\\.\pipe\` - if mount.Type != "" { - return fmt.Errorf("OCI spec is invalid - Mount.Type '%s' must not be set", mount.Type) - } - if strings.HasPrefix(mount.Destination, pipePrefix) { - mp := hcsshim.MappedPipe{ - HostPath: mount.Source, - ContainerPipeName: mount.Destination[len(pipePrefix):], - } - mps = append(mps, mp) - } else { - md := hcsshim.MappedDir{ - HostPath: mount.Source, - ContainerPath: mount.Destination, - ReadOnly: false, - } - for _, o := range mount.Options { - if strings.ToLower(o) == "ro" { - md.ReadOnly = true - } - } - mds = append(mds, md) - } - } - configuration.MappedDirectories = mds - if len(mps) > 0 && system.GetOSVersion().Build < 16299 { // RS3 - return errors.New("named pipe mounts are not supported on this version of Windows") - } - configuration.MappedPipes = mps - - hcsContainer, err := hcsshim.CreateContainer(id, configuration) - if err != nil { - return err - } - - // Construct a container object for calling start on it. 
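// The layer handling above peels the top-most (read-write) scratch folder
// off spec.Windows.LayerFolders and hands the remaining read-only image
// layers to HCS individually. A minimal sketch of that split, with
// hypothetical paths:
package main

import "fmt"

func splitLayers(layerFolders []string) (rw string, ro []string) {
	// The last entry is the container's scratch layer; everything below
	// it is a read-only image layer.
	return layerFolders[len(layerFolders)-1], layerFolders[:len(layerFolders)-1]
}

func main() {
	rw, ro := splitLayers([]string{
		`C:\control\windowsfilter\base`,
		`C:\control\windowsfilter\app`,
		`C:\control\windowsfilter\scratch`,
	})
	fmt.Println("LayerFolderPath (RW):", rw)
	fmt.Println("Layers (RO):", ro)
}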
- ctr := &container{ - id: id, - execs: make(map[string]*process), - isWindows: true, - ociSpec: spec, - hcsContainer: hcsContainer, - status: StatusCreated, - waitCh: make(chan struct{}), - } - - logger.Debug("starting container") - if err = hcsContainer.Start(); err != nil { - c.logger.WithError(err).Error("failed to start container") - ctr.debugGCS() - if err := c.terminateContainer(ctr); err != nil { - c.logger.WithError(err).Error("failed to cleanup after a failed Start") - } else { - c.logger.Debug("cleaned up after failed Start by calling Terminate") - } - return err - } - ctr.debugGCS() - - c.Lock() - c.containers[id] = ctr - c.Unlock() - - logger.Debug("createWindows() completed successfully") - return nil - -} - -func (c *client) createLinux(id string, spec *specs.Spec, runtimeOptions interface{}) error { - logrus.Debugf("libcontainerd: createLinux(): containerId %s ", id) - logger := c.logger.WithField("container", id) - - if runtimeOptions == nil { - return fmt.Errorf("lcow option must be supplied to the runtime") - } - lcowConfig, ok := runtimeOptions.(*opengcs.Config) - if !ok { - return fmt.Errorf("lcow option must be supplied to the runtime") - } - - configuration := &hcsshim.ContainerConfig{ - HvPartition: true, - Name: id, - SystemType: "container", - ContainerType: "linux", - Owner: defaultOwner, - TerminateOnLastHandleClosed: true, - } - - if lcowConfig.ActualMode == opengcs.ModeActualVhdx { - configuration.HvRuntime = &hcsshim.HvRuntime{ - ImagePath: lcowConfig.Vhdx, - BootSource: "Vhd", - WritableBootSource: false, - } - } else { - configuration.HvRuntime = &hcsshim.HvRuntime{ - ImagePath: lcowConfig.KirdPath, - LinuxKernelFile: lcowConfig.KernelFile, - LinuxInitrdFile: lcowConfig.InitrdFile, - LinuxBootParameters: lcowConfig.BootParameters, - } - } - - if spec.Windows == nil { - return fmt.Errorf("spec.Windows must not be nil for LCOW containers") - } - - // We must have least one layer in the spec - if spec.Windows.LayerFolders == nil || len(spec.Windows.LayerFolders) == 0 { - return fmt.Errorf("OCI spec is invalid - at least one LayerFolders must be supplied to the runtime") - } - - // Strip off the top-most layer as that's passed in separately to HCS - configuration.LayerFolderPath = spec.Windows.LayerFolders[len(spec.Windows.LayerFolders)-1] - layerFolders := spec.Windows.LayerFolders[:len(spec.Windows.LayerFolders)-1] - - for _, layerPath := range layerFolders { - _, filename := filepath.Split(layerPath) - g, err := hcsshim.NameToGuid(filename) - if err != nil { - return err - } - configuration.Layers = append(configuration.Layers, hcsshim.Layer{ - ID: g.ToString(), - Path: filepath.Join(layerPath, "layer.vhd"), - }) - } - - if spec.Windows.Network != nil { - configuration.EndpointList = spec.Windows.Network.EndpointList - configuration.AllowUnqualifiedDNSQuery = spec.Windows.Network.AllowUnqualifiedDNSQuery - if spec.Windows.Network.DNSSearchList != nil { - configuration.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",") - } - configuration.NetworkSharedContainerName = spec.Windows.Network.NetworkSharedContainerName - } - - // Add the mounts (volumes, bind mounts etc) to the structure. We have to do - // some translation for both the mapped directories passed into HCS and in - // the spec. - // - // For HCS, we only pass in the mounts from the spec which are type "bind". 
- // Further, the "ContainerPath" field (which is a little mis-leadingly - // named when it applies to the utility VM rather than the container in the - // utility VM) is moved to under /tmp/gcs//binds, where this is passed - // by the caller through a 'uvmpath' option. - // - // We do similar translation for the mounts in the spec by stripping out - // the uvmpath option, and translating the Source path to the location in the - // utility VM calculated above. - // - // From inside the utility VM, you would see a 9p mount such as in the following - // where a host folder has been mapped to /target. The line with /tmp/gcs//binds - // specifically: - // - // / # mount - // rootfs on / type rootfs (rw,size=463736k,nr_inodes=115934) - // proc on /proc type proc (rw,relatime) - // sysfs on /sys type sysfs (rw,relatime) - // udev on /dev type devtmpfs (rw,relatime,size=498100k,nr_inodes=124525,mode=755) - // tmpfs on /run type tmpfs (rw,relatime) - // cgroup on /sys/fs/cgroup type cgroup (rw,relatime,cpuset,cpu,cpuacct,blkio,memory,devices,freezer,net_cls,perf_event,net_prio,hugetlb,pids,rdma) - // mqueue on /dev/mqueue type mqueue (rw,relatime) - // devpts on /dev/pts type devpts (rw,relatime,mode=600,ptmxmode=000) - // /binds/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/target on /binds/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/target type 9p (rw,sync,dirsync,relatime,trans=fd,rfdno=6,wfdno=6) - // /dev/pmem0 on /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/layer0 type ext4 (ro,relatime,block_validity,delalloc,norecovery,barrier,dax,user_xattr,acl) - // /dev/sda on /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/scratch type ext4 (rw,relatime,block_validity,delalloc,barrier,user_xattr,acl) - // overlay on /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/rootfs type overlay (rw,relatime,lowerdir=/tmp/base/:/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/layer0,upperdir=/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/scratch/upper,workdir=/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/scratch/work) - // - // /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc # ls -l - // total 16 - // drwx------ 3 0 0 60 Sep 7 18:54 binds - // -rw-r--r-- 1 0 0 3345 Sep 7 18:54 config.json - // drwxr-xr-x 10 0 0 4096 Sep 6 17:26 layer0 - // drwxr-xr-x 1 0 0 4096 Sep 7 18:54 rootfs - // drwxr-xr-x 5 0 0 4096 Sep 7 18:54 scratch - // - // /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc # ls -l binds - // total 0 - // drwxrwxrwt 2 0 0 4096 Sep 7 16:51 target - - mds := []hcsshim.MappedDir{} - specMounts := []specs.Mount{} - for _, mount := range spec.Mounts { - specMount := mount - if mount.Type == "bind" { - // Strip out the uvmpath from the options - updatedOptions := []string{} - uvmPath := "" - readonly := false - for _, opt := range mount.Options { - dropOption := false - elements := strings.SplitN(opt, "=", 2) - switch elements[0] { - case "uvmpath": - uvmPath = elements[1] - dropOption = true - case "rw": - case "ro": - readonly = true - case "rbind": - default: - return fmt.Errorf("unsupported option %q", opt) - } - if !dropOption { - updatedOptions = append(updatedOptions, opt) - } - } - mount.Options = updatedOptions - if uvmPath == "" { - return fmt.Errorf("no uvmpath for bind mount %+v", mount) - } - md := hcsshim.MappedDir{ - HostPath: mount.Source, - 
ContainerPath: path.Join(uvmPath, mount.Destination), - CreateInUtilityVM: true, - ReadOnly: readonly, - } - mds = append(mds, md) - specMount.Source = path.Join(uvmPath, mount.Destination) - } - specMounts = append(specMounts, specMount) - } - configuration.MappedDirectories = mds - - hcsContainer, err := hcsshim.CreateContainer(id, configuration) - if err != nil { - return err - } - - spec.Mounts = specMounts - - // Construct a container object for calling start on it. - ctr := &container{ - id: id, - execs: make(map[string]*process), - isWindows: false, - ociSpec: spec, - hcsContainer: hcsContainer, - status: StatusCreated, - waitCh: make(chan struct{}), - } - - // Start the container. - logger.Debug("starting container") - if err = hcsContainer.Start(); err != nil { - c.logger.WithError(err).Error("failed to start container") - ctr.debugGCS() - if err := c.terminateContainer(ctr); err != nil { - c.logger.WithError(err).Error("failed to cleanup after a failed Start") - } else { - c.logger.Debug("cleaned up after failed Start by calling Terminate") - } - return err - } - ctr.debugGCS() - - c.Lock() - c.containers[id] = ctr - c.Unlock() - - c.eventQ.append(id, func() { - ei := EventInfo{ - ContainerID: id, - } - c.logger.WithFields(logrus.Fields{ - "container": ctr.id, - "event": EventCreate, - }).Info("sending event") - err := c.backend.ProcessEvent(id, EventCreate, ei) - if err != nil { - c.logger.WithError(err).WithFields(logrus.Fields{ - "container": id, - "event": EventCreate, - }).Error("failed to process event") - } - }) - - logger.Debug("createLinux() completed successfully") - return nil -} - -func (c *client) Start(_ context.Context, id, _ string, withStdin bool, attachStdio StdioCallback) (int, error) { - ctr := c.getContainer(id) - switch { - case ctr == nil: - return -1, errors.WithStack(newNotFoundError("no such container")) - case ctr.init != nil: - return -1, errors.WithStack(newConflictError("container already started")) - } - - logger := c.logger.WithField("container", id) - - // Note we always tell HCS to create stdout as it's required - // regardless of '-i' or '-t' options, so that docker can always grab - // the output through logs. We also tell HCS to always create stdin, - // even if it's not used - it will be closed shortly. Stderr is only - // created if it we're not -t. - var ( - emulateConsole bool - createStdErrPipe bool - ) - if ctr.ociSpec.Process != nil { - emulateConsole = ctr.ociSpec.Process.Terminal - createStdErrPipe = !ctr.ociSpec.Process.Terminal - } - - createProcessParms := &hcsshim.ProcessConfig{ - EmulateConsole: emulateConsole, - WorkingDirectory: ctr.ociSpec.Process.Cwd, - CreateStdInPipe: true, - CreateStdOutPipe: true, - CreateStdErrPipe: createStdErrPipe, - } - - if ctr.ociSpec.Process != nil && ctr.ociSpec.Process.ConsoleSize != nil { - createProcessParms.ConsoleSize[0] = uint(ctr.ociSpec.Process.ConsoleSize.Height) - createProcessParms.ConsoleSize[1] = uint(ctr.ociSpec.Process.ConsoleSize.Width) - } - - // Configure the environment for the process - createProcessParms.Environment = setupEnvironmentVariables(ctr.ociSpec.Process.Env) - if ctr.isWindows { - createProcessParms.CommandLine = strings.Join(ctr.ociSpec.Process.Args, " ") - } else { - createProcessParms.CommandArgs = ctr.ociSpec.Process.Args - } - createProcessParms.User = ctr.ociSpec.Process.User.Username - - // LCOW requires the raw OCI spec passed through HCS and onwards to - // GCS for the utility VM. 
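// A minimal sketch of the Windows/LCOW argument split used above: Windows
// containers get a single CommandLine string, LCOW containers get the
// argument vector as-is (CommandArgs).
package main

import (
	"fmt"
	"strings"
)

func buildCommand(isWindows bool, args []string) (cmdLine string, cmdArgs []string) {
	if isWindows {
		return strings.Join(args, " "), nil
	}
	return "", args
}

func main() {
	fmt.Println(buildCommand(true, []string{"cmd", "/c", "echo", "hello"}))
	fmt.Println(buildCommand(false, []string{"/bin/sh", "-c", "echo hello"}))
}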
- if !ctr.isWindows { - ociBuf, err := json.Marshal(ctr.ociSpec) - if err != nil { - return -1, err - } - ociRaw := json.RawMessage(ociBuf) - createProcessParms.OCISpecification = &ociRaw - } - - ctr.Lock() - defer ctr.Unlock() - - // Start the command running in the container. - newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms) - if err != nil { - logger.WithError(err).Error("CreateProcess() failed") - return -1, err - } - defer func() { - if err != nil { - if err := newProcess.Kill(); err != nil { - logger.WithError(err).Error("failed to kill process") - } - go func() { - if err := newProcess.Wait(); err != nil { - logger.WithError(err).Error("failed to wait for process") - } - if err := newProcess.Close(); err != nil { - logger.WithError(err).Error("failed to clean process resources") - } - }() - } - }() - p := &process{ - hcsProcess: newProcess, - id: InitProcessName, - pid: newProcess.Pid(), - } - logger.WithField("pid", p.pid).Debug("init process started") - - dio, err := newIOFromProcess(newProcess, ctr.ociSpec.Process.Terminal) - if err != nil { - logger.WithError(err).Error("failed to get stdio pipes") - return -1, err - } - _, err = attachStdio(dio) - if err != nil { - logger.WithError(err).Error("failed to attache stdio") - return -1, err - } - ctr.status = StatusRunning - ctr.init = p - - // Spin up a go routine waiting for exit to handle cleanup - go c.reapProcess(ctr, p) - - // Generate the associated event - c.eventQ.append(id, func() { - ei := EventInfo{ - ContainerID: id, - ProcessID: InitProcessName, - Pid: uint32(p.pid), - } - c.logger.WithFields(logrus.Fields{ - "container": ctr.id, - "event": EventStart, - "event-info": ei, - }).Info("sending event") - err := c.backend.ProcessEvent(ei.ContainerID, EventStart, ei) - if err != nil { - c.logger.WithError(err).WithFields(logrus.Fields{ - "container": id, - "event": EventStart, - "event-info": ei, - }).Error("failed to process event") - } - }) - logger.Debug("start() completed") - return p.pid, nil -} - -func newIOFromProcess(newProcess hcsshim.Process, terminal bool) (*cio.DirectIO, error) { - stdin, stdout, stderr, err := newProcess.Stdio() - if err != nil { - return nil, err - } - - dio := cio.NewDirectIO(createStdInCloser(stdin, newProcess), nil, nil, terminal) - - // Convert io.ReadClosers to io.Readers - if stdout != nil { - dio.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout}) - } - if stderr != nil { - dio.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr}) - } - return dio, nil -} - -// Exec adds a process in an running container -func (c *client) Exec(ctx context.Context, containerID, processID string, spec *specs.Process, withStdin bool, attachStdio StdioCallback) (int, error) { - ctr := c.getContainer(containerID) - switch { - case ctr == nil: - return -1, errors.WithStack(newNotFoundError("no such container")) - case ctr.hcsContainer == nil: - return -1, errors.WithStack(newInvalidParameterError("container is not running")) - case ctr.execs != nil && ctr.execs[processID] != nil: - return -1, errors.WithStack(newConflictError("id already in use")) - } - logger := c.logger.WithFields(logrus.Fields{ - "container": containerID, - "exec": processID, - }) - - // Note we always tell HCS to - // create stdout as it's required regardless of '-i' or '-t' options, so that - // docker can always grab the output through logs. We also tell HCS to always - // create stdin, even if it's not used - it will be closed shortly. Stderr - // is only created if it we're not -t. 
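// newIOFromProcess above wraps stdout/stderr so the underlying pipe is
// closed exactly once, on the first read error (including io.EOF). A
// minimal self-contained sketch of that auto-closing reader pattern
// (the real type, autoClosingReader, appears later in this patch in
// process_windows.go):
package main

import (
	"fmt"
	"io"
	"strings"
	"sync"
)

type autoCloser struct {
	io.ReadCloser
	once sync.Once
}

func (r *autoCloser) Read(b []byte) (int, error) {
	n, err := r.ReadCloser.Read(b)
	if err != nil {
		r.once.Do(func() { r.ReadCloser.Close() })
	}
	return n, err
}

func main() {
	r := &autoCloser{ReadCloser: io.NopCloser(strings.NewReader("stdout data"))}
	out, _ := io.ReadAll(r)
	fmt.Println(string(out)) // the reader closed itself on EOF
}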
- createProcessParms := hcsshim.ProcessConfig{ - CreateStdInPipe: true, - CreateStdOutPipe: true, - CreateStdErrPipe: !spec.Terminal, - } - if spec.Terminal { - createProcessParms.EmulateConsole = true - if spec.ConsoleSize != nil { - createProcessParms.ConsoleSize[0] = uint(spec.ConsoleSize.Height) - createProcessParms.ConsoleSize[1] = uint(spec.ConsoleSize.Width) - } - } - - // Take working directory from the process to add if it is defined, - // otherwise take from the first process. - if spec.Cwd != "" { - createProcessParms.WorkingDirectory = spec.Cwd - } else { - createProcessParms.WorkingDirectory = ctr.ociSpec.Process.Cwd - } - - // Configure the environment for the process - createProcessParms.Environment = setupEnvironmentVariables(spec.Env) - if ctr.isWindows { - createProcessParms.CommandLine = strings.Join(spec.Args, " ") - } else { - createProcessParms.CommandArgs = spec.Args - } - createProcessParms.User = spec.User.Username - - logger.Debugf("exec commandLine: %s", createProcessParms.CommandLine) - - // Start the command running in the container. - newProcess, err := ctr.hcsContainer.CreateProcess(&createProcessParms) - if err != nil { - logger.WithError(err).Errorf("exec's CreateProcess() failed") - return -1, err - } - pid := newProcess.Pid() - defer func() { - if err != nil { - if err := newProcess.Kill(); err != nil { - logger.WithError(err).Error("failed to kill process") - } - go func() { - if err := newProcess.Wait(); err != nil { - logger.WithError(err).Error("failed to wait for process") - } - if err := newProcess.Close(); err != nil { - logger.WithError(err).Error("failed to clean process resources") - } - }() - } - }() - - dio, err := newIOFromProcess(newProcess, spec.Terminal) - if err != nil { - logger.WithError(err).Error("failed to get stdio pipes") - return -1, err - } - // Tell the engine to attach streams back to the client - _, err = attachStdio(dio) - if err != nil { - return -1, err - } - - p := &process{ - id: processID, - pid: pid, - hcsProcess: newProcess, - } - - // Add the process to the container's list of processes - ctr.Lock() - ctr.execs[processID] = p - ctr.Unlock() - - // Spin up a go routine waiting for exit to handle cleanup - go c.reapProcess(ctr, p) - - c.eventQ.append(ctr.id, func() { - ei := EventInfo{ - ContainerID: ctr.id, - ProcessID: p.id, - Pid: uint32(p.pid), - } - c.logger.WithFields(logrus.Fields{ - "container": ctr.id, - "event": EventExecAdded, - "event-info": ei, - }).Info("sending event") - err := c.backend.ProcessEvent(ctr.id, EventExecAdded, ei) - if err != nil { - c.logger.WithError(err).WithFields(logrus.Fields{ - "container": ctr.id, - "event": EventExecAdded, - "event-info": ei, - }).Error("failed to process event") - } - err = c.backend.ProcessEvent(ctr.id, EventExecStarted, ei) - if err != nil { - c.logger.WithError(err).WithFields(logrus.Fields{ - "container": ctr.id, - "event": EventExecStarted, - "event-info": ei, - }).Error("failed to process event") - } - }) - - return pid, nil -} - -// Signal handles `docker stop` on Windows. While Linux has support for -// the full range of signals, signals aren't really implemented on Windows. -// We fake supporting regular stop and -9 to force kill. 
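// A minimal sketch of the stop policy described above, assuming "init"
// as a stand-in for InitProcessName: only SIGKILL against the init
// process hard-terminates the compute system; other signals request a
// graceful shutdown, and exec'd processes are simply killed.
package main

import (
	"fmt"
	"syscall"
)

func stopAction(processID string, signal int) string {
	if processID != "init" {
		return "kill process"
	}
	if syscall.Signal(signal) == syscall.SIGKILL {
		return "terminate compute system"
	}
	return "shut down container"
}

func main() {
	fmt.Println(stopAction("init", int(syscall.SIGKILL)))   // terminate
	fmt.Println(stopAction("init", int(syscall.SIGTERM)))   // graceful
	fmt.Println(stopAction("exec-1", int(syscall.SIGTERM))) // kill process
}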
-func (c *client) SignalProcess(_ context.Context, containerID, processID string, signal int) error { - ctr, p, err := c.getProcess(containerID, processID) - if err != nil { - return err - } - - ctr.manualStopRequested = true - - logger := c.logger.WithFields(logrus.Fields{ - "container": containerID, - "process": processID, - "pid": p.pid, - "signal": signal, - }) - logger.Debug("Signal()") - - if processID == InitProcessName { - if syscall.Signal(signal) == syscall.SIGKILL { - // Terminate the compute system - if err := ctr.hcsContainer.Terminate(); err != nil { - if !hcsshim.IsPending(err) { - logger.WithError(err).Error("failed to terminate hccshim container") - } - } - } else { - // Shut down the container - if err := ctr.hcsContainer.Shutdown(); err != nil { - if !hcsshim.IsPending(err) && !hcsshim.IsAlreadyStopped(err) { - // ignore errors - logger.WithError(err).Error("failed to shutdown hccshim container") - } - } - } - } else { - return p.hcsProcess.Kill() - } - - return nil -} - -// Resize handles a CLI event to resize an interactive docker run or docker -// exec window. -func (c *client) ResizeTerminal(_ context.Context, containerID, processID string, width, height int) error { - _, p, err := c.getProcess(containerID, processID) - if err != nil { - return err - } - - c.logger.WithFields(logrus.Fields{ - "container": containerID, - "process": processID, - "height": height, - "width": width, - "pid": p.pid, - }).Debug("resizing") - return p.hcsProcess.ResizeConsole(uint16(width), uint16(height)) -} - -func (c *client) CloseStdin(_ context.Context, containerID, processID string) error { - _, p, err := c.getProcess(containerID, processID) - if err != nil { - return err - } - - return p.hcsProcess.CloseStdin() -} - -// Pause handles pause requests for containers -func (c *client) Pause(_ context.Context, containerID string) error { - ctr, _, err := c.getProcess(containerID, InitProcessName) - if err != nil { - return err - } - - if ctr.ociSpec.Windows.HyperV == nil { - return errors.New("cannot pause Windows Server Containers") - } - - ctr.Lock() - defer ctr.Unlock() - - if err = ctr.hcsContainer.Pause(); err != nil { - return err - } - - ctr.status = StatusPaused - - c.eventQ.append(containerID, func() { - err := c.backend.ProcessEvent(containerID, EventPaused, EventInfo{ - ContainerID: containerID, - ProcessID: InitProcessName, - }) - c.logger.WithFields(logrus.Fields{ - "container": ctr.id, - "event": EventPaused, - }).Info("sending event") - if err != nil { - c.logger.WithError(err).WithFields(logrus.Fields{ - "container": containerID, - "event": EventPaused, - }).Error("failed to process event") - } - }) - - return nil -} - -// Resume handles resume requests for containers -func (c *client) Resume(_ context.Context, containerID string) error { - ctr, _, err := c.getProcess(containerID, InitProcessName) - if err != nil { - return err - } - - if ctr.ociSpec.Windows.HyperV == nil { - return errors.New("cannot resume Windows Server Containers") - } - - ctr.Lock() - defer ctr.Unlock() - - if err = ctr.hcsContainer.Resume(); err != nil { - return err - } - - ctr.status = StatusRunning - - c.eventQ.append(containerID, func() { - err := c.backend.ProcessEvent(containerID, EventResumed, EventInfo{ - ContainerID: containerID, - ProcessID: InitProcessName, - }) - c.logger.WithFields(logrus.Fields{ - "container": ctr.id, - "event": EventResumed, - }).Info("sending event") - if err != nil { - c.logger.WithError(err).WithFields(logrus.Fields{ - "container": containerID, - "event": 
EventResumed, - }).Error("failed to process event") - } - }) - - return nil -} - -// Stats handles stats requests for containers -func (c *client) Stats(_ context.Context, containerID string) (*Stats, error) { - ctr, _, err := c.getProcess(containerID, InitProcessName) - if err != nil { - return nil, err - } - - readAt := time.Now() - s, err := ctr.hcsContainer.Statistics() - if err != nil { - return nil, err - } - return &Stats{ - Read: readAt, - HCSStats: &s, - }, nil -} - -// Restore is the handler for restoring a container -func (c *client) Restore(ctx context.Context, id string, attachStdio StdioCallback) (bool, int, error) { - c.logger.WithField("container", id).Debug("restore()") - - // TODO Windows: On RS1, a re-attach isn't possible. - // However, there is a scenario in which there is an issue. - // Consider a background container. The daemon dies unexpectedly. - // HCS will still have the compute service alive and running. - // For consistence, we call in to shoot it regardless if HCS knows about it - // We explicitly just log a warning if the terminate fails. - // Then we tell the backend the container exited. - if hc, err := hcsshim.OpenContainer(id); err == nil { - const terminateTimeout = time.Minute * 2 - err := hc.Terminate() - - if hcsshim.IsPending(err) { - err = hc.WaitTimeout(terminateTimeout) - } else if hcsshim.IsAlreadyStopped(err) { - err = nil - } - - if err != nil { - c.logger.WithField("container", id).WithError(err).Debug("terminate failed on restore") - return false, -1, err - } - } - return false, -1, nil -} - -// GetPidsForContainer returns a list of process IDs running in a container. -// Not used on Windows. -func (c *client) ListPids(_ context.Context, _ string) ([]uint32, error) { - return nil, errors.New("not implemented on Windows") -} - -// Summary returns a summary of the processes running in a container. -// This is present in Windows to support docker top. In linux, the -// engine shells out to ps to get process information. On Windows, as -// the containers could be Hyper-V containers, they would not be -// visible on the container host. However, libcontainerd does have -// that information. 
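// A minimal sketch of the Stats handler above, which records the
// wall-clock read time alongside the HCS statistics so callers can
// compute rates against a stable reference. Statistics here is a
// hypothetical stand-in for hcsshim.Statistics.
package main

import (
	"fmt"
	"time"
)

type Statistics struct{ ProcessorRuntime100ns uint64 }

type Stats struct {
	Read     time.Time
	HCSStats *Statistics
}

func collectStats(read func() (Statistics, error)) (*Stats, error) {
	readAt := time.Now() // capture before the potentially slow HCS call
	s, err := read()
	if err != nil {
		return nil, err
	}
	return &Stats{Read: readAt, HCSStats: &s}, nil
}

func main() {
	st, _ := collectStats(func() (Statistics, error) {
		return Statistics{ProcessorRuntime100ns: 42}, nil
	})
	fmt.Println(st.Read.Format(time.RFC3339), st.HCSStats.ProcessorRuntime100ns)
}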
-func (c *client) Summary(_ context.Context, containerID string) ([]Summary, error) { - ctr, _, err := c.getProcess(containerID, InitProcessName) - if err != nil { - return nil, err - } - - p, err := ctr.hcsContainer.ProcessList() - if err != nil { - return nil, err - } - - pl := make([]Summary, len(p)) - for i := range p { - pl[i] = Summary(p[i]) - } - return pl, nil -} - -func (c *client) DeleteTask(ctx context.Context, containerID string) (uint32, time.Time, error) { - ec := -1 - ctr := c.getContainer(containerID) - if ctr == nil { - return uint32(ec), time.Now(), errors.WithStack(newNotFoundError("no such container")) - } - - select { - case <-ctx.Done(): - return uint32(ec), time.Now(), errors.WithStack(ctx.Err()) - case <-ctr.waitCh: - default: - return uint32(ec), time.Now(), errors.New("container is not stopped") - } - - ctr.Lock() - defer ctr.Unlock() - return ctr.exitCode, ctr.exitedAt, nil -} - -func (c *client) Delete(_ context.Context, containerID string) error { - c.Lock() - defer c.Unlock() - ctr := c.containers[containerID] - if ctr == nil { - return errors.WithStack(newNotFoundError("no such container")) - } - - ctr.Lock() - defer ctr.Unlock() - - switch ctr.status { - case StatusCreated: - if err := c.shutdownContainer(ctr); err != nil { - return err - } - fallthrough - case StatusStopped: - delete(c.containers, containerID) - return nil - } - - return errors.WithStack(newInvalidParameterError("container is not stopped")) -} - -func (c *client) Status(ctx context.Context, containerID string) (Status, error) { - c.Lock() - defer c.Unlock() - ctr := c.containers[containerID] - if ctr == nil { - return StatusUnknown, errors.WithStack(newNotFoundError("no such container")) - } - - ctr.Lock() - defer ctr.Unlock() - return ctr.status, nil -} - -func (c *client) UpdateResources(ctx context.Context, containerID string, resources *Resources) error { - // Updating resource isn't supported on Windows - // but we should return nil for enabling updating container - return nil -} - -func (c *client) CreateCheckpoint(ctx context.Context, containerID, checkpointDir string, exit bool) error { - return errors.New("Windows: Containers do not support checkpoints") -} - -func (c *client) getContainer(id string) *container { - c.Lock() - ctr := c.containers[id] - c.Unlock() - - return ctr -} - -func (c *client) getProcess(containerID, processID string) (*container, *process, error) { - ctr := c.getContainer(containerID) - switch { - case ctr == nil: - return nil, nil, errors.WithStack(newNotFoundError("no such container")) - case ctr.init == nil: - return nil, nil, errors.WithStack(newNotFoundError("container is not running")) - case processID == InitProcessName: - return ctr, ctr.init, nil - default: - ctr.Lock() - defer ctr.Unlock() - if ctr.execs == nil { - return nil, nil, errors.WithStack(newNotFoundError("no execs")) - } - } - - p := ctr.execs[processID] - if p == nil { - return nil, nil, errors.WithStack(newNotFoundError("no such exec")) - } - - return ctr, p, nil -} - -func (c *client) shutdownContainer(ctr *container) error { - const shutdownTimeout = time.Minute * 5 - err := ctr.hcsContainer.Shutdown() - - if hcsshim.IsPending(err) { - err = ctr.hcsContainer.WaitTimeout(shutdownTimeout) - } else if hcsshim.IsAlreadyStopped(err) { - err = nil - } - - if err != nil { - c.logger.WithError(err).WithField("container", ctr.id). 
- Debug("failed to shutdown container, terminating it") - terminateErr := c.terminateContainer(ctr) - if terminateErr != nil { - c.logger.WithError(terminateErr).WithField("container", ctr.id). - Error("failed to shutdown container, and subsequent terminate also failed") - return fmt.Errorf("%s: subsequent terminate failed %s", err, terminateErr) - } - return err - } - - return nil -} - -func (c *client) terminateContainer(ctr *container) error { - const terminateTimeout = time.Minute * 5 - err := ctr.hcsContainer.Terminate() - - if hcsshim.IsPending(err) { - err = ctr.hcsContainer.WaitTimeout(terminateTimeout) - } else if hcsshim.IsAlreadyStopped(err) { - err = nil - } - - if err != nil { - c.logger.WithError(err).WithField("container", ctr.id). - Debug("failed to terminate container") - return err - } - - return nil -} - -func (c *client) reapProcess(ctr *container, p *process) int { - logger := c.logger.WithFields(logrus.Fields{ - "container": ctr.id, - "process": p.id, - }) - - var eventErr error - - // Block indefinitely for the process to exit. - if err := p.hcsProcess.Wait(); err != nil { - if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE { - logger.WithError(err).Warnf("Wait() failed (container may have been killed)") - } - // Fall through here, do not return. This ensures we attempt to - // continue the shutdown in HCS and tell the docker engine that the - // process/container has exited to avoid a container being dropped on - // the floor. - } - exitedAt := time.Now() - - exitCode, err := p.hcsProcess.ExitCode() - if err != nil { - if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE { - logger.WithError(err).Warnf("unable to get exit code for process") - } - // Since we got an error retrieving the exit code, make sure that the - // code we return doesn't incorrectly indicate success. - exitCode = -1 - - // Fall through here, do not return. This ensures we attempt to - // continue the shutdown in HCS and tell the docker engine that the - // process/container has exited to avoid a container being dropped on - // the floor. 
- } - - if err := p.hcsProcess.Close(); err != nil { - logger.WithError(err).Warnf("failed to cleanup hcs process resources") - exitCode = -1 - eventErr = fmt.Errorf("hcsProcess.Close() failed %s", err) - } - - if p.id == InitProcessName { - // Update container status - ctr.Lock() - ctr.status = StatusStopped - ctr.exitedAt = exitedAt - ctr.exitCode = uint32(exitCode) - close(ctr.waitCh) - ctr.Unlock() - - if err := c.shutdownContainer(ctr); err != nil { - exitCode = -1 - logger.WithError(err).Warn("failed to shutdown container") - thisErr := fmt.Errorf("failed to shutdown container: %s", err) - if eventErr != nil { - eventErr = fmt.Errorf("%s: %s", eventErr, thisErr) - } else { - eventErr = thisErr - } - } else { - logger.Debug("completed container shutdown") - } - - if err := ctr.hcsContainer.Close(); err != nil { - exitCode = -1 - logger.WithError(err).Error("failed to clean hcs container resources") - thisErr := fmt.Errorf("failed to terminate container: %s", err) - if eventErr != nil { - eventErr = fmt.Errorf("%s: %s", eventErr, thisErr) - } else { - eventErr = thisErr - } - } - } - - c.eventQ.append(ctr.id, func() { - ei := EventInfo{ - ContainerID: ctr.id, - ProcessID: p.id, - Pid: uint32(p.pid), - ExitCode: uint32(exitCode), - ExitedAt: exitedAt, - Error: eventErr, - } - c.logger.WithFields(logrus.Fields{ - "container": ctr.id, - "event": EventExit, - "event-info": ei, - }).Info("sending event") - err := c.backend.ProcessEvent(ctr.id, EventExit, ei) - if err != nil { - c.logger.WithError(err).WithFields(logrus.Fields{ - "container": ctr.id, - "event": EventExit, - "event-info": ei, - }).Error("failed to process event") - } - if p.id != InitProcessName { - ctr.Lock() - delete(ctr.execs, p.id) - ctr.Unlock() - } - }) - - return exitCode -} diff --git a/vendor/github.com/docker/docker/libcontainerd/errors.go b/vendor/github.com/docker/docker/libcontainerd/errors.go deleted file mode 100644 index bdc26715b..000000000 --- a/vendor/github.com/docker/docker/libcontainerd/errors.go +++ /dev/null @@ -1,13 +0,0 @@ -package libcontainerd // import "github.com/docker/docker/libcontainerd" - -import ( - "errors" - - "github.com/docker/docker/errdefs" -) - -func newNotFoundError(err string) error { return errdefs.NotFound(errors.New(err)) } - -func newInvalidParameterError(err string) error { return errdefs.InvalidParameter(errors.New(err)) } - -func newConflictError(err string) error { return errdefs.Conflict(errors.New(err)) } diff --git a/vendor/github.com/docker/docker/libcontainerd/process_windows.go b/vendor/github.com/docker/docker/libcontainerd/process_windows.go deleted file mode 100644 index 8cdf1daca..000000000 --- a/vendor/github.com/docker/docker/libcontainerd/process_windows.go +++ /dev/null @@ -1,44 +0,0 @@ -package libcontainerd // import "github.com/docker/docker/libcontainerd" - -import ( - "io" - "sync" - - "github.com/Microsoft/hcsshim" - "github.com/docker/docker/pkg/ioutils" -) - -type autoClosingReader struct { - io.ReadCloser - sync.Once -} - -func (r *autoClosingReader) Read(b []byte) (n int, err error) { - n, err = r.ReadCloser.Read(b) - if err != nil { - r.Once.Do(func() { r.ReadCloser.Close() }) - } - return -} - -func createStdInCloser(pipe io.WriteCloser, process hcsshim.Process) io.WriteCloser { - return ioutils.NewWriteCloserWrapper(pipe, func() error { - if err := pipe.Close(); err != nil { - return err - } - - err := process.CloseStdin() - if err != nil && !hcsshim.IsNotExist(err) && !hcsshim.IsAlreadyClosed(err) { - // This error will occur if the compute 
system is currently shutting down - if perr, ok := err.(*hcsshim.ProcessError); ok && perr.Err != hcsshim.ErrVmcomputeOperationInvalidState { - return err - } - } - - return nil - }) -} - -func (p *process) Cleanup() error { - return nil -} diff --git a/vendor/github.com/docker/docker/libcontainerd/queue.go b/vendor/github.com/docker/docker/libcontainerd/queue.go deleted file mode 100644 index 207722c44..000000000 --- a/vendor/github.com/docker/docker/libcontainerd/queue.go +++ /dev/null @@ -1,35 +0,0 @@ -package libcontainerd // import "github.com/docker/docker/libcontainerd" - -import "sync" - -type queue struct { - sync.Mutex - fns map[string]chan struct{} -} - -func (q *queue) append(id string, f func()) { - q.Lock() - defer q.Unlock() - - if q.fns == nil { - q.fns = make(map[string]chan struct{}) - } - - done := make(chan struct{}) - - fn, ok := q.fns[id] - q.fns[id] = done - go func() { - if ok { - <-fn - } - f() - close(done) - - q.Lock() - if q.fns[id] == done { - delete(q.fns, id) - } - q.Unlock() - }() -} diff --git a/vendor/github.com/docker/docker/libcontainerd/remote_daemon.go b/vendor/github.com/docker/docker/libcontainerd/remote_daemon.go deleted file mode 100644 index cd2ac1ce4..000000000 --- a/vendor/github.com/docker/docker/libcontainerd/remote_daemon.go +++ /dev/null @@ -1,344 +0,0 @@ -// +build !windows - -package libcontainerd // import "github.com/docker/docker/libcontainerd" - -import ( - "context" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "strconv" - "strings" - "sync" - "syscall" - "time" - - "github.com/BurntSushi/toml" - "github.com/containerd/containerd" - "github.com/containerd/containerd/services/server" - "github.com/docker/docker/pkg/system" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const ( - maxConnectionRetryCount = 3 - healthCheckTimeout = 3 * time.Second - shutdownTimeout = 15 * time.Second - configFile = "containerd.toml" - binaryName = "docker-containerd" - pidFile = "docker-containerd.pid" -) - -type pluginConfigs struct { - Plugins map[string]interface{} `toml:"plugins"` -} - -type remote struct { - sync.RWMutex - server.Config - - daemonPid int - logger *logrus.Entry - - daemonWaitCh chan struct{} - clients []*client - shutdownContext context.Context - shutdownCancel context.CancelFunc - shutdown bool - - // Options - startDaemon bool - rootDir string - stateDir string - snapshotter string - pluginConfs pluginConfigs -} - -// New creates a fresh instance of libcontainerd remote. 
-func New(rootDir, stateDir string, options ...RemoteOption) (rem Remote, err error) { - defer func() { - if err != nil { - err = errors.Wrap(err, "Failed to connect to containerd") - } - }() - - r := &remote{ - rootDir: rootDir, - stateDir: stateDir, - Config: server.Config{ - Root: filepath.Join(rootDir, "daemon"), - State: filepath.Join(stateDir, "daemon"), - }, - pluginConfs: pluginConfigs{make(map[string]interface{})}, - daemonPid: -1, - logger: logrus.WithField("module", "libcontainerd"), - } - r.shutdownContext, r.shutdownCancel = context.WithCancel(context.Background()) - - rem = r - for _, option := range options { - if err = option.Apply(r); err != nil { - return - } - } - r.setDefaults() - - if err = system.MkdirAll(stateDir, 0700, ""); err != nil { - return - } - - if r.startDaemon { - os.Remove(r.GRPC.Address) - if err = r.startContainerd(); err != nil { - return - } - defer func() { - if err != nil { - r.Cleanup() - } - }() - } - - // This connection is just used to monitor the connection - client, err := containerd.New(r.GRPC.Address) - if err != nil { - return - } - if _, err := client.Version(context.Background()); err != nil { - system.KillProcess(r.daemonPid) - return nil, errors.Wrapf(err, "unable to get containerd version") - } - - go r.monitorConnection(client) - - return r, nil -} - -func (r *remote) NewClient(ns string, b Backend) (Client, error) { - c := &client{ - stateDir: r.stateDir, - logger: r.logger.WithField("namespace", ns), - namespace: ns, - backend: b, - containers: make(map[string]*container), - } - - rclient, err := containerd.New(r.GRPC.Address, containerd.WithDefaultNamespace(ns)) - if err != nil { - return nil, err - } - c.remote = rclient - - go c.processEventStream(r.shutdownContext) - - r.Lock() - r.clients = append(r.clients, c) - r.Unlock() - return c, nil -} - -func (r *remote) Cleanup() { - if r.daemonPid != -1 { - r.shutdownCancel() - r.stopDaemon() - } - - // cleanup some files - os.Remove(filepath.Join(r.stateDir, pidFile)) - - r.platformCleanup() -} - -func (r *remote) getContainerdPid() (int, error) { - pidFile := filepath.Join(r.stateDir, pidFile) - f, err := os.OpenFile(pidFile, os.O_RDWR, 0600) - if err != nil { - if os.IsNotExist(err) { - return -1, nil - } - return -1, err - } - defer f.Close() - - b := make([]byte, 8) - n, err := f.Read(b) - if err != nil && err != io.EOF { - return -1, err - } - - if n > 0 { - pid, err := strconv.ParseUint(string(b[:n]), 10, 64) - if err != nil { - return -1, err - } - if system.IsProcessAlive(int(pid)) { - return int(pid), nil - } - } - - return -1, nil -} - -func (r *remote) getContainerdConfig() (string, error) { - path := filepath.Join(r.stateDir, configFile) - f, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600) - if err != nil { - return "", errors.Wrapf(err, "failed to open containerd config file at %s", path) - } - defer f.Close() - - enc := toml.NewEncoder(f) - if err = enc.Encode(r.Config); err != nil { - return "", errors.Wrapf(err, "failed to encode general config") - } - if err = enc.Encode(r.pluginConfs); err != nil { - return "", errors.Wrapf(err, "failed to encode plugin configs") - } - - return path, nil -} - -func (r *remote) startContainerd() error { - pid, err := r.getContainerdPid() - if err != nil { - return err - } - - if pid != -1 { - r.daemonPid = pid - logrus.WithField("pid", pid). 
- Infof("libcontainerd: %s is still running", binaryName) - return nil - } - - configFile, err := r.getContainerdConfig() - if err != nil { - return err - } - - args := []string{"--config", configFile} - cmd := exec.Command(binaryName, args...) - // redirect containerd logs to docker logs - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - cmd.SysProcAttr = containerdSysProcAttr() - // clear the NOTIFY_SOCKET from the env when starting containerd - cmd.Env = nil - for _, e := range os.Environ() { - if !strings.HasPrefix(e, "NOTIFY_SOCKET") { - cmd.Env = append(cmd.Env, e) - } - } - if err := cmd.Start(); err != nil { - return err - } - - r.daemonWaitCh = make(chan struct{}) - go func() { - // Reap our child when needed - if err := cmd.Wait(); err != nil { - r.logger.WithError(err).Errorf("containerd did not exit successfully") - } - close(r.daemonWaitCh) - }() - - r.daemonPid = cmd.Process.Pid - - err = ioutil.WriteFile(filepath.Join(r.stateDir, pidFile), []byte(fmt.Sprintf("%d", r.daemonPid)), 0660) - if err != nil { - system.KillProcess(r.daemonPid) - return errors.Wrap(err, "libcontainerd: failed to save daemon pid to disk") - } - - logrus.WithField("pid", r.daemonPid). - Infof("libcontainerd: started new %s process", binaryName) - - return nil -} - -func (r *remote) monitorConnection(monitor *containerd.Client) { - var transientFailureCount = 0 - - for { - select { - case <-r.shutdownContext.Done(): - r.logger.Info("stopping healthcheck following graceful shutdown") - monitor.Close() - return - case <-time.After(500 * time.Millisecond): - } - - ctx, cancel := context.WithTimeout(r.shutdownContext, healthCheckTimeout) - _, err := monitor.IsServing(ctx) - cancel() - if err == nil { - transientFailureCount = 0 - continue - } - - select { - case <-r.shutdownContext.Done(): - r.logger.Info("stopping healthcheck following graceful shutdown") - monitor.Close() - return - default: - } - - r.logger.WithError(err).WithField("binary", binaryName).Debug("daemon is not responding") - - if r.daemonPid == -1 { - continue - } - - transientFailureCount++ - if transientFailureCount < maxConnectionRetryCount || system.IsProcessAlive(r.daemonPid) { - continue - } - - transientFailureCount = 0 - if system.IsProcessAlive(r.daemonPid) { - r.logger.WithField("pid", r.daemonPid).Info("killing and restarting containerd") - // Try to get a stack trace - syscall.Kill(r.daemonPid, syscall.SIGUSR1) - <-time.After(100 * time.Millisecond) - system.KillProcess(r.daemonPid) - } - if r.daemonWaitCh != nil { - <-r.daemonWaitCh - } - - os.Remove(r.GRPC.Address) - if err := r.startContainerd(); err != nil { - r.logger.WithError(err).Error("failed restarting containerd") - continue - } - - if err := monitor.Reconnect(); err != nil { - r.logger.WithError(err).Error("failed connect to containerd") - continue - } - - var wg sync.WaitGroup - - for _, c := range r.clients { - wg.Add(1) - - go func(c *client) { - defer wg.Done() - c.logger.WithField("namespace", c.namespace).Debug("creating new containerd remote client") - if err := c.reconnect(); err != nil { - r.logger.WithError(err).Error("failed to connect to containerd") - // TODO: Better way to handle this? 
- // This *shouldn't* happen, but this could wind up where the daemon - // is not able to communicate with an eventually up containerd - } - }(c) - - wg.Wait() - } - } -} diff --git a/vendor/github.com/docker/docker/libcontainerd/remote_daemon_linux.go b/vendor/github.com/docker/docker/libcontainerd/remote_daemon_linux.go deleted file mode 100644 index dc59eb8c1..000000000 --- a/vendor/github.com/docker/docker/libcontainerd/remote_daemon_linux.go +++ /dev/null @@ -1,61 +0,0 @@ -package libcontainerd // import "github.com/docker/docker/libcontainerd" - -import ( - "os" - "path/filepath" - "syscall" - "time" - - "github.com/containerd/containerd/defaults" - "github.com/docker/docker/pkg/system" -) - -const ( - sockFile = "docker-containerd.sock" - debugSockFile = "docker-containerd-debug.sock" -) - -func (r *remote) setDefaults() { - if r.GRPC.Address == "" { - r.GRPC.Address = filepath.Join(r.stateDir, sockFile) - } - if r.GRPC.MaxRecvMsgSize == 0 { - r.GRPC.MaxRecvMsgSize = defaults.DefaultMaxRecvMsgSize - } - if r.GRPC.MaxSendMsgSize == 0 { - r.GRPC.MaxSendMsgSize = defaults.DefaultMaxSendMsgSize - } - if r.Debug.Address == "" { - r.Debug.Address = filepath.Join(r.stateDir, debugSockFile) - } - if r.Debug.Level == "" { - r.Debug.Level = "info" - } - if r.OOMScore == 0 { - r.OOMScore = -999 - } - if r.snapshotter == "" { - r.snapshotter = "overlay" - } -} - -func (r *remote) stopDaemon() { - // Ask the daemon to quit - syscall.Kill(r.daemonPid, syscall.SIGTERM) - // Wait up to 15secs for it to stop - for i := time.Duration(0); i < shutdownTimeout; i += time.Second { - if !system.IsProcessAlive(r.daemonPid) { - break - } - time.Sleep(time.Second) - } - - if system.IsProcessAlive(r.daemonPid) { - r.logger.WithField("pid", r.daemonPid).Warn("daemon didn't stop within 15 secs, killing it") - syscall.Kill(r.daemonPid, syscall.SIGKILL) - } -} - -func (r *remote) platformCleanup() { - os.Remove(filepath.Join(r.stateDir, sockFile)) -} diff --git a/vendor/github.com/docker/docker/libcontainerd/remote_daemon_options.go b/vendor/github.com/docker/docker/libcontainerd/remote_daemon_options.go deleted file mode 100644 index d40e4c0c4..000000000 --- a/vendor/github.com/docker/docker/libcontainerd/remote_daemon_options.go +++ /dev/null @@ -1,141 +0,0 @@ -// +build !windows - -package libcontainerd // import "github.com/docker/docker/libcontainerd" - -import "fmt" - -// WithRemoteAddr sets the external containerd socket to connect to. -func WithRemoteAddr(addr string) RemoteOption { - return rpcAddr(addr) -} - -type rpcAddr string - -func (a rpcAddr) Apply(r Remote) error { - if remote, ok := r.(*remote); ok { - remote.GRPC.Address = string(a) - return nil - } - return fmt.Errorf("WithRemoteAddr option not supported for this remote") -} - -// WithRemoteAddrUser sets the uid and gid to create the RPC address with -func WithRemoteAddrUser(uid, gid int) RemoteOption { - return rpcUser{uid, gid} -} - -type rpcUser struct { - uid int - gid int -} - -func (u rpcUser) Apply(r Remote) error { - if remote, ok := r.(*remote); ok { - remote.GRPC.UID = u.uid - remote.GRPC.GID = u.gid - return nil - } - return fmt.Errorf("WithRemoteAddr option not supported for this remote") -} - -// WithStartDaemon defines if libcontainerd should also run containerd daemon. 
-func WithStartDaemon(start bool) RemoteOption {
-	return startDaemon(start)
-}
-
-type startDaemon bool
-
-func (s startDaemon) Apply(r Remote) error {
-	if remote, ok := r.(*remote); ok {
-		remote.startDaemon = bool(s)
-		return nil
-	}
-	return fmt.Errorf("WithStartDaemon option not supported for this remote")
-}
-
-// WithLogLevel defines which log level to start containerd with.
-// This only makes sense if WithStartDaemon() was set to true.
-func WithLogLevel(lvl string) RemoteOption {
-	return logLevel(lvl)
-}
-
-type logLevel string
-
-func (l logLevel) Apply(r Remote) error {
-	if remote, ok := r.(*remote); ok {
-		remote.Debug.Level = string(l)
-		return nil
-	}
-	return fmt.Errorf("WithLogLevel option not supported for this remote")
-}
-
-// WithDebugAddress defines at which location the debug GRPC connection
-// should be made
-func WithDebugAddress(addr string) RemoteOption {
-	return debugAddress(addr)
-}
-
-type debugAddress string
-
-func (d debugAddress) Apply(r Remote) error {
-	if remote, ok := r.(*remote); ok {
-		remote.Debug.Address = string(d)
-		return nil
-	}
-	return fmt.Errorf("WithDebugAddress option not supported for this remote")
-}
-
-// WithMetricsAddress defines at which location the metrics GRPC connection
-// should be made
-func WithMetricsAddress(addr string) RemoteOption {
-	return metricsAddress(addr)
-}
-
-type metricsAddress string
-
-func (m metricsAddress) Apply(r Remote) error {
-	if remote, ok := r.(*remote); ok {
-		remote.Metrics.Address = string(m)
-		return nil
-	}
-	return fmt.Errorf("WithMetricsAddress option not supported for this remote")
-}
-
-// WithSnapshotter defines which snapshotter driver should be used.
-func WithSnapshotter(name string) RemoteOption {
-	return snapshotter(name)
-}
-
-type snapshotter string
-
-func (s snapshotter) Apply(r Remote) error {
-	if remote, ok := r.(*remote); ok {
-		remote.snapshotter = string(s)
-		return nil
-	}
-	return fmt.Errorf("WithSnapshotter option not supported for this remote")
-}
-
-// WithPlugin allows configuring a containerd plugin.
-// Configuration values passed need to be quoted if quotes are needed in
-// the toml format.
-func WithPlugin(name string, conf interface{}) RemoteOption {
-	return pluginConf{
-		name: name,
-		conf: conf,
-	}
-}
-
-type pluginConf struct {
-	// name is the name of the plugin
-	name string
-	conf interface{}
-}
-
-func (p pluginConf) Apply(r Remote) error {
-	if remote, ok := r.(*remote); ok {
-		remote.pluginConfs.Plugins[p.name] = p.conf
-		return nil
-	}
-	return fmt.Errorf("WithPlugin option not supported for this remote")
-}
diff --git a/vendor/github.com/docker/docker/libcontainerd/remote_daemon_options_linux.go b/vendor/github.com/docker/docker/libcontainerd/remote_daemon_options_linux.go
deleted file mode 100644
index a820fb389..000000000
--- a/vendor/github.com/docker/docker/libcontainerd/remote_daemon_options_linux.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package libcontainerd // import "github.com/docker/docker/libcontainerd"
-
-import "fmt"
-
-// WithOOMScore defines the oom_score_adj to set for the containerd process.
-func WithOOMScore(score int) RemoteOption {
-	return oomScore(score)
-}
-
-type oomScore int
-
-func (o oomScore) Apply(r Remote) error {
-	if remote, ok := r.(*remote); ok {
-		remote.OOMScore = int(o)
-		return nil
-	}
-	return fmt.Errorf("WithOOMScore option not supported for this remote")
-}
diff --git a/vendor/github.com/docker/docker/libcontainerd/remote_daemon_windows.go b/vendor/github.com/docker/docker/libcontainerd/remote_daemon_windows.go
deleted file mode 100644
index 89342d739..000000000
--- a/vendor/github.com/docker/docker/libcontainerd/remote_daemon_windows.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// +build remote_daemon
-
-package libcontainerd // import "github.com/docker/docker/libcontainerd"
-
-import (
-	"os"
-)
-
-const (
-	grpcPipeName  = `\\.\pipe\docker-containerd-containerd`
-	debugPipeName = `\\.\pipe\docker-containerd-debug`
-)
-
-func (r *remote) setDefaults() {
-	if r.GRPC.Address == "" {
-		r.GRPC.Address = grpcPipeName
-	}
-	if r.Debug.Address == "" {
-		r.Debug.Address = debugPipeName
-	}
-	if r.Debug.Level == "" {
-		r.Debug.Level = "info"
-	}
-	if r.snapshotter == "" {
-		r.snapshotter = "naive" // TODO(mlaventure): switch to "windows" once implemented
-	}
-}
-
-func (r *remote) stopDaemon() {
-	p, err := os.FindProcess(r.daemonPid)
-	if err != nil {
-		r.logger.WithField("pid", r.daemonPid).Warn("could not find daemon process")
-		return
-	}
-
-	if err = p.Kill(); err != nil {
-		r.logger.WithError(err).WithField("pid", r.daemonPid).Warn("could not kill daemon process")
-		return
-	}
-
-	_, err = p.Wait()
-	if err != nil {
-		r.logger.WithError(err).WithField("pid", r.daemonPid).Warn("failed to wait for daemon process")
-		return
-	}
-}
-
-func (r *remote) platformCleanup() {
-	// Nothing to do
-}
diff --git a/vendor/github.com/docker/docker/libcontainerd/remote_local.go b/vendor/github.com/docker/docker/libcontainerd/remote_local.go
deleted file mode 100644
index 8ea5198b8..000000000
--- a/vendor/github.com/docker/docker/libcontainerd/remote_local.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// +build windows
-
-package libcontainerd // import "github.com/docker/docker/libcontainerd"
-
-import (
-	"sync"
-
-	"github.com/sirupsen/logrus"
-)
-
-type remote struct {
-	sync.RWMutex
-
-	logger  *logrus.Entry
-	clients []*client
-
-	// Options
-	rootDir  string
-	stateDir string
-}
-
-// New creates a fresh instance of libcontainerd remote.
-func New(rootDir, stateDir string, options ...RemoteOption) (Remote, error) {
-	return &remote{
-		logger:   logrus.WithField("module", "libcontainerd"),
-		rootDir:  rootDir,
-		stateDir: stateDir,
-	}, nil
-}
-
-type client struct {
-	sync.Mutex
-
-	rootDir    string
-	stateDir   string
-	backend    Backend
-	logger     *logrus.Entry
-	eventQ     queue
-	containers map[string]*container
-}
-
-func (r *remote) NewClient(ns string, b Backend) (Client, error) {
-	c := &client{
-		rootDir:    r.rootDir,
-		stateDir:   r.stateDir,
-		backend:    b,
-		logger:     r.logger.WithField("namespace", ns),
-		containers: make(map[string]*container),
-	}
-	r.Lock()
-	r.clients = append(r.clients, c)
-	r.Unlock()
-
-	return c, nil
-}
-
-func (r *remote) Cleanup() {
-	// Nothing to do
-}
diff --git a/vendor/github.com/docker/docker/libcontainerd/types.go b/vendor/github.com/docker/docker/libcontainerd/types.go
deleted file mode 100644
index 96ffbe267..000000000
--- a/vendor/github.com/docker/docker/libcontainerd/types.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package libcontainerd // import "github.com/docker/docker/libcontainerd"
-
-import (
-	"context"
-	"time"
-
-	"github.com/containerd/containerd"
-	"github.com/containerd/containerd/cio"
-	"github.com/opencontainers/runtime-spec/specs-go"
-)
-
-// EventType represents a possible event from libcontainerd
-type EventType string
-
-// Event constants used when reporting events
-const (
-	EventUnknown     EventType = "unknown"
-	EventExit        EventType = "exit"
-	EventOOM         EventType = "oom"
-	EventCreate      EventType = "create"
-	EventStart       EventType = "start"
-	EventExecAdded   EventType = "exec-added"
-	EventExecStarted EventType = "exec-started"
-	EventPaused      EventType = "paused"
-	EventResumed     EventType = "resumed"
-)
-
-// Status represents the current status of a container
-type Status string
-
-// Possible container statuses
-const (
-	// Running indicates the process is currently executing
-	StatusRunning Status = "running"
-	// Created indicates the process has been created within containerd but the
-	// user's defined process has not started
-	StatusCreated Status = "created"
-	// Stopped indicates that the process has run and exited
-	StatusStopped Status = "stopped"
-	// Paused indicates that the process is currently paused
-	StatusPaused Status = "paused"
-	// Pausing indicates that the process is currently switching from a
-	// running state into a paused state
-	StatusPausing Status = "pausing"
-	// Unknown indicates that we could not determine the status from the runtime
-	StatusUnknown Status = "unknown"
-)
-
-// Remote on Linux defines the access point to the containerd grpc API.
-// Remote on Windows is largely an unimplemented interface as there is
-// no remote containerd.
-type Remote interface {
-	// NewClient returns a new Client instance connected with the given Backend.
-	NewClient(namespace string, backend Backend) (Client, error)
-	// Cleanup stops containerd if it was started by libcontainerd.
-	// Note this is not used on Windows as there is no remote containerd.
-	Cleanup()
-}
-
-// RemoteOption allows configuring parameters of remotes.
-// This is unused on Windows.
-type RemoteOption interface {
-	Apply(Remote) error
-}
-
-// EventInfo contains information about a libcontainerd event
-type EventInfo struct {
-	ContainerID string
-	ProcessID   string
-	Pid         uint32
-	ExitCode    uint32
-	ExitedAt    time.Time
-	OOMKilled   bool
-	Error       error
-}
-
-// Backend defines callbacks that the client of the library needs to implement.
-type Backend interface {
-	ProcessEvent(containerID string, event EventType, ei EventInfo) error
-}
-
-// Client provides access to containerd features.
-type Client interface {
-	Version(ctx context.Context) (containerd.Version, error)
-
-	Restore(ctx context.Context, containerID string, attachStdio StdioCallback) (alive bool, pid int, err error)
-
-	Create(ctx context.Context, containerID string, spec *specs.Spec, runtimeOptions interface{}) error
-	Start(ctx context.Context, containerID, checkpointDir string, withStdin bool, attachStdio StdioCallback) (pid int, err error)
-	SignalProcess(ctx context.Context, containerID, processID string, signal int) error
-	Exec(ctx context.Context, containerID, processID string, spec *specs.Process, withStdin bool, attachStdio StdioCallback) (int, error)
-	ResizeTerminal(ctx context.Context, containerID, processID string, width, height int) error
-	CloseStdin(ctx context.Context, containerID, processID string) error
-	Pause(ctx context.Context, containerID string) error
-	Resume(ctx context.Context, containerID string) error
-	Stats(ctx context.Context, containerID string) (*Stats, error)
-	ListPids(ctx context.Context, containerID string) ([]uint32, error)
-	Summary(ctx context.Context, containerID string) ([]Summary, error)
-	DeleteTask(ctx context.Context, containerID string) (uint32, time.Time, error)
-	Delete(ctx context.Context, containerID string) error
-	Status(ctx context.Context, containerID string) (Status, error)
-
-	UpdateResources(ctx context.Context, containerID string, resources *Resources) error
-	CreateCheckpoint(ctx context.Context, containerID, checkpointDir string, exit bool) error
-}
-
-// StdioCallback is called to connect a container or process stdio.
-type StdioCallback func(io *cio.DirectIO) (cio.IO, error)
diff --git a/vendor/github.com/docker/docker/libcontainerd/types_linux.go b/vendor/github.com/docker/docker/libcontainerd/types_linux.go
deleted file mode 100644
index 943382b9b..000000000
--- a/vendor/github.com/docker/docker/libcontainerd/types_linux.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package libcontainerd // import "github.com/docker/docker/libcontainerd"
-
-import (
-	"time"
-
-	"github.com/containerd/cgroups"
-	"github.com/opencontainers/runtime-spec/specs-go"
-)
-
-// Summary is not used on Linux
-type Summary struct{}
-
-// Stats holds metrics properties as returned by containerd
-type Stats struct {
-	Read    time.Time
-	Metrics *cgroups.Metrics
-}
-
-func interfaceToStats(read time.Time, v interface{}) *Stats {
-	return &Stats{
-		Metrics: v.(*cgroups.Metrics),
-		Read:    read,
-	}
-}
-
-// Resources defines updatable container resource values. TODO: it must match the upcoming containerd API
-type Resources specs.LinuxResources
-
-// Checkpoints contains the details of a checkpoint
-type Checkpoints struct{}
diff --git a/vendor/github.com/docker/docker/libcontainerd/types_windows.go b/vendor/github.com/docker/docker/libcontainerd/types_windows.go
deleted file mode 100644
index 9041a2e8d..000000000
--- a/vendor/github.com/docker/docker/libcontainerd/types_windows.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package libcontainerd // import "github.com/docker/docker/libcontainerd"
-
-import (
-	"time"
-
-	"github.com/Microsoft/hcsshim"
-	opengcs "github.com/Microsoft/opengcs/client"
-)
-
-// Summary contains a ProcessList item from HCS to support `top`
-type Summary hcsshim.ProcessListItem
-
-// Stats contains statistics from HCS
-type Stats struct {
-	Read     time.Time
-	HCSStats *hcsshim.Statistics
-}
-
-func interfaceToStats(read time.Time, v interface{}) *Stats {
-	return &Stats{
-		HCSStats: v.(*hcsshim.Statistics),
-		Read:     read,
-	}
-}
-
-// Resources defines updatable container resource values.
-type Resources struct{}
-
-// LCOWOption is a CreateOption required for LCOW configuration
-type LCOWOption struct {
-	Config *opengcs.Config
-}
-
-// Checkpoint holds the details of a checkpoint (not supported in windows)
-type Checkpoint struct {
-	Name string
-}
-
-// Checkpoints contains the details of a checkpoint
-type Checkpoints struct {
-	Checkpoints []*Checkpoint
-}
diff --git a/vendor/github.com/docker/docker/libcontainerd/utils_linux.go b/vendor/github.com/docker/docker/libcontainerd/utils_linux.go
deleted file mode 100644
index ce17d1963..000000000
--- a/vendor/github.com/docker/docker/libcontainerd/utils_linux.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package libcontainerd // import "github.com/docker/docker/libcontainerd"
-
-import "syscall"
-
-// containerdSysProcAttr returns the SysProcAttr to use when exec'ing
-// containerd
-func containerdSysProcAttr() *syscall.SysProcAttr {
-	return &syscall.SysProcAttr{
-		Setsid:    true,
-		Pdeathsig: syscall.SIGKILL,
-	}
-}
diff --git a/vendor/github.com/docker/docker/libcontainerd/utils_windows.go b/vendor/github.com/docker/docker/libcontainerd/utils_windows.go
deleted file mode 100644
index fbf243d4f..000000000
--- a/vendor/github.com/docker/docker/libcontainerd/utils_windows.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package libcontainerd // import "github.com/docker/docker/libcontainerd"
-
-import (
-	"strings"
-
-	"syscall"
-
-	opengcs "github.com/Microsoft/opengcs/client"
-)
-
-// setupEnvironmentVariables converts a string array of environment variables
-// into a map as required by the HCS. Source array is in format [k1=v1] [k2=v2] etc.
-func setupEnvironmentVariables(a []string) map[string]string {
-	r := make(map[string]string)
-	for _, s := range a {
-		arr := strings.SplitN(s, "=", 2)
-		if len(arr) == 2 {
-			r[arr[0]] = arr[1]
-		}
-	}
-	return r
-}
-
-// Apply for the LCOW option is a no-op.
-func (s *LCOWOption) Apply(interface{}) error {
-	return nil
-}
-
-// debugGCS is a dirty hack for debugging Linux Utility VMs. It simply
-// runs a bunch of commands inside the UVM, but seriously aids in advanced debugging.
-func (c *container) debugGCS() { - if c == nil || c.isWindows || c.hcsContainer == nil { - return - } - cfg := opengcs.Config{ - Uvm: c.hcsContainer, - UvmTimeoutSeconds: 600, - } - cfg.DebugGCS() -} - -// containerdSysProcAttr returns the SysProcAttr to use when exec'ing -// containerd -func containerdSysProcAttr() *syscall.SysProcAttr { - return nil -} diff --git a/vendor/github.com/docker/docker/migrate/v1/migratev1.go b/vendor/github.com/docker/docker/migrate/v1/migratev1.go deleted file mode 100644 index 9cd759a3b..000000000 --- a/vendor/github.com/docker/docker/migrate/v1/migratev1.go +++ /dev/null @@ -1,501 +0,0 @@ -package v1 // import "github.com/docker/docker/migrate/v1" - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strconv" - "sync" - "time" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/distribution/metadata" - "github.com/docker/docker/image" - imagev1 "github.com/docker/docker/image/v1" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/ioutils" - refstore "github.com/docker/docker/reference" - "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" -) - -type graphIDRegistrar interface { - RegisterByGraphID(string, layer.ChainID, layer.DiffID, string, int64) (layer.Layer, error) - Release(layer.Layer) ([]layer.Metadata, error) -} - -type graphIDMounter interface { - CreateRWLayerByGraphID(string, string, layer.ChainID) error -} - -type checksumCalculator interface { - ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID layer.DiffID, size int64, err error) -} - -const ( - graphDirName = "graph" - tarDataFileName = "tar-data.json.gz" - migrationFileName = ".migration-v1-images.json" - migrationTagsFileName = ".migration-v1-tags" - migrationDiffIDFileName = ".migration-diffid" - migrationSizeFileName = ".migration-size" - migrationTarDataFileName = ".migration-tardata" - containersDirName = "containers" - configFileNameLegacy = "config.json" - configFileName = "config.v2.json" - repositoriesFilePrefixLegacy = "repositories-" -) - -var ( - errUnsupported = errors.New("migration is not supported") -) - -// Migrate takes an old graph directory and transforms the metadata into the -// new format. -func Migrate(root, driverName string, ls layer.Store, is image.Store, rs refstore.Store, ms metadata.Store) error { - graphDir := filepath.Join(root, graphDirName) - if _, err := os.Lstat(graphDir); os.IsNotExist(err) { - return nil - } - - mappings, err := restoreMappings(root) - if err != nil { - return err - } - - if cc, ok := ls.(checksumCalculator); ok { - CalculateLayerChecksums(root, cc, mappings) - } - - if registrar, ok := ls.(graphIDRegistrar); !ok { - return errUnsupported - } else if err := migrateImages(root, registrar, is, ms, mappings); err != nil { - return err - } - - err = saveMappings(root, mappings) - if err != nil { - return err - } - - if mounter, ok := ls.(graphIDMounter); !ok { - return errUnsupported - } else if err := migrateContainers(root, mounter, is, mappings); err != nil { - return err - } - - return migrateRefs(root, driverName, rs, mappings) -} - -// CalculateLayerChecksums walks an old graph directory and calculates checksums -// for each layer. These checksums are later used for migration. 
-func CalculateLayerChecksums(root string, ls checksumCalculator, mappings map[string]image.ID) { - graphDir := filepath.Join(root, graphDirName) - // spawn some extra workers also for maximum performance because the process is bounded by both cpu and io - workers := runtime.NumCPU() * 3 - workQueue := make(chan string, workers) - - wg := sync.WaitGroup{} - - for i := 0; i < workers; i++ { - wg.Add(1) - go func() { - for id := range workQueue { - start := time.Now() - if err := calculateLayerChecksum(graphDir, id, ls); err != nil { - logrus.Errorf("could not calculate checksum for %q, %q", id, err) - } - elapsed := time.Since(start) - logrus.Debugf("layer %s took %.2f seconds", id, elapsed.Seconds()) - } - wg.Done() - }() - } - - dir, err := ioutil.ReadDir(graphDir) - if err != nil { - logrus.Errorf("could not read directory %q", graphDir) - return - } - for _, v := range dir { - v1ID := v.Name() - if err := imagev1.ValidateID(v1ID); err != nil { - continue - } - if _, ok := mappings[v1ID]; ok { // support old migrations without helper files - continue - } - workQueue <- v1ID - } - close(workQueue) - wg.Wait() -} - -func calculateLayerChecksum(graphDir, id string, ls checksumCalculator) error { - diffIDFile := filepath.Join(graphDir, id, migrationDiffIDFileName) - if _, err := os.Lstat(diffIDFile); err == nil { - return nil - } else if !os.IsNotExist(err) { - return err - } - - parent, err := getParent(filepath.Join(graphDir, id)) - if err != nil { - return err - } - - diffID, size, err := ls.ChecksumForGraphID(id, parent, filepath.Join(graphDir, id, tarDataFileName), filepath.Join(graphDir, id, migrationTarDataFileName)) - if err != nil { - return err - } - - if err := ioutil.WriteFile(filepath.Join(graphDir, id, migrationSizeFileName), []byte(strconv.Itoa(int(size))), 0600); err != nil { - return err - } - - if err := ioutils.AtomicWriteFile(filepath.Join(graphDir, id, migrationDiffIDFileName), []byte(diffID), 0600); err != nil { - return err - } - - logrus.Infof("calculated checksum for layer %s: %s", id, diffID) - return nil -} - -func restoreMappings(root string) (map[string]image.ID, error) { - mappings := make(map[string]image.ID) - - mfile := filepath.Join(root, migrationFileName) - f, err := os.Open(mfile) - if err != nil && !os.IsNotExist(err) { - return nil, err - } else if err == nil { - err := json.NewDecoder(f).Decode(&mappings) - if err != nil { - f.Close() - return nil, err - } - f.Close() - } - - return mappings, nil -} - -func saveMappings(root string, mappings map[string]image.ID) error { - mfile := filepath.Join(root, migrationFileName) - f, err := os.OpenFile(mfile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - return err - } - defer f.Close() - return json.NewEncoder(f).Encode(mappings) -} - -func migrateImages(root string, ls graphIDRegistrar, is image.Store, ms metadata.Store, mappings map[string]image.ID) error { - graphDir := filepath.Join(root, graphDirName) - - dir, err := ioutil.ReadDir(graphDir) - if err != nil { - return err - } - for _, v := range dir { - v1ID := v.Name() - if err := imagev1.ValidateID(v1ID); err != nil { - continue - } - if _, exists := mappings[v1ID]; exists { - continue - } - if err := migrateImage(v1ID, root, ls, is, ms, mappings); err != nil { - continue - } - } - - return nil -} - -func migrateContainers(root string, ls graphIDMounter, is image.Store, imageMappings map[string]image.ID) error { - containersDir := filepath.Join(root, containersDirName) - dir, err := ioutil.ReadDir(containersDir) - if err != nil { - return 
err - } - for _, v := range dir { - id := v.Name() - - if _, err := os.Stat(filepath.Join(containersDir, id, configFileName)); err == nil { - continue - } - - containerJSON, err := ioutil.ReadFile(filepath.Join(containersDir, id, configFileNameLegacy)) - if err != nil { - logrus.Errorf("migrate container error: %v", err) - continue - } - - var c map[string]*json.RawMessage - if err := json.Unmarshal(containerJSON, &c); err != nil { - logrus.Errorf("migrate container error: %v", err) - continue - } - - imageStrJSON, ok := c["Image"] - if !ok { - return fmt.Errorf("invalid container configuration for %v", id) - } - - var image string - if err := json.Unmarshal([]byte(*imageStrJSON), &image); err != nil { - logrus.Errorf("migrate container error: %v", err) - continue - } - - imageID, ok := imageMappings[image] - if !ok { - logrus.Errorf("image not migrated %v", imageID) // non-fatal error - continue - } - - c["Image"] = rawJSON(imageID) - - containerJSON, err = json.Marshal(c) - if err != nil { - return err - } - - if err := ioutil.WriteFile(filepath.Join(containersDir, id, configFileName), containerJSON, 0600); err != nil { - return err - } - - img, err := is.Get(imageID) - if err != nil { - return err - } - - if err := ls.CreateRWLayerByGraphID(id, id, img.RootFS.ChainID()); err != nil { - logrus.Errorf("migrate container error: %v", err) - continue - } - - logrus.Infof("migrated container %s to point to %s", id, imageID) - - } - return nil -} - -type refAdder interface { - AddTag(ref reference.Named, id digest.Digest, force bool) error - AddDigest(ref reference.Canonical, id digest.Digest, force bool) error -} - -func migrateRefs(root, driverName string, rs refAdder, mappings map[string]image.ID) error { - migrationFile := filepath.Join(root, migrationTagsFileName) - if _, err := os.Lstat(migrationFile); !os.IsNotExist(err) { - return err - } - - type repositories struct { - Repositories map[string]map[string]string - } - - var repos repositories - - f, err := os.Open(filepath.Join(root, repositoriesFilePrefixLegacy+driverName)) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - defer f.Close() - if err := json.NewDecoder(f).Decode(&repos); err != nil { - return err - } - - for name, repo := range repos.Repositories { - for tag, id := range repo { - if strongID, exists := mappings[id]; exists { - ref, err := reference.ParseNormalizedNamed(name) - if err != nil { - logrus.Errorf("migrate tags: invalid name %q, %q", name, err) - continue - } - if !reference.IsNameOnly(ref) { - logrus.Errorf("migrate tags: invalid name %q, unexpected tag or digest", name) - continue - } - if dgst, err := digest.Parse(tag); err == nil { - canonical, err := reference.WithDigest(reference.TrimNamed(ref), dgst) - if err != nil { - logrus.Errorf("migrate tags: invalid digest %q, %q", dgst, err) - continue - } - if err := rs.AddDigest(canonical, strongID.Digest(), false); err != nil { - logrus.Errorf("can't migrate digest %q for %q, err: %q", reference.FamiliarString(ref), strongID, err) - } - } else { - tagRef, err := reference.WithTag(ref, tag) - if err != nil { - logrus.Errorf("migrate tags: invalid tag %q, %q", tag, err) - continue - } - if err := rs.AddTag(tagRef, strongID.Digest(), false); err != nil { - logrus.Errorf("can't migrate tag %q for %q, err: %q", reference.FamiliarString(ref), strongID, err) - } - } - logrus.Infof("migrated tag %s:%s to point to %s", name, tag, strongID) - } - } - } - - mf, err := os.Create(migrationFile) - if err != nil { - return err - } - mf.Close() 
- - return nil -} - -func getParent(confDir string) (string, error) { - jsonFile := filepath.Join(confDir, "json") - imageJSON, err := ioutil.ReadFile(jsonFile) - if err != nil { - return "", err - } - var parent struct { - Parent string - ParentID digest.Digest `json:"parent_id"` - } - if err := json.Unmarshal(imageJSON, &parent); err != nil { - return "", err - } - if parent.Parent == "" && parent.ParentID != "" { // v1.9 - parent.Parent = parent.ParentID.Hex() - } - // compatibilityID for parent - parentCompatibilityID, err := ioutil.ReadFile(filepath.Join(confDir, "parent")) - if err == nil && len(parentCompatibilityID) > 0 { - parent.Parent = string(parentCompatibilityID) - } - return parent.Parent, nil -} - -func migrateImage(id, root string, ls graphIDRegistrar, is image.Store, ms metadata.Store, mappings map[string]image.ID) (err error) { - defer func() { - if err != nil { - logrus.Errorf("migration failed for %v, err: %v", id, err) - } - }() - - parent, err := getParent(filepath.Join(root, graphDirName, id)) - if err != nil { - return err - } - - var parentID image.ID - if parent != "" { - var exists bool - if parentID, exists = mappings[parent]; !exists { - if err := migrateImage(parent, root, ls, is, ms, mappings); err != nil { - // todo: fail or allow broken chains? - return err - } - parentID = mappings[parent] - } - } - - rootFS := image.NewRootFS() - var history []image.History - - if parentID != "" { - parentImg, err := is.Get(parentID) - if err != nil { - return err - } - - rootFS = parentImg.RootFS - history = parentImg.History - } - - diffIDData, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, migrationDiffIDFileName)) - if err != nil { - return err - } - diffID, err := digest.Parse(string(diffIDData)) - if err != nil { - return err - } - - sizeStr, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, migrationSizeFileName)) - if err != nil { - return err - } - size, err := strconv.ParseInt(string(sizeStr), 10, 64) - if err != nil { - return err - } - - layer, err := ls.RegisterByGraphID(id, rootFS.ChainID(), layer.DiffID(diffID), filepath.Join(root, graphDirName, id, migrationTarDataFileName), size) - if err != nil { - return err - } - logrus.Infof("migrated layer %s to %s", id, layer.DiffID()) - - jsonFile := filepath.Join(root, graphDirName, id, "json") - imageJSON, err := ioutil.ReadFile(jsonFile) - if err != nil { - return err - } - - h, err := imagev1.HistoryFromConfig(imageJSON, false) - if err != nil { - return err - } - history = append(history, h) - - rootFS.Append(layer.DiffID()) - - config, err := imagev1.MakeConfigFromV1Config(imageJSON, rootFS, history) - if err != nil { - return err - } - strongID, err := is.Create(config) - if err != nil { - return err - } - logrus.Infof("migrated image %s to %s", id, strongID) - - if parentID != "" { - if err := is.SetParent(strongID, parentID); err != nil { - return err - } - } - - checksum, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, "checksum")) - if err == nil { // best effort - dgst, err := digest.Parse(string(checksum)) - if err == nil { - V2MetadataService := metadata.NewV2MetadataService(ms) - V2MetadataService.Add(layer.DiffID(), metadata.V2Metadata{Digest: dgst}) - } - } - _, err = ls.Release(layer) - if err != nil { - return err - } - - mappings[id] = strongID - return -} - -func rawJSON(value interface{}) *json.RawMessage { - jsonval, err := json.Marshal(value) - if err != nil { - return nil - } - return (*json.RawMessage)(&jsonval) -} diff --git 
a/vendor/github.com/docker/docker/oci/defaults.go b/vendor/github.com/docker/docker/oci/defaults.go deleted file mode 100644 index 4145412dd..000000000 --- a/vendor/github.com/docker/docker/oci/defaults.go +++ /dev/null @@ -1,211 +0,0 @@ -package oci // import "github.com/docker/docker/oci" - -import ( - "os" - "runtime" - - "github.com/opencontainers/runtime-spec/specs-go" -) - -func iPtr(i int64) *int64 { return &i } -func u32Ptr(i int64) *uint32 { u := uint32(i); return &u } -func fmPtr(i int64) *os.FileMode { fm := os.FileMode(i); return &fm } - -func defaultCapabilities() []string { - return []string{ - "CAP_CHOWN", - "CAP_DAC_OVERRIDE", - "CAP_FSETID", - "CAP_FOWNER", - "CAP_MKNOD", - "CAP_NET_RAW", - "CAP_SETGID", - "CAP_SETUID", - "CAP_SETFCAP", - "CAP_SETPCAP", - "CAP_NET_BIND_SERVICE", - "CAP_SYS_CHROOT", - "CAP_KILL", - "CAP_AUDIT_WRITE", - } -} - -// DefaultSpec returns the default spec used by docker for the current Platform -func DefaultSpec() specs.Spec { - return DefaultOSSpec(runtime.GOOS) -} - -// DefaultOSSpec returns the spec for a given OS -func DefaultOSSpec(osName string) specs.Spec { - if osName == "windows" { - return DefaultWindowsSpec() - } - return DefaultLinuxSpec() -} - -// DefaultWindowsSpec create a default spec for running Windows containers -func DefaultWindowsSpec() specs.Spec { - return specs.Spec{ - Version: specs.Version, - Windows: &specs.Windows{}, - Process: &specs.Process{}, - Root: &specs.Root{}, - } -} - -// DefaultLinuxSpec create a default spec for running Linux containers -func DefaultLinuxSpec() specs.Spec { - s := specs.Spec{ - Version: specs.Version, - Process: &specs.Process{ - Capabilities: &specs.LinuxCapabilities{ - Bounding: defaultCapabilities(), - Permitted: defaultCapabilities(), - Inheritable: defaultCapabilities(), - Effective: defaultCapabilities(), - }, - }, - Root: &specs.Root{}, - } - s.Mounts = []specs.Mount{ - { - Destination: "/proc", - Type: "proc", - Source: "proc", - Options: []string{"nosuid", "noexec", "nodev"}, - }, - { - Destination: "/dev", - Type: "tmpfs", - Source: "tmpfs", - Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"}, - }, - { - Destination: "/dev/pts", - Type: "devpts", - Source: "devpts", - Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"}, - }, - { - Destination: "/sys", - Type: "sysfs", - Source: "sysfs", - Options: []string{"nosuid", "noexec", "nodev", "ro"}, - }, - { - Destination: "/sys/fs/cgroup", - Type: "cgroup", - Source: "cgroup", - Options: []string{"ro", "nosuid", "noexec", "nodev"}, - }, - { - Destination: "/dev/mqueue", - Type: "mqueue", - Source: "mqueue", - Options: []string{"nosuid", "noexec", "nodev"}, - }, - { - Destination: "/dev/shm", - Type: "tmpfs", - Source: "shm", - Options: []string{"nosuid", "noexec", "nodev", "mode=1777"}, - }, - } - - s.Linux = &specs.Linux{ - MaskedPaths: []string{ - "/proc/kcore", - "/proc/keys", - "/proc/latency_stats", - "/proc/timer_list", - "/proc/timer_stats", - "/proc/sched_debug", - "/proc/scsi", - "/sys/firmware", - }, - ReadonlyPaths: []string{ - "/proc/asound", - "/proc/bus", - "/proc/fs", - "/proc/irq", - "/proc/sys", - "/proc/sysrq-trigger", - }, - Namespaces: []specs.LinuxNamespace{ - {Type: "mount"}, - {Type: "network"}, - {Type: "uts"}, - {Type: "pid"}, - {Type: "ipc"}, - }, - // Devices implicitly contains the following devices: - // null, zero, full, random, urandom, tty, console, and ptmx. - // ptmx is a bind mount or symlink of the container's ptmx. 
- // See also: https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#default-devices - Devices: []specs.LinuxDevice{}, - Resources: &specs.LinuxResources{ - Devices: []specs.LinuxDeviceCgroup{ - { - Allow: false, - Access: "rwm", - }, - { - Allow: true, - Type: "c", - Major: iPtr(1), - Minor: iPtr(5), - Access: "rwm", - }, - { - Allow: true, - Type: "c", - Major: iPtr(1), - Minor: iPtr(3), - Access: "rwm", - }, - { - Allow: true, - Type: "c", - Major: iPtr(1), - Minor: iPtr(9), - Access: "rwm", - }, - { - Allow: true, - Type: "c", - Major: iPtr(1), - Minor: iPtr(8), - Access: "rwm", - }, - { - Allow: true, - Type: "c", - Major: iPtr(5), - Minor: iPtr(0), - Access: "rwm", - }, - { - Allow: true, - Type: "c", - Major: iPtr(5), - Minor: iPtr(1), - Access: "rwm", - }, - { - Allow: false, - Type: "c", - Major: iPtr(10), - Minor: iPtr(229), - Access: "rwm", - }, - }, - }, - } - - // For LCOW support, populate a blank Windows spec - if runtime.GOOS == "windows" { - s.Windows = &specs.Windows{} - } - - return s -} diff --git a/vendor/github.com/docker/docker/oci/devices_linux.go b/vendor/github.com/docker/docker/oci/devices_linux.go deleted file mode 100644 index 46d4e1d32..000000000 --- a/vendor/github.com/docker/docker/oci/devices_linux.go +++ /dev/null @@ -1,86 +0,0 @@ -package oci // import "github.com/docker/docker/oci" - -import ( - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/opencontainers/runc/libcontainer/configs" - "github.com/opencontainers/runc/libcontainer/devices" - "github.com/opencontainers/runtime-spec/specs-go" -) - -// Device transforms a libcontainer configs.Device to a specs.LinuxDevice object. -func Device(d *configs.Device) specs.LinuxDevice { - return specs.LinuxDevice{ - Type: string(d.Type), - Path: d.Path, - Major: d.Major, - Minor: d.Minor, - FileMode: fmPtr(int64(d.FileMode)), - UID: u32Ptr(int64(d.Uid)), - GID: u32Ptr(int64(d.Gid)), - } -} - -func deviceCgroup(d *configs.Device) specs.LinuxDeviceCgroup { - t := string(d.Type) - return specs.LinuxDeviceCgroup{ - Allow: true, - Type: t, - Major: &d.Major, - Minor: &d.Minor, - Access: d.Permissions, - } -} - -// DevicesFromPath computes a list of devices and device permissions from paths (pathOnHost and pathInContainer) and cgroup permissions. 
-func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.LinuxDevice, devPermissions []specs.LinuxDeviceCgroup, err error) { - resolvedPathOnHost := pathOnHost - - // check if it is a symbolic link - if src, e := os.Lstat(pathOnHost); e == nil && src.Mode()&os.ModeSymlink == os.ModeSymlink { - if linkedPathOnHost, e := filepath.EvalSymlinks(pathOnHost); e == nil { - resolvedPathOnHost = linkedPathOnHost - } - } - - device, err := devices.DeviceFromPath(resolvedPathOnHost, cgroupPermissions) - // if there was no error, return the device - if err == nil { - device.Path = pathInContainer - return append(devs, Device(device)), append(devPermissions, deviceCgroup(device)), nil - } - - // if the device is not a device node - // try to see if it's a directory holding many devices - if err == devices.ErrNotADevice { - - // check if it is a directory - if src, e := os.Stat(resolvedPathOnHost); e == nil && src.IsDir() { - - // mount the internal devices recursively - filepath.Walk(resolvedPathOnHost, func(dpath string, f os.FileInfo, e error) error { - childDevice, e := devices.DeviceFromPath(dpath, cgroupPermissions) - if e != nil { - // ignore the device - return nil - } - - // add the device to userSpecified devices - childDevice.Path = strings.Replace(dpath, resolvedPathOnHost, pathInContainer, 1) - devs = append(devs, Device(childDevice)) - devPermissions = append(devPermissions, deviceCgroup(childDevice)) - - return nil - }) - } - } - - if len(devs) > 0 { - return devs, devPermissions, nil - } - - return devs, devPermissions, fmt.Errorf("error gathering device information while adding custom device %q: %s", pathOnHost, err) -} diff --git a/vendor/github.com/docker/docker/oci/devices_unsupported.go b/vendor/github.com/docker/docker/oci/devices_unsupported.go deleted file mode 100644 index af6dd3bda..000000000 --- a/vendor/github.com/docker/docker/oci/devices_unsupported.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build !linux - -package oci // import "github.com/docker/docker/oci" - -import ( - "errors" - - "github.com/opencontainers/runc/libcontainer/configs" - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -// Device transforms a libcontainer configs.Device to a specs.Device object. -// Not implemented -func Device(d *configs.Device) specs.LinuxDevice { return specs.LinuxDevice{} } - -// DevicesFromPath computes a list of devices and device permissions from paths (pathOnHost and pathInContainer) and cgroup permissions. -// Not implemented -func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.LinuxDevice, devPermissions []specs.LinuxDeviceCgroup, err error) { - return nil, nil, errors.New("oci/devices: unsupported platform") -} diff --git a/vendor/github.com/docker/docker/oci/namespaces.go b/vendor/github.com/docker/docker/oci/namespaces.go deleted file mode 100644 index 5a2d8f208..000000000 --- a/vendor/github.com/docker/docker/oci/namespaces.go +++ /dev/null @@ -1,13 +0,0 @@ -package oci // import "github.com/docker/docker/oci" - -import "github.com/opencontainers/runtime-spec/specs-go" - -// RemoveNamespace removes the `nsType` namespace from OCI spec `s` -func RemoveNamespace(s *specs.Spec, nsType specs.LinuxNamespaceType) { - for i, n := range s.Linux.Namespaces { - if n.Type == nsType { - s.Linux.Namespaces = append(s.Linux.Namespaces[:i], s.Linux.Namespaces[i+1:]...) 
-			return
-		}
-	}
-}
diff --git a/vendor/github.com/docker/docker/opts/address_pools.go b/vendor/github.com/docker/docker/opts/address_pools.go
deleted file mode 100644
index 9b27a6285..000000000
--- a/vendor/github.com/docker/docker/opts/address_pools.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package opts
-
-import (
-	"encoding/csv"
-	"encoding/json"
-	"fmt"
-	"strconv"
-	"strings"
-
-	types "github.com/docker/libnetwork/ipamutils"
-)
-
-// PoolsOpt is a Value type for parsing the default address pools definitions
-type PoolsOpt struct {
-	values []*types.NetworkToSplit
-}
-
-// UnmarshalJSON fills the values structure from JSON input
-func (p *PoolsOpt) UnmarshalJSON(raw []byte) error {
-	return json.Unmarshal(raw, &(p.values))
-}
-
-// Set predefined pools
-func (p *PoolsOpt) Set(value string) error {
-	csvReader := csv.NewReader(strings.NewReader(value))
-	fields, err := csvReader.Read()
-	if err != nil {
-		return err
-	}
-
-	poolsDef := types.NetworkToSplit{}
-
-	for _, field := range fields {
-		parts := strings.SplitN(field, "=", 2)
-		if len(parts) != 2 {
-			return fmt.Errorf("invalid field '%s' must be a key=value pair", field)
-		}
-
-		key := strings.ToLower(parts[0])
-		value := strings.ToLower(parts[1])
-
-		switch key {
-		case "base":
-			poolsDef.Base = value
-		case "size":
-			size, err := strconv.Atoi(value)
-			if err != nil {
-				return fmt.Errorf("invalid size value: %q (must be integer): %v", value, err)
-			}
-			poolsDef.Size = size
-		default:
-			return fmt.Errorf("unexpected key '%s' in '%s'", key, field)
-		}
-	}
-
-	p.values = append(p.values, &poolsDef)
-
-	return nil
-}
-
-// Type returns the type of this option
-func (p *PoolsOpt) Type() string {
-	return "pool-options"
-}
-
-// String returns a string representation of this option
-func (p *PoolsOpt) String() string {
-	var pools []string
-	for _, pool := range p.values {
-		repr := fmt.Sprintf("%s %d", pool.Base, pool.Size)
-		pools = append(pools, repr)
-	}
-	return strings.Join(pools, ", ")
-}
-
-// Value returns the configured address pools
-func (p *PoolsOpt) Value() []*types.NetworkToSplit {
-	return p.values
-}
-
-// Name returns the flag name of this option
-func (p *PoolsOpt) Name() string {
-	return "default-address-pools"
-}
diff --git a/vendor/github.com/docker/docker/opts/env.go b/vendor/github.com/docker/docker/opts/env.go
deleted file mode 100644
index f6e5e9074..000000000
--- a/vendor/github.com/docker/docker/opts/env.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package opts // import "github.com/docker/docker/opts"
-
-import (
-	"fmt"
-	"os"
-	"runtime"
-	"strings"
-
-	"github.com/pkg/errors"
-)
-
-// ValidateEnv validates an environment variable and returns it.
-// If no value is specified, it returns the current value using os.Getenv.
-//
-// As with ParseEnvFile and related to #16585, environment variable names
-// are not validated whatsoever; it's up to the application inside docker
-// to validate them or not.
-//
-// The only validation here is to check if name is empty, per #25099
-func ValidateEnv(val string) (string, error) {
-	arr := strings.Split(val, "=")
-	if arr[0] == "" {
-		return "", errors.Errorf("invalid environment variable: %s", val)
-	}
-	if len(arr) > 1 {
-		return val, nil
-	}
-	if !doesEnvExist(val) {
-		return val, nil
-	}
-	return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil
-}
-
-func doesEnvExist(name string) bool {
-	for _, entry := range os.Environ() {
-		parts := strings.SplitN(entry, "=", 2)
-		if runtime.GOOS == "windows" {
-			// Environment variables are case-insensitive on Windows. PaTh, path and PATH are equivalent.
- if strings.EqualFold(parts[0], name) { - return true - } - } - if parts[0] == name { - return true - } - } - return false -} diff --git a/vendor/github.com/docker/docker/opts/hosts.go b/vendor/github.com/docker/docker/opts/hosts.go deleted file mode 100644 index 2adf4211d..000000000 --- a/vendor/github.com/docker/docker/opts/hosts.go +++ /dev/null @@ -1,165 +0,0 @@ -package opts // import "github.com/docker/docker/opts" - -import ( - "fmt" - "net" - "net/url" - "strconv" - "strings" -) - -var ( - // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. dockerd -H tcp:// - // These are the IANA registered port numbers for use with Docker - // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker - DefaultHTTPPort = 2375 // Default HTTP Port - // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled - DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port - // DefaultUnixSocket Path for the unix socket. - // Docker daemon by default always listens on the default unix socket - DefaultUnixSocket = "/var/run/docker.sock" - // DefaultTCPHost constant defines the default host string used by docker on Windows - DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort) - // DefaultTLSHost constant defines the default host string used by docker for TLS sockets - DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort) - // DefaultNamedPipe defines the default named pipe used by docker on Windows - DefaultNamedPipe = `//./pipe/docker_engine` -) - -// ValidateHost validates that the specified string is a valid host and returns it. -func ValidateHost(val string) (string, error) { - host := strings.TrimSpace(val) - // The empty string means default and is not handled by parseDaemonHost - if host != "" { - _, err := parseDaemonHost(host) - if err != nil { - return val, err - } - } - // Note: unlike most flag validators, we don't return the mutated value here - // we need to know what the user entered later (using ParseHost) to adjust for TLS - return val, nil -} - -// ParseHost and set defaults for a Daemon host string -func ParseHost(defaultToTLS bool, val string) (string, error) { - host := strings.TrimSpace(val) - if host == "" { - if defaultToTLS { - host = DefaultTLSHost - } else { - host = DefaultHost - } - } else { - var err error - host, err = parseDaemonHost(host) - if err != nil { - return val, err - } - } - return host, nil -} - -// parseDaemonHost parses the specified address and returns an address that will be used as the host. -// Depending of the address specified, this may return one of the global Default* strings defined in hosts.go. -func parseDaemonHost(addr string) (string, error) { - addrParts := strings.SplitN(addr, "://", 2) - if len(addrParts) == 1 && addrParts[0] != "" { - addrParts = []string{"tcp", addrParts[0]} - } - - switch addrParts[0] { - case "tcp": - return ParseTCPAddr(addrParts[1], DefaultTCPHost) - case "unix": - return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket) - case "npipe": - return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe) - case "fd": - return addr, nil - default: - return "", fmt.Errorf("Invalid bind address format: %s", addr) - } -} - -// parseSimpleProtoAddr parses and validates that the specified address is a valid -// socket address for simple protocols like unix and npipe. 
It returns a formatted -// socket address, either using the address parsed from addr, or the contents of -// defaultAddr if addr is a blank string. -func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) { - addr = strings.TrimPrefix(addr, proto+"://") - if strings.Contains(addr, "://") { - return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr) - } - if addr == "" { - addr = defaultAddr - } - return fmt.Sprintf("%s://%s", proto, addr), nil -} - -// ParseTCPAddr parses and validates that the specified address is a valid TCP -// address. It returns a formatted TCP address, either using the address parsed -// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string. -// tryAddr is expected to have already been Trim()'d -// defaultAddr must be in the full `tcp://host:port` form -func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) { - if tryAddr == "" || tryAddr == "tcp://" { - return defaultAddr, nil - } - addr := strings.TrimPrefix(tryAddr, "tcp://") - if strings.Contains(addr, "://") || addr == "" { - return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr) - } - - defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://") - defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr) - if err != nil { - return "", err - } - // url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but - // not 1.4. See https://github.com/golang/go/issues/12200 and - // https://github.com/golang/go/issues/6530. - if strings.HasSuffix(addr, "]:") { - addr += defaultPort - } - - u, err := url.Parse("tcp://" + addr) - if err != nil { - return "", err - } - host, port, err := net.SplitHostPort(u.Host) - if err != nil { - // try port addition once - host, port, err = net.SplitHostPort(net.JoinHostPort(u.Host, defaultPort)) - } - if err != nil { - return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) - } - - if host == "" { - host = defaultHost - } - if port == "" { - port = defaultPort - } - p, err := strconv.Atoi(port) - if err != nil && p == 0 { - return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) - } - - return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil -} - -// ValidateExtraHost validates that the specified string is a valid extrahost and returns it. -// ExtraHost is in the form of name:ip where the ip has to be a valid ip (IPv4 or IPv6). 
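ParseTCPAddr's defaulting is easiest to see by example; a small sketch (hypothetical inputs, assuming the vendored import path):

    package main

    import (
        "fmt"

        "github.com/docker/docker/opts"
    )

    func main() {
        def := "tcp://0.0.0.0:2375"
        for _, in := range []string{"", "tcp://", ":2376", "127.0.0.1", "[::1]:"} {
            out, err := opts.ParseTCPAddr(in, def)
            fmt.Printf("%-12q -> %q %v\n", in, out, err)
        }
        // "" and "tcp://" return the default unchanged; ":2376" borrows the
        // default host, "127.0.0.1" borrows the default port, and "[::1]:"
        // gets the default port appended after the IPv6 brackets.
    }
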
-func ValidateExtraHost(val string) (string, error) { - // allow for IPv6 addresses in extra hosts by only splitting on first ":" - arr := strings.SplitN(val, ":", 2) - if len(arr) != 2 || len(arr[0]) == 0 { - return "", fmt.Errorf("bad format for add-host: %q", val) - } - if _, err := ValidateIPAddress(arr[1]); err != nil { - return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1]) - } - return val, nil -} diff --git a/vendor/github.com/docker/docker/opts/hosts_unix.go b/vendor/github.com/docker/docker/opts/hosts_unix.go deleted file mode 100644 index 9d5bb6456..000000000 --- a/vendor/github.com/docker/docker/opts/hosts_unix.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !windows - -package opts // import "github.com/docker/docker/opts" - -import "fmt" - -// DefaultHost constant defines the default host string used by docker on other hosts than Windows -var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket) diff --git a/vendor/github.com/docker/docker/opts/hosts_windows.go b/vendor/github.com/docker/docker/opts/hosts_windows.go deleted file mode 100644 index 906eba53e..000000000 --- a/vendor/github.com/docker/docker/opts/hosts_windows.go +++ /dev/null @@ -1,4 +0,0 @@ -package opts // import "github.com/docker/docker/opts" - -// DefaultHost constant defines the default host string used by docker on Windows -var DefaultHost = "npipe://" + DefaultNamedPipe diff --git a/vendor/github.com/docker/docker/opts/ip.go b/vendor/github.com/docker/docker/opts/ip.go deleted file mode 100644 index cfbff3a9f..000000000 --- a/vendor/github.com/docker/docker/opts/ip.go +++ /dev/null @@ -1,47 +0,0 @@ -package opts // import "github.com/docker/docker/opts" - -import ( - "fmt" - "net" -) - -// IPOpt holds an IP. It is used to store values from CLI flags. -type IPOpt struct { - *net.IP -} - -// NewIPOpt creates a new IPOpt from a reference net.IP and a -// string representation of an IP. If the string is not a valid -// IP it will fallback to the specified reference. -func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt { - o := &IPOpt{ - IP: ref, - } - o.Set(defaultVal) - return o -} - -// Set sets an IPv4 or IPv6 address from a given string. If the given -// string is not parsable as an IP address it returns an error. -func (o *IPOpt) Set(val string) error { - ip := net.ParseIP(val) - if ip == nil { - return fmt.Errorf("%s is not an ip address", val) - } - *o.IP = ip - return nil -} - -// String returns the IP address stored in the IPOpt. If stored IP is a -// nil pointer, it returns an empty string. -func (o *IPOpt) String() string { - if *o.IP == nil { - return "" - } - return o.IP.String() -} - -// Type returns the type of the option -func (o *IPOpt) Type() string { - return "ip" -} diff --git a/vendor/github.com/docker/docker/opts/opts.go b/vendor/github.com/docker/docker/opts/opts.go deleted file mode 100644 index de8aacb80..000000000 --- a/vendor/github.com/docker/docker/opts/opts.go +++ /dev/null @@ -1,337 +0,0 @@ -package opts // import "github.com/docker/docker/opts" - -import ( - "fmt" - "net" - "path" - "regexp" - "strings" - - "github.com/docker/go-units" -) - -var ( - alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) - domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) -) - -// ListOpts holds a list of values and a validation function. 
-type ListOpts struct { - values *[]string - validator ValidatorFctType -} - -// NewListOpts creates a new ListOpts with the specified validator. -func NewListOpts(validator ValidatorFctType) ListOpts { - var values []string - return *NewListOptsRef(&values, validator) -} - -// NewListOptsRef creates a new ListOpts with the specified values and validator. -func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts { - return &ListOpts{ - values: values, - validator: validator, - } -} - -func (opts *ListOpts) String() string { - if len(*opts.values) == 0 { - return "" - } - return fmt.Sprintf("%v", *opts.values) -} - -// Set validates if needed the input value and adds it to the -// internal slice. -func (opts *ListOpts) Set(value string) error { - if opts.validator != nil { - v, err := opts.validator(value) - if err != nil { - return err - } - value = v - } - *opts.values = append(*opts.values, value) - return nil -} - -// Delete removes the specified element from the slice. -func (opts *ListOpts) Delete(key string) { - for i, k := range *opts.values { - if k == key { - *opts.values = append((*opts.values)[:i], (*opts.values)[i+1:]...) - return - } - } -} - -// GetMap returns the content of values in a map in order to avoid -// duplicates. -func (opts *ListOpts) GetMap() map[string]struct{} { - ret := make(map[string]struct{}) - for _, k := range *opts.values { - ret[k] = struct{}{} - } - return ret -} - -// GetAll returns the values of slice. -func (opts *ListOpts) GetAll() []string { - return *opts.values -} - -// GetAllOrEmpty returns the values of the slice -// or an empty slice when there are no values. -func (opts *ListOpts) GetAllOrEmpty() []string { - v := *opts.values - if v == nil { - return make([]string, 0) - } - return v -} - -// Get checks the existence of the specified key. -func (opts *ListOpts) Get(key string) bool { - for _, k := range *opts.values { - if k == key { - return true - } - } - return false -} - -// Len returns the amount of element in the slice. -func (opts *ListOpts) Len() int { - return len(*opts.values) -} - -// Type returns a string name for this Option type -func (opts *ListOpts) Type() string { - return "list" -} - -// WithValidator returns the ListOpts with validator set. -func (opts *ListOpts) WithValidator(validator ValidatorFctType) *ListOpts { - opts.validator = validator - return opts -} - -// NamedOption is an interface that list and map options -// with names implement. -type NamedOption interface { - Name() string -} - -// NamedListOpts is a ListOpts with a configuration name. -// This struct is useful to keep reference to the assigned -// field name in the internal configuration struct. -type NamedListOpts struct { - name string - ListOpts -} - -var _ NamedOption = &NamedListOpts{} - -// NewNamedListOptsRef creates a reference to a new NamedListOpts struct. -func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts { - return &NamedListOpts{ - name: name, - ListOpts: *NewListOptsRef(values, validator), - } -} - -// Name returns the name of the NamedListOpts in the configuration. -func (o *NamedListOpts) Name() string { - return o.name -} - -// MapOpts holds a map of values and a validation function. -type MapOpts struct { - values map[string]string - validator ValidatorFctType -} - -// Set validates if needed the input value and add it to the -// internal map, by splitting on '='. 
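As a usage sketch of ListOpts: the validator runs on every Set, so bad values are rejected at flag-parse time (ValidateIPAddress is defined further down in this file):

    package main

    import (
        "fmt"

        "github.com/docker/docker/opts"
    )

    func main() {
        dns := opts.NewListOpts(opts.ValidateIPAddress)
        fmt.Println(dns.Set("8.8.8.8"))      // <nil>; canonicalized and appended
        fmt.Println(dns.Set("not-an-ip"))    // not-an-ip is not an ip address
        fmt.Println(dns.GetAll(), dns.Len()) // [8.8.8.8] 1
    }
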
-func (opts *MapOpts) Set(value string) error { - if opts.validator != nil { - v, err := opts.validator(value) - if err != nil { - return err - } - value = v - } - vals := strings.SplitN(value, "=", 2) - if len(vals) == 1 { - (opts.values)[vals[0]] = "" - } else { - (opts.values)[vals[0]] = vals[1] - } - return nil -} - -// GetAll returns the values of MapOpts as a map. -func (opts *MapOpts) GetAll() map[string]string { - return opts.values -} - -func (opts *MapOpts) String() string { - return fmt.Sprintf("%v", opts.values) -} - -// Type returns a string name for this Option type -func (opts *MapOpts) Type() string { - return "map" -} - -// NewMapOpts creates a new MapOpts with the specified map of values and a validator. -func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts { - if values == nil { - values = make(map[string]string) - } - return &MapOpts{ - values: values, - validator: validator, - } -} - -// NamedMapOpts is a MapOpts struct with a configuration name. -// This struct is useful to keep reference to the assigned -// field name in the internal configuration struct. -type NamedMapOpts struct { - name string - MapOpts -} - -var _ NamedOption = &NamedMapOpts{} - -// NewNamedMapOpts creates a reference to a new NamedMapOpts struct. -func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts { - return &NamedMapOpts{ - name: name, - MapOpts: *NewMapOpts(values, validator), - } -} - -// Name returns the name of the NamedMapOpts in the configuration. -func (o *NamedMapOpts) Name() string { - return o.name -} - -// ValidatorFctType defines a validator function that returns a validated string and/or an error. -type ValidatorFctType func(val string) (string, error) - -// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error -type ValidatorFctListType func(val string) ([]string, error) - -// ValidateIPAddress validates an Ip address. -func ValidateIPAddress(val string) (string, error) { - var ip = net.ParseIP(strings.TrimSpace(val)) - if ip != nil { - return ip.String(), nil - } - return "", fmt.Errorf("%s is not an ip address", val) -} - -// ValidateDNSSearch validates domain for resolvconf search configuration. -// A zero length domain is represented by a dot (.). -func ValidateDNSSearch(val string) (string, error) { - if val = strings.Trim(val, " "); val == "." { - return val, nil - } - return validateDomain(val) -} - -func validateDomain(val string) (string, error) { - if alphaRegexp.FindString(val) == "" { - return "", fmt.Errorf("%s is not a valid domain", val) - } - ns := domainRegexp.FindSubmatch([]byte(val)) - if len(ns) > 0 && len(ns[1]) < 255 { - return string(ns[1]), nil - } - return "", fmt.Errorf("%s is not a valid domain", val) -} - -// ValidateLabel validates that the specified string is a valid label, and returns it. -// Labels are in the form on key=value. -func ValidateLabel(val string) (string, error) { - if strings.Count(val, "=") < 1 { - return "", fmt.Errorf("bad attribute format: %s", val) - } - return val, nil -} - -// ValidateSingleGenericResource validates that a single entry in the -// generic resource list is valid. 
-// i.e 'GPU=UID1' is valid however 'GPU:UID1' or 'UID1' isn't -func ValidateSingleGenericResource(val string) (string, error) { - if strings.Count(val, "=") < 1 { - return "", fmt.Errorf("invalid node-generic-resource format `%s` expected `name=value`", val) - } - return val, nil -} - -// ParseLink parses and validates the specified string as a link format (name:alias) -func ParseLink(val string) (string, string, error) { - if val == "" { - return "", "", fmt.Errorf("empty string specified for links") - } - arr := strings.Split(val, ":") - if len(arr) > 2 { - return "", "", fmt.Errorf("bad format for links: %s", val) - } - if len(arr) == 1 { - return val, val, nil - } - // This is kept because we can actually get a HostConfig with links - // from an already created container and the format is not `foo:bar` - // but `/foo:/c1/bar` - if strings.HasPrefix(arr[0], "/") { - _, alias := path.Split(arr[1]) - return arr[0][1:], alias, nil - } - return arr[0], arr[1], nil -} - -// MemBytes is a type for human readable memory bytes (like 128M, 2g, etc) -type MemBytes int64 - -// String returns the string format of the human readable memory bytes -func (m *MemBytes) String() string { - // NOTE: In spf13/pflag/flag.go, "0" is considered as "zero value" while "0 B" is not. - // We return "0" in case value is 0 here so that the default value is hidden. - // (Sometimes "default 0 B" is actually misleading) - if m.Value() != 0 { - return units.BytesSize(float64(m.Value())) - } - return "0" -} - -// Set sets the value of the MemBytes by passing a string -func (m *MemBytes) Set(value string) error { - val, err := units.RAMInBytes(value) - *m = MemBytes(val) - return err -} - -// Type returns the type -func (m *MemBytes) Type() string { - return "bytes" -} - -// Value returns the value in int64 -func (m *MemBytes) Value() int64 { - return int64(*m) -} - -// UnmarshalJSON is the customized unmarshaler for MemBytes -func (m *MemBytes) UnmarshalJSON(s []byte) error { - if len(s) <= 2 || s[0] != '"' || s[len(s)-1] != '"' { - return fmt.Errorf("invalid size: %q", s) - } - val, err := units.RAMInBytes(string(s[1 : len(s)-1])) - *m = MemBytes(val) - return err -} diff --git a/vendor/github.com/docker/docker/opts/opts_unix.go b/vendor/github.com/docker/docker/opts/opts_unix.go deleted file mode 100644 index 0c32367cb..000000000 --- a/vendor/github.com/docker/docker/opts/opts_unix.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build !windows - -package opts // import "github.com/docker/docker/opts" - -// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080 -const DefaultHTTPHost = "localhost" diff --git a/vendor/github.com/docker/docker/opts/opts_windows.go b/vendor/github.com/docker/docker/opts/opts_windows.go deleted file mode 100644 index 0e1b6c6d1..000000000 --- a/vendor/github.com/docker/docker/opts/opts_windows.go +++ /dev/null @@ -1,56 +0,0 @@ -package opts // import "github.com/docker/docker/opts" - -// TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5. -// @jhowardmsft, @swernli. -// -// On Windows, this mitigates a problem with the default options of running -// a docker client against a local docker daemon on TP5. -// -// What was found that if the default host is "localhost", even if the client -// (and daemon as this is local) is not physically on a network, and the DNS -// cache is flushed (ipconfig /flushdns), then the client will pause for -// exactly one second when connecting to the daemon for calls. 
For example -// using docker run windowsservercore cmd, the CLI will send a create followed -// by an attach. You see the delay between the attach finishing and the attach -// being seen by the daemon. -// -// Here's some daemon debug logs with additional debug spew put in. The -// AfterWriteJSON log is the very last thing the daemon does as part of the -// create call. The POST /attach is the second CLI call. Notice the second -// time gap. -// -// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs" -// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig" -// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...." -// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking.... -// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...." -// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...." -// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func" -// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create" -// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2" -// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate" -// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON" -// ... 1 second gap here.... -// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach" -// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1" -// -// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change -// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory, -// the Windows networking stack is supposed to resolve "localhost" internally, -// without hitting DNS, or even reading the hosts file (which is why localhost -// is commented out in the hosts file on Windows). -// -// We have validated that working around this using the actual IPv4 localhost -// address does not cause the delay. -// -// This does not occur with the docker client built with 1.4.3 on the same -// Windows build, regardless of whether the daemon is built using 1.5.1 -// or 1.4.3. It does not occur on Linux. We also verified we see the same thing -// on a cross-compiled Windows binary (from Linux). -// -// Final note: This is a mitigation, not a 'real' fix. It is still susceptible -// to the delay if a user were to do 'docker run -H=tcp://localhost:2375...' -// explicitly. - -// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080 -const DefaultHTTPHost = "127.0.0.1" diff --git a/vendor/github.com/docker/docker/opts/quotedstring.go b/vendor/github.com/docker/docker/opts/quotedstring.go deleted file mode 100644 index 6c889070e..000000000 --- a/vendor/github.com/docker/docker/opts/quotedstring.go +++ /dev/null @@ -1,37 +0,0 @@ -package opts // import "github.com/docker/docker/opts" - -// QuotedString is a string that may have extra quotes around the value. The -// quotes are stripped from the value. 
-type QuotedString struct { - value *string -} - -// Set sets a new value -func (s *QuotedString) Set(val string) error { - *s.value = trimQuotes(val) - return nil -} - -// Type returns the type of the value -func (s *QuotedString) Type() string { - return "string" -} - -func (s *QuotedString) String() string { - return *s.value -} - -func trimQuotes(value string) string { - lastIndex := len(value) - 1 - for _, char := range []byte{'\'', '"'} { - if value[0] == char && value[lastIndex] == char { - return value[1:lastIndex] - } - } - return value -} - -// NewQuotedString returns a new quoted string option -func NewQuotedString(value *string) *QuotedString { - return &QuotedString{value: value} -} diff --git a/vendor/github.com/docker/docker/opts/runtime.go b/vendor/github.com/docker/docker/opts/runtime.go deleted file mode 100644 index 4b9babf0a..000000000 --- a/vendor/github.com/docker/docker/opts/runtime.go +++ /dev/null @@ -1,79 +0,0 @@ -package opts // import "github.com/docker/docker/opts" - -import ( - "fmt" - "strings" - - "github.com/docker/docker/api/types" -) - -// RuntimeOpt defines a map of Runtimes -type RuntimeOpt struct { - name string - stockRuntimeName string - values *map[string]types.Runtime -} - -// NewNamedRuntimeOpt creates a new RuntimeOpt -func NewNamedRuntimeOpt(name string, ref *map[string]types.Runtime, stockRuntime string) *RuntimeOpt { - if ref == nil { - ref = &map[string]types.Runtime{} - } - return &RuntimeOpt{name: name, values: ref, stockRuntimeName: stockRuntime} -} - -// Name returns the name of the NamedListOpts in the configuration. -func (o *RuntimeOpt) Name() string { - return o.name -} - -// Set validates and updates the list of Runtimes -func (o *RuntimeOpt) Set(val string) error { - parts := strings.SplitN(val, "=", 2) - if len(parts) != 2 { - return fmt.Errorf("invalid runtime argument: %s", val) - } - - parts[0] = strings.TrimSpace(parts[0]) - parts[1] = strings.TrimSpace(parts[1]) - if parts[0] == "" || parts[1] == "" { - return fmt.Errorf("invalid runtime argument: %s", val) - } - - parts[0] = strings.ToLower(parts[0]) - if parts[0] == o.stockRuntimeName { - return fmt.Errorf("runtime name '%s' is reserved", o.stockRuntimeName) - } - - if _, ok := (*o.values)[parts[0]]; ok { - return fmt.Errorf("runtime '%s' was already defined", parts[0]) - } - - (*o.values)[parts[0]] = types.Runtime{Path: parts[1]} - - return nil -} - -// String returns Runtime values as a string. 
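A small sketch of the QuotedString behaviour above: exactly one matching pair of outer quotes is stripped, mismatched quotes pass through, and note that trimQuotes assumes a non-empty input:

    package main

    import (
        "fmt"

        "github.com/docker/docker/opts"
    )

    func main() {
        var v string
        qs := opts.NewQuotedString(&v)
        for _, in := range []string{`"secret"`, `'secret'`, `"secret'`} {
            _ = qs.Set(in)
            fmt.Printf("%s -> %s\n", in, qs.String())
        }
        // "secret" -> secret, 'secret' -> secret, "secret' -> "secret' (unchanged)
    }
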
-func (o *RuntimeOpt) String() string { - var out []string - for k := range *o.values { - out = append(out, k) - } - - return fmt.Sprintf("%v", out) -} - -// GetMap returns a map of Runtimes (name: path) -func (o *RuntimeOpt) GetMap() map[string]types.Runtime { - if o.values != nil { - return *o.values - } - - return map[string]types.Runtime{} -} - -// Type returns the type of the option -func (o *RuntimeOpt) Type() string { - return "runtime" -} diff --git a/vendor/github.com/docker/docker/opts/ulimit.go b/vendor/github.com/docker/docker/opts/ulimit.go deleted file mode 100644 index 0e2a36236..000000000 --- a/vendor/github.com/docker/docker/opts/ulimit.go +++ /dev/null @@ -1,81 +0,0 @@ -package opts // import "github.com/docker/docker/opts" - -import ( - "fmt" - - "github.com/docker/go-units" -) - -// UlimitOpt defines a map of Ulimits -type UlimitOpt struct { - values *map[string]*units.Ulimit -} - -// NewUlimitOpt creates a new UlimitOpt -func NewUlimitOpt(ref *map[string]*units.Ulimit) *UlimitOpt { - if ref == nil { - ref = &map[string]*units.Ulimit{} - } - return &UlimitOpt{ref} -} - -// Set validates a Ulimit and sets its name as a key in UlimitOpt -func (o *UlimitOpt) Set(val string) error { - l, err := units.ParseUlimit(val) - if err != nil { - return err - } - - (*o.values)[l.Name] = l - - return nil -} - -// String returns Ulimit values as a string. -func (o *UlimitOpt) String() string { - var out []string - for _, v := range *o.values { - out = append(out, v.String()) - } - - return fmt.Sprintf("%v", out) -} - -// GetList returns a slice of pointers to Ulimits. -func (o *UlimitOpt) GetList() []*units.Ulimit { - var ulimits []*units.Ulimit - for _, v := range *o.values { - ulimits = append(ulimits, v) - } - - return ulimits -} - -// Type returns the option type -func (o *UlimitOpt) Type() string { - return "ulimit" -} - -// NamedUlimitOpt defines a named map of Ulimits -type NamedUlimitOpt struct { - name string - UlimitOpt -} - -var _ NamedOption = &NamedUlimitOpt{} - -// NewNamedUlimitOpt creates a new NamedUlimitOpt -func NewNamedUlimitOpt(name string, ref *map[string]*units.Ulimit) *NamedUlimitOpt { - if ref == nil { - ref = &map[string]*units.Ulimit{} - } - return &NamedUlimitOpt{ - name: name, - UlimitOpt: *NewUlimitOpt(ref), - } -} - -// Name returns the option name -func (o *NamedUlimitOpt) Name() string { - return o.name -} diff --git a/vendor/github.com/docker/docker/pkg/aaparser/aaparser.go b/vendor/github.com/docker/docker/pkg/aaparser/aaparser.go deleted file mode 100644 index 9c12e8db8..000000000 --- a/vendor/github.com/docker/docker/pkg/aaparser/aaparser.go +++ /dev/null @@ -1,89 +0,0 @@ -// Package aaparser is a convenience package interacting with `apparmor_parser`. -package aaparser // import "github.com/docker/docker/pkg/aaparser" - -import ( - "fmt" - "os/exec" - "strconv" - "strings" -) - -const ( - binary = "apparmor_parser" -) - -// GetVersion returns the major and minor version of apparmor_parser. -func GetVersion() (int, error) { - output, err := cmd("", "--version") - if err != nil { - return -1, err - } - - return parseVersion(output) -} - -// LoadProfile runs `apparmor_parser -Kr` on a specified apparmor profile to -// replace the profile. The `-K` is necessary to make sure that apparmor_parser -// doesn't try to write to a read-only filesystem. -func LoadProfile(profilePath string) error { - _, err := cmd("", "-Kr", profilePath) - return err -} - -// cmd runs `apparmor_parser` with the passed arguments. 
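UlimitOpt above leans entirely on go-units for parsing; a minimal sketch, assuming the vendored docker/go-units package:

    package main

    import (
        "fmt"

        "github.com/docker/docker/opts"
    )

    func main() {
        u := opts.NewUlimitOpt(nil) // nil ref: an empty map is allocated
        if err := u.Set("nofile=1024:2048"); err != nil { // name=soft:hard
            panic(err)
        }
        fmt.Println(u.String()) // [nofile=1024:2048]
        fmt.Println(u.Type())   // ulimit
    }
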
-func cmd(dir string, arg ...string) (string, error) { - c := exec.Command(binary, arg...) - c.Dir = dir - - output, err := c.CombinedOutput() - if err != nil { - return "", fmt.Errorf("running `%s %s` failed with output: %s\nerror: %v", c.Path, strings.Join(c.Args, " "), output, err) - } - - return string(output), nil -} - -// parseVersion takes the output from `apparmor_parser --version` and returns -// a representation of the {major, minor, patch} version as a single number of -// the form MMmmPPP {major, minor, patch}. -func parseVersion(output string) (int, error) { - // output is in the form of the following: - // AppArmor parser version 2.9.1 - // Copyright (C) 1999-2008 Novell Inc. - // Copyright 2009-2012 Canonical Ltd. - - lines := strings.SplitN(output, "\n", 2) - words := strings.Split(lines[0], " ") - version := words[len(words)-1] - - // split by major minor version - v := strings.Split(version, ".") - if len(v) == 0 || len(v) > 3 { - return -1, fmt.Errorf("parsing version failed for output: `%s`", output) - } - - // Default the versions to 0. - var majorVersion, minorVersion, patchLevel int - - majorVersion, err := strconv.Atoi(v[0]) - if err != nil { - return -1, err - } - - if len(v) > 1 { - minorVersion, err = strconv.Atoi(v[1]) - if err != nil { - return -1, err - } - } - if len(v) > 2 { - patchLevel, err = strconv.Atoi(v[2]) - if err != nil { - return -1, err - } - } - - // major*10^5 + minor*10^3 + patch*10^0 - numericVersion := majorVersion*1e5 + minorVersion*1e3 + patchLevel - return numericVersion, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go deleted file mode 100644 index daddebded..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive.go +++ /dev/null @@ -1,1291 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "bufio" - "bytes" - "compress/bzip2" - "compress/gzip" - "context" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "runtime" - "strconv" - "strings" - "syscall" - "time" - - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" -) - -var unpigzPath string - -func init() { - if path, err := exec.LookPath("unpigz"); err != nil { - logrus.Debug("unpigz binary not found in PATH, falling back to go gzip library") - } else { - logrus.Debugf("Using unpigz binary found at path %s", path) - unpigzPath = path - } -} - -type ( - // Compression is the state represents if compressed or not. - Compression int - // WhiteoutFormat is the format of whiteouts unpacked - WhiteoutFormat int - - // TarOptions wraps the tar options. - TarOptions struct { - IncludeFiles []string - ExcludePatterns []string - Compression Compression - NoLchown bool - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap - ChownOpts *idtools.IDPair - IncludeSourceDir bool - // WhiteoutFormat is the expected on disk format for whiteout files. - // This format will be converted to the standard format on pack - // and from the standard format on unpack. - WhiteoutFormat WhiteoutFormat - // When unpacking, specifies whether overwriting a directory with a - // non-directory is allowed and vice versa. - NoOverwriteDirNonDir bool - // For each include when creating an archive, the included name will be - // replaced with the matching name from this map. 
- RebaseNames map[string]string - InUserNS bool - } -) - -// Archiver implements the Archiver interface and allows the reuse of most utility functions of -// this package with a pluggable Untar function. Also, to facilitate the passing of specific id -// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. -type Archiver struct { - Untar func(io.Reader, string, *TarOptions) error - IDMappingsVar *idtools.IDMappings -} - -// NewDefaultArchiver returns a new Archiver without any IDMappings -func NewDefaultArchiver() *Archiver { - return &Archiver{Untar: Untar, IDMappingsVar: &idtools.IDMappings{}} -} - -// breakoutError is used to differentiate errors related to breaking out -// When testing archive breakout in the unit tests, this error is expected -// in order for the test to pass. -type breakoutError error - -const ( - // Uncompressed represents the uncompressed. - Uncompressed Compression = iota - // Bzip2 is bzip2 compression algorithm. - Bzip2 - // Gzip is gzip compression algorithm. - Gzip - // Xz is xz compression algorithm. - Xz -) - -const ( - // AUFSWhiteoutFormat is the default format for whiteouts - AUFSWhiteoutFormat WhiteoutFormat = iota - // OverlayWhiteoutFormat formats whiteout according to the overlay - // standard. - OverlayWhiteoutFormat -) - -const ( - modeISDIR = 040000 // Directory - modeISFIFO = 010000 // FIFO - modeISREG = 0100000 // Regular file - modeISLNK = 0120000 // Symbolic link - modeISBLK = 060000 // Block special file - modeISCHR = 020000 // Character special file - modeISSOCK = 0140000 // Socket -) - -// IsArchivePath checks if the (possibly compressed) file at the given path -// starts with a tar file header. -func IsArchivePath(path string) bool { - file, err := os.Open(path) - if err != nil { - return false - } - defer file.Close() - rdr, err := DecompressStream(file) - if err != nil { - return false - } - defer rdr.Close() - r := tar.NewReader(rdr) - _, err = r.Next() - return err == nil -} - -// DetectCompression detects the compression algorithm of the source. -func DetectCompression(source []byte) Compression { - for compression, m := range map[Compression][]byte{ - Bzip2: {0x42, 0x5A, 0x68}, - Gzip: {0x1F, 0x8B, 0x08}, - Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, - } { - if len(source) < len(m) { - logrus.Debug("Len too short") - continue - } - if bytes.Equal(m, source[:len(m)]) { - return compression - } - } - return Uncompressed -} - -func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { - args := []string{"xz", "-d", "-c", "-q"} - - return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) -} - -func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { - if unpigzPath == "" { - return gzip.NewReader(buf) - } - - disablePigzEnv := os.Getenv("MOBY_DISABLE_PIGZ") - if disablePigzEnv != "" { - if disablePigz, err := strconv.ParseBool(disablePigzEnv); err != nil { - return nil, err - } else if disablePigz { - return gzip.NewReader(buf) - } - } - - return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) -} - -func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { - return ioutils.NewReadCloserWrapper(readBuf, func() error { - cancel() - return readBuf.Close() - }) -} - -// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. 
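The magic-byte table in DetectCompression can be exercised directly; a minimal sketch (assuming the vendored pkg/archive import path):

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/archive"
    )

    func main() {
        samples := map[string][]byte{
            "gzip":  {0x1F, 0x8B, 0x08, 0x00},
            "bzip2": {0x42, 0x5A, 0x68, 0x39},
            "xz":    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
            "plain": []byte("hello"),
        }
        for name, b := range samples {
            c := archive.DetectCompression(b)
            fmt.Printf("%-5s -> .%s\n", name, c.Extension())
        }
        // gzip -> .tar.gz, bzip2 -> .tar.bz2, xz -> .tar.xz, plain -> .tar
    }
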
-func DecompressStream(archive io.Reader) (io.ReadCloser, error) { - p := pools.BufioReader32KPool - buf := p.Get(archive) - bs, err := buf.Peek(10) - if err != nil && err != io.EOF { - // Note: we'll ignore any io.EOF error because there are some odd - // cases where the layer.tar file will be empty (zero bytes) and - // that results in an io.EOF from the Peek() call. So, in those - // cases we'll just treat it as a non-compressed stream and - // that means just create an empty layer. - // See Issue 18170 - return nil, err - } - - compression := DetectCompression(bs) - switch compression { - case Uncompressed: - readBufWrapper := p.NewReadCloserWrapper(buf, buf) - return readBufWrapper, nil - case Gzip: - ctx, cancel := context.WithCancel(context.Background()) - - gzReader, err := gzDecompress(ctx, buf) - if err != nil { - cancel() - return nil, err - } - readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) - return wrapReadCloser(readBufWrapper, cancel), nil - case Bzip2: - bz2Reader := bzip2.NewReader(buf) - readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) - return readBufWrapper, nil - case Xz: - ctx, cancel := context.WithCancel(context.Background()) - - xzReader, err := xzDecompress(ctx, buf) - if err != nil { - cancel() - return nil, err - } - readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) - return wrapReadCloser(readBufWrapper, cancel), nil - default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -// CompressStream compresses the dest with specified compression algorithm. -func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { - p := pools.BufioWriter32KPool - buf := p.Get(dest) - switch compression { - case Uncompressed: - writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) - return writeBufWrapper, nil - case Gzip: - gzWriter := gzip.NewWriter(dest) - writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) - return writeBufWrapper, nil - case Bzip2, Xz: - // archive/bzip2 does not support writing, and there is no xz support at all - // However, this is not a problem as docker only currently generates gzipped tars - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to -// modify the contents or header of an entry in the archive. If the file already -// exists in the archive the TarModifierFunc will be called with the Header and -// a reader which will return the files content. If the file does not exist both -// header and content will be nil. -type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) - -// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the -// tar stream are modified if they match any of the keys in mods. 
-func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { - pipeReader, pipeWriter := io.Pipe() - - go func() { - tarReader := tar.NewReader(inputTarStream) - tarWriter := tar.NewWriter(pipeWriter) - defer inputTarStream.Close() - defer tarWriter.Close() - - modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { - header, data, err := modifier(name, original, tarReader) - switch { - case err != nil: - return err - case header == nil: - return nil - } - - header.Name = name - header.Size = int64(len(data)) - if err := tarWriter.WriteHeader(header); err != nil { - return err - } - if len(data) != 0 { - if _, err := tarWriter.Write(data); err != nil { - return err - } - } - return nil - } - - var err error - var originalHeader *tar.Header - for { - originalHeader, err = tarReader.Next() - if err == io.EOF { - break - } - if err != nil { - pipeWriter.CloseWithError(err) - return - } - - modifier, ok := mods[originalHeader.Name] - if !ok { - // No modifiers for this file, copy the header and data - if err := tarWriter.WriteHeader(originalHeader); err != nil { - pipeWriter.CloseWithError(err) - return - } - if _, err := pools.Copy(tarWriter, tarReader); err != nil { - pipeWriter.CloseWithError(err) - return - } - continue - } - delete(mods, originalHeader.Name) - - if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { - pipeWriter.CloseWithError(err) - return - } - } - - // Apply the modifiers that haven't matched any files in the archive - for name, modifier := range mods { - if err := modify(name, nil, modifier, nil); err != nil { - pipeWriter.CloseWithError(err) - return - } - } - - pipeWriter.Close() - - }() - return pipeReader -} - -// Extension returns the extension of a file that uses the specified compression algorithm. -func (compression *Compression) Extension() string { - switch *compression { - case Uncompressed: - return "tar" - case Bzip2: - return "tar.bz2" - case Gzip: - return "tar.gz" - case Xz: - return "tar.xz" - } - return "" -} - -// FileInfoHeader creates a populated Header from fi. -// Compared to archive pkg this function fills in more information. -// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), -// which have been deleted since Go 1.9 archive/tar. 
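ReplaceFileTarWrapper is easiest to follow end to end; a hedged sketch that rewrites one entry of an in-memory tar (the entry name here is hypothetical):

    package main

    import (
        "archive/tar"
        "bytes"
        "fmt"
        "io"
        "io/ioutil"

        "github.com/docker/docker/pkg/archive"
    )

    func main() {
        // Build a one-entry tar in memory.
        var buf bytes.Buffer
        tw := tar.NewWriter(&buf)
        tw.WriteHeader(&tar.Header{Name: "etc/hostname", Mode: 0644, Size: 4})
        tw.Write([]byte("old\n"))
        tw.Close()

        mods := map[string]archive.TarModifierFunc{
            "etc/hostname": func(path string, h *tar.Header, r io.Reader) (*tar.Header, []byte, error) {
                // h is non-nil because the entry exists; the wrapper resets
                // h.Size to len(data) before writing the new header.
                return h, []byte("new\n"), nil
            },
        }

        tr := tar.NewReader(archive.ReplaceFileTarWrapper(ioutil.NopCloser(&buf), mods))
        hdr, _ := tr.Next()
        data, _ := ioutil.ReadAll(tr)
        fmt.Printf("%s: %s", hdr.Name, data) // etc/hostname: new
    }
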
-func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { - hdr, err := tar.FileInfoHeader(fi, link) - if err != nil { - return nil, err - } - hdr.Format = tar.FormatPAX - hdr.ModTime = hdr.ModTime.Truncate(time.Second) - hdr.AccessTime = time.Time{} - hdr.ChangeTime = time.Time{} - hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) - name, err = canonicalTarName(name, fi.IsDir()) - if err != nil { - return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err) - } - hdr.Name = name - if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { - return nil, err - } - return hdr, nil -} - -// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar -// https://github.com/golang/go/commit/66b5a2f -func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { - fm := fi.Mode() - switch { - case fm.IsRegular(): - mode |= modeISREG - case fi.IsDir(): - mode |= modeISDIR - case fm&os.ModeSymlink != 0: - mode |= modeISLNK - case fm&os.ModeDevice != 0: - if fm&os.ModeCharDevice != 0 { - mode |= modeISCHR - } else { - mode |= modeISBLK - } - case fm&os.ModeNamedPipe != 0: - mode |= modeISFIFO - case fm&os.ModeSocket != 0: - mode |= modeISSOCK - } - return mode -} - -// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem -// to a tar header -func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { - capability, _ := system.Lgetxattr(path, "security.capability") - if capability != nil { - hdr.Xattrs = make(map[string]string) - hdr.Xattrs["security.capability"] = string(capability) - } - return nil -} - -type tarWhiteoutConverter interface { - ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) - ConvertRead(*tar.Header, string) (bool, error) -} - -type tarAppender struct { - TarWriter *tar.Writer - Buffer *bufio.Writer - - // for hardlink mapping - SeenFiles map[uint64]string - IDMappings *idtools.IDMappings - ChownOpts *idtools.IDPair - - // For packing and unpacking whiteout files in the - // non standard format. The whiteout files defined - // by the AUFS standard are used as the tar whiteout - // standard. - WhiteoutConverter tarWhiteoutConverter -} - -func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender { - return &tarAppender{ - SeenFiles: make(map[uint64]string), - TarWriter: tar.NewWriter(writer), - Buffer: pools.BufioWriter32KPool.Get(nil), - IDMappings: idMapping, - ChownOpts: chownOpts, - } -} - -// canonicalTarName provides a platform-independent and consistent posix-style -//path for files and directories to be archived regardless of the platform. 
-func canonicalTarName(name string, isDir bool) (string, error) { - name, err := CanonicalTarNameForPath(name) - if err != nil { - return "", err - } - - // suffix with '/' for directories - if isDir && !strings.HasSuffix(name, "/") { - name += "/" - } - return name, nil -} - -// addTarFile adds to the tar archive a file from `path` as `name` -func (ta *tarAppender) addTarFile(path, name string) error { - fi, err := os.Lstat(path) - if err != nil { - return err - } - - var link string - if fi.Mode()&os.ModeSymlink != 0 { - var err error - link, err = os.Readlink(path) - if err != nil { - return err - } - } - - hdr, err := FileInfoHeader(name, fi, link) - if err != nil { - return err - } - if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { - return err - } - - // if it's not a directory and has more than 1 link, - // it's hard linked, so set the type flag accordingly - if !fi.IsDir() && hasHardlinks(fi) { - inode, err := getInodeFromStat(fi.Sys()) - if err != nil { - return err - } - // a link should have a name that it links too - // and that linked name should be first in the tar archive - if oldpath, ok := ta.SeenFiles[inode]; ok { - hdr.Typeflag = tar.TypeLink - hdr.Linkname = oldpath - hdr.Size = 0 // This Must be here for the writer math to add up! - } else { - ta.SeenFiles[inode] = name - } - } - - //check whether the file is overlayfs whiteout - //if yes, skip re-mapping container ID mappings. - isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 - - //handle re-mapping container ID mappings back to host ID mappings before - //writing tar headers/files. We skip whiteout files because they were written - //by the kernel and already have proper ownership relative to the host - if !isOverlayWhiteout && - !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && - !ta.IDMappings.Empty() { - fileIDPair, err := getFileUIDGID(fi.Sys()) - if err != nil { - return err - } - hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair) - if err != nil { - return err - } - } - - // explicitly override with ChownOpts - if ta.ChownOpts != nil { - hdr.Uid = ta.ChownOpts.UID - hdr.Gid = ta.ChownOpts.GID - } - - if ta.WhiteoutConverter != nil { - wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) - if err != nil { - return err - } - - // If a new whiteout file exists, write original hdr, then - // replace hdr with wo to be written after. Whiteouts should - // always be written after the original. Note the original - // hdr may have been updated to be a whiteout with returning - // a whiteout header - if wo != nil { - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - return err - } - if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - return fmt.Errorf("tar: cannot use whiteout for non-empty file") - } - hdr = wo - } - } - - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - return err - } - - if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - // We use system.OpenSequential to ensure we use sequential file - // access on Windows to avoid depleting the standby list. - // On Linux, this equates to a regular os.Open. 
- file, err := system.OpenSequential(path) - if err != nil { - return err - } - - ta.Buffer.Reset(ta.TarWriter) - defer ta.Buffer.Reset(nil) - _, err = io.Copy(ta.Buffer, file) - file.Close() - if err != nil { - return err - } - err = ta.Buffer.Flush() - if err != nil { - return err - } - } - - return nil -} - -func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns bool) error { - // hdr.Mode is in linux format, which we can use for sycalls, - // but for os.Foo() calls we need the mode converted to os.FileMode, - // so use hdrInfo.Mode() (they differ for e.g. setuid bits) - hdrInfo := hdr.FileInfo() - - switch hdr.Typeflag { - case tar.TypeDir: - // Create directory unless it exists as a directory already. - // In that case we just want to merge the two - if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { - if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { - return err - } - } - - case tar.TypeReg, tar.TypeRegA: - // Source is regular file. We use system.OpenFileSequential to use sequential - // file access to avoid depleting the standby list on Windows. - // On Linux, this equates to a regular os.OpenFile - file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) - if err != nil { - return err - } - if _, err := io.Copy(file, reader); err != nil { - file.Close() - return err - } - file.Close() - - case tar.TypeBlock, tar.TypeChar: - if inUserns { // cannot create devices in a userns - return nil - } - // Handle this is an OS-specific way - if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { - return err - } - - case tar.TypeFifo: - // Handle this is an OS-specific way - if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { - return err - } - - case tar.TypeLink: - targetPath := filepath.Join(extractDir, hdr.Linkname) - // check for hardlink breakout - if !strings.HasPrefix(targetPath, extractDir) { - return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) - } - if err := os.Link(targetPath, path); err != nil { - return err - } - - case tar.TypeSymlink: - // path -> hdr.Linkname = targetPath - // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file - targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) - - // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because - // that symlink would first have to be created, which would be caught earlier, at this very check: - if !strings.HasPrefix(targetPath, extractDir) { - return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) - } - if err := os.Symlink(hdr.Linkname, path); err != nil { - return err - } - - case tar.TypeXGlobalHeader: - logrus.Debug("PAX Global Extended Headers found and ignored") - return nil - - default: - return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) - } - - // Lchown is not supported on Windows. - if Lchown && runtime.GOOS != "windows" { - if chownOpts == nil { - chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} - } - if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { - return err - } - } - - var errors []string - for key, value := range hdr.Xattrs { - if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { - if err == syscall.ENOTSUP { - // We ignore errors here because not all graphdrivers support - // xattrs *cough* old versions of AUFS *cough*. 
However only - // ENOTSUP should be emitted in that case, otherwise we still - // bail. - errors = append(errors, err.Error()) - continue - } - return err - } - - } - - if len(errors) > 0 { - logrus.WithFields(logrus.Fields{ - "errors": errors, - }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") - } - - // There is no LChmod, so ignore mode for symlink. Also, this - // must happen after chown, as that can modify the file mode - if err := handleLChmod(hdr, path, hdrInfo); err != nil { - return err - } - - aTime := hdr.AccessTime - if aTime.Before(hdr.ModTime) { - // Last access time should never be before last modified time. - aTime = hdr.ModTime - } - - // system.Chtimes doesn't support a NOFOLLOW flag atm - if hdr.Typeflag == tar.TypeLink { - if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { - return err - } - } - } else if hdr.Typeflag != tar.TypeSymlink { - if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { - return err - } - } else { - ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} - if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { - return err - } - } - return nil -} - -// Tar creates an archive from the directory at `path`, and returns it as a -// stream of bytes. -func Tar(path string, compression Compression) (io.ReadCloser, error) { - return TarWithOptions(path, &TarOptions{Compression: compression}) -} - -// TarWithOptions creates an archive from the directory at `path`, only including files whose relative -// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. -func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { - - // Fix the source path to work with long path names. This is a no-op - // on platforms other than Windows. - srcPath = fixVolumePathPrefix(srcPath) - - pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) - if err != nil { - return nil, err - } - - pipeReader, pipeWriter := io.Pipe() - - compressWriter, err := CompressStream(pipeWriter, options.Compression) - if err != nil { - return nil, err - } - - go func() { - ta := newTarAppender( - idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), - compressWriter, - options.ChownOpts, - ) - ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat) - - defer func() { - // Make sure to check the error on Close. - if err := ta.TarWriter.Close(); err != nil { - logrus.Errorf("Can't close tar writer: %s", err) - } - if err := compressWriter.Close(); err != nil { - logrus.Errorf("Can't close compress writer: %s", err) - } - if err := pipeWriter.Close(); err != nil { - logrus.Errorf("Can't close pipe writer: %s", err) - } - }() - - // this buffer is needed for the duration of this piped stream - defer pools.BufioWriter32KPool.Put(ta.Buffer) - - // In general we log errors here but ignore them because - // during e.g. a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this - - stat, err := os.Lstat(srcPath) - if err != nil { - return - } - - if !stat.IsDir() { - // We can't later join a non-dir with any includes because the - // 'walk' will error if "file/." is stat-ed and "file" is not a - // directory. So, we must split the source path and use the - // basename as the include. 
- if len(options.IncludeFiles) > 0 { - logrus.Warn("Tar: Can't archive a file with includes") - } - - dir, base := SplitPathDirEntry(srcPath) - srcPath = dir - options.IncludeFiles = []string{base} - } - - if len(options.IncludeFiles) == 0 { - options.IncludeFiles = []string{"."} - } - - seen := make(map[string]bool) - - for _, include := range options.IncludeFiles { - rebaseName := options.RebaseNames[include] - - walkRoot := getWalkRoot(srcPath, include) - filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { - if err != nil { - logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) - return nil - } - - relFilePath, err := filepath.Rel(srcPath, filePath) - if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { - // Error getting relative path OR we are looking - // at the source directory path. Skip in both situations. - return nil - } - - if options.IncludeSourceDir && include == "." && relFilePath != "." { - relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) - } - - skip := false - - // If "include" is an exact match for the current file - // then even if there's an "excludePatterns" pattern that - // matches it, don't skip it. IOW, assume an explicit 'include' - // is asking for that file no matter what - which is true - // for some files, like .dockerignore and Dockerfile (sometimes) - if include != relFilePath { - skip, err = pm.Matches(relFilePath) - if err != nil { - logrus.Errorf("Error matching %s: %v", relFilePath, err) - return err - } - } - - if skip { - // If we want to skip this file and its a directory - // then we should first check to see if there's an - // excludes pattern (e.g. !dir/file) that starts with this - // dir. If so then we can't skip this dir. - - // Its not a dir then so we can just return/skip. - if !f.IsDir() { - return nil - } - - // No exceptions (!...) in patterns so just skip dir - if !pm.Exclusions() { - return filepath.SkipDir - } - - dirSlash := relFilePath + string(filepath.Separator) - - for _, pat := range pm.Patterns() { - if !pat.Exclusion() { - continue - } - if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { - // found a match - so can't skip this dir - return nil - } - } - - // No matching exclusion dir so just skip dir - return filepath.SkipDir - } - - if seen[relFilePath] { - return nil - } - seen[relFilePath] = true - - // Rename the base resource. - if rebaseName != "" { - var replacement string - if rebaseName != string(filepath.Separator) { - // Special case the root directory to replace with an - // empty string instead so that we don't end up with - // double slashes in the paths. - replacement = rebaseName - } - - relFilePath = strings.Replace(relFilePath, include, replacement, 1) - } - - if err := ta.addTarFile(filePath, relFilePath); err != nil { - logrus.Errorf("Can't add file %s to tar: %s", filePath, err) - // if pipe is broken, stop writing tar stream to it - if err == io.ErrClosedPipe { - return err - } - } - return nil - }) - } - }() - - return pipeReader, nil -} - -// Unpack unpacks the decompressedArchive to dest with options. 
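Driving TarWithOptions with excludes looks roughly like this (hypothetical paths; exclusion exceptions use the dockerignore-style "!" prefix):

    package main

    import (
        "io"
        "io/ioutil"

        "github.com/docker/docker/pkg/archive"
    )

    func main() {
        rdr, err := archive.TarWithOptions("/tmp/src", &archive.TarOptions{
            Compression:     archive.Gzip,
            ExcludePatterns: []string{"*.log", "!keep.log"},
        })
        if err != nil {
            panic(err)
        }
        defer rdr.Close()
        // Errors during the walk surface mid-stream, so always drain the reader.
        io.Copy(ioutil.Discard, rdr)
    }
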
-func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { - tr := tar.NewReader(decompressedArchive) - trBuf := pools.BufioReader32KPool.Get(nil) - defer pools.BufioReader32KPool.Put(trBuf) - - var dirs []*tar.Header - idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) - rootIDs := idMappings.RootPair() - whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat) - - // Iterate through the files in the archive. -loop: - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - return err - } - - // Normalize name, for safety and for a simple is-root check - // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: - // This keeps "..\" as-is, but normalizes "\..\" to "\". - hdr.Name = filepath.Clean(hdr.Name) - - for _, exclude := range options.ExcludePatterns { - if strings.HasPrefix(hdr.Name, exclude) { - continue loop - } - } - - // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in - // the filepath format for the OS on which the daemon is running. Hence - // the check for a slash-suffix MUST be done in an OS-agnostic way. - if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { - // Not the root directory, ensure that the parent directory exists - parent := filepath.Dir(hdr.Name) - parentPath := filepath.Join(dest, parent) - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs) - if err != nil { - return err - } - } - } - - path := filepath.Join(dest, hdr.Name) - rel, err := filepath.Rel(dest, path) - if err != nil { - return err - } - if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { - return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) - } - - // If path exits we almost always just want to remove and replace it - // The only exception is when it is a directory *and* the file from - // the layer is also a directory. Then we want to merge them (i.e. - // just apply the metadata from the layer). - if fi, err := os.Lstat(path); err == nil { - if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { - // If NoOverwriteDirNonDir is true then we cannot replace - // an existing directory with a non-directory from the archive. - return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) - } - - if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { - // If NoOverwriteDirNonDir is true then we cannot replace - // an existing non-directory with a directory from the archive. - return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) - } - - if fi.IsDir() && hdr.Name == "." 
{ - continue - } - - if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { - if err := os.RemoveAll(path); err != nil { - return err - } - } - } - trBuf.Reset(tr) - - if err := remapIDs(idMappings, hdr); err != nil { - return err - } - - if whiteoutConverter != nil { - writeFile, err := whiteoutConverter.ConvertRead(hdr, path) - if err != nil { - return err - } - if !writeFile { - continue - } - } - - if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { - return err - } - - // Directory mtimes must be handled at the end to avoid further - // file creation in them to modify the directory mtime - if hdr.Typeflag == tar.TypeDir { - dirs = append(dirs, hdr) - } - } - - for _, hdr := range dirs { - path := filepath.Join(dest, hdr.Name) - - if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { - return err - } - } - return nil -} - -// Untar reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive may be compressed with one of the following algorithms: -// identity (uncompressed), gzip, bzip2, xz. -// FIXME: specify behavior when target path exists vs. doesn't exist. -func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { - return untarHandler(tarArchive, dest, options, true) -} - -// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive must be an uncompressed stream. -func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { - return untarHandler(tarArchive, dest, options, false) -} - -// Handler for teasing out the automatic decompression -func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { - if tarArchive == nil { - return fmt.Errorf("Empty archive") - } - dest = filepath.Clean(dest) - if options == nil { - options = &TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - - r := tarArchive - if decompress { - decompressedArchive, err := DecompressStream(tarArchive) - if err != nil { - return err - } - defer decompressedArchive.Close() - r = decompressedArchive - } - - return Unpack(r, dest, options) -} - -// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. -// If either Tar or Untar fails, TarUntar aborts and returns the error. -func (archiver *Archiver) TarUntar(src, dst string) error { - logrus.Debugf("TarUntar(%s %s)", src, dst) - archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) - if err != nil { - return err - } - defer archive.Close() - options := &TarOptions{ - UIDMaps: archiver.IDMappingsVar.UIDs(), - GIDMaps: archiver.IDMappingsVar.GIDs(), - } - return archiver.Untar(archive, dst, options) -} - -// UntarPath untar a file from path to a destination, src is the source tar file path. -func (archiver *Archiver) UntarPath(src, dst string) error { - archive, err := os.Open(src) - if err != nil { - return err - } - defer archive.Close() - options := &TarOptions{ - UIDMaps: archiver.IDMappingsVar.UIDs(), - GIDMaps: archiver.IDMappingsVar.GIDs(), - } - return archiver.Untar(archive, dst, options) -} - -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. -// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. 
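The Archiver plumbing above composes as below; a hedged sketch with hypothetical paths:

    package main

    import "github.com/docker/docker/pkg/archive"

    func main() {
        a := archive.NewDefaultArchiver() // no ID remapping

        // Tar /tmp/src and unpack it into /tmp/dst in one streamed pass.
        if err := a.TarUntar("/tmp/src", "/tmp/dst"); err != nil {
            panic(err)
        }

        // Unpack an existing tarball from disk.
        if err := a.UntarPath("/tmp/layer.tar", "/tmp/rootfs"); err != nil {
            panic(err)
        }
    }
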
-func (archiver *Archiver) CopyWithTar(src, dst string) error { - srcSt, err := os.Stat(src) - if err != nil { - return err - } - if !srcSt.IsDir() { - return archiver.CopyFileWithTar(src, dst) - } - - // if this Archiver is set up with ID mapping we need to create - // the new destination directory with the remapped root UID/GID pair - // as owner - rootIDs := archiver.IDMappingsVar.RootPair() - // Create dst, copy src's content into it - logrus.Debugf("Creating dest directory: %s", dst) - if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { - return err - } - logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) - return archiver.TarUntar(src, dst) -} - -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. -func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { - logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) - srcSt, err := os.Stat(src) - if err != nil { - return err - } - - if srcSt.IsDir() { - return fmt.Errorf("Can't copy a directory") - } - - // Clean up the trailing slash. This must be done in an operating - // system specific manner. - if dst[len(dst)-1] == os.PathSeparator { - dst = filepath.Join(dst, filepath.Base(src)) - } - // Create the holding directory if necessary - if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil { - return err - } - - r, w := io.Pipe() - errC := make(chan error, 1) - - go func() { - defer close(errC) - - errC <- func() error { - defer w.Close() - - srcF, err := os.Open(src) - if err != nil { - return err - } - defer srcF.Close() - - hdr, err := tar.FileInfoHeader(srcSt, "") - if err != nil { - return err - } - hdr.Format = tar.FormatPAX - hdr.ModTime = hdr.ModTime.Truncate(time.Second) - hdr.AccessTime = time.Time{} - hdr.ChangeTime = time.Time{} - hdr.Name = filepath.Base(dst) - hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - - if err := remapIDs(archiver.IDMappingsVar, hdr); err != nil { - return err - } - - tw := tar.NewWriter(w) - defer tw.Close() - if err := tw.WriteHeader(hdr); err != nil { - return err - } - if _, err := io.Copy(tw, srcF); err != nil { - return err - } - return nil - }() - }() - defer func() { - if er := <-errC; err == nil && er != nil { - err = er - } - }() - - err = archiver.Untar(r, filepath.Dir(dst), nil) - if err != nil { - r.CloseWithError(err) - } - return err -} - -// IDMappings returns the IDMappings of the archiver. -func (archiver *Archiver) IDMappings() *idtools.IDMappings { - return archiver.IDMappingsVar -} - -func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error { - ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}) - hdr.Uid, hdr.Gid = ids.UID, ids.GID - return err -} - -// cmdStream executes a command, and returns its stdout as a stream. -// If the command fails to run or doesn't complete successfully, an error -// will be returned, including anything written on stderr. 
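-//
-// A hedged sketch of typical use (the xz command line is an assumption for
-// illustration; any filter that reads stdin and writes stdout would do):
-//
-//	out, err := cmdStream(exec.Command("xz", "-d", "-c"), compressed)
-//	if err != nil {
-//		return err
-//	}
-//	defer out.Close()
-//	// out streams the command's stdout; a command failure surfaces as a
-//	// read error that includes anything the command wrote to stderr.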
-func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { - cmd.Stdin = input - pipeR, pipeW := io.Pipe() - cmd.Stdout = pipeW - var errBuf bytes.Buffer - cmd.Stderr = &errBuf - - // Run the command and return the pipe - if err := cmd.Start(); err != nil { - return nil, err - } - - // Copy stdout to the returned pipe - go func() { - if err := cmd.Wait(); err != nil { - pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) - } else { - pipeW.Close() - } - }() - - return pipeR, nil -} - -// NewTempArchive reads the content of src into a temporary file, and returns the contents -// of that file as an archive. The archive can only be read once - as soon as reading completes, -// the file will be deleted. -func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { - f, err := ioutil.TempFile(dir, "") - if err != nil { - return nil, err - } - if _, err := io.Copy(f, src); err != nil { - return nil, err - } - if _, err := f.Seek(0, 0); err != nil { - return nil, err - } - st, err := f.Stat() - if err != nil { - return nil, err - } - size := st.Size() - return &TempArchive{File: f, Size: size}, nil -} - -// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, -// the file will be deleted. -type TempArchive struct { - *os.File - Size int64 // Pre-computed from Stat().Size() as a convenience - read int64 - closed bool -} - -// Close closes the underlying file if it's still open, or does a no-op -// to allow callers to try to close the TempArchive multiple times safely. -func (archive *TempArchive) Close() error { - if archive.closed { - return nil - } - - archive.closed = true - - return archive.File.Close() -} - -func (archive *TempArchive) Read(data []byte) (int, error) { - n, err := archive.File.Read(data) - archive.read += int64(n) - if err != nil || archive.read == archive.Size { - archive.Close() - os.Remove(archive.File.Name()) - } - return n, err -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go deleted file mode 100644 index 970d4d068..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go +++ /dev/null @@ -1,92 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/system" - "golang.org/x/sys/unix" -) - -func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { - if format == OverlayWhiteoutFormat { - return overlayWhiteoutConverter{} - } - return nil -} - -type overlayWhiteoutConverter struct{} - -func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { - // convert whiteouts to AUFS format - if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { - // we just rename the file and make it normal - dir, filename := filepath.Split(hdr.Name) - hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) - hdr.Mode = 0600 - hdr.Typeflag = tar.TypeReg - hdr.Size = 0 - } - - if fi.Mode()&os.ModeDir != 0 { - // convert opaque dirs to AUFS format by writing an empty file with the prefix - opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") - if err != nil { - return nil, err - } - if len(opaque) == 1 && opaque[0] == 'y' { - if hdr.Xattrs != nil { - delete(hdr.Xattrs, "trusted.overlay.opaque") - } - - // create a header for the whiteout file - // it should inherit some properties 
from the parent, but be a regular file - wo = &tar.Header{ - Typeflag: tar.TypeReg, - Mode: hdr.Mode & int64(os.ModePerm), - Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), - Size: 0, - Uid: hdr.Uid, - Uname: hdr.Uname, - Gid: hdr.Gid, - Gname: hdr.Gname, - AccessTime: hdr.AccessTime, - ChangeTime: hdr.ChangeTime, - } - } - } - - return -} - -func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { - base := filepath.Base(path) - dir := filepath.Dir(path) - - // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay - if base == WhiteoutOpaqueDir { - err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0) - // don't write the file itself - return false, err - } - - // if a file was deleted and we are using overlay, we need to create a character device - if strings.HasPrefix(base, WhiteoutPrefix) { - originalBase := base[len(WhiteoutPrefix):] - originalPath := filepath.Join(dir, originalBase) - - if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { - return false, err - } - if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { - return false, err - } - - // don't write the file itself - return false, nil - } - - return true, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go deleted file mode 100644 index 462dfc632..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_other.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !linux - -package archive // import "github.com/docker/docker/pkg/archive" - -func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go deleted file mode 100644 index e81076c17..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go +++ /dev/null @@ -1,114 +0,0 @@ -// +build !windows - -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "errors" - "os" - "path/filepath" - "syscall" - - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/system" - rsystem "github.com/opencontainers/runc/libcontainer/system" - "golang.org/x/sys/unix" -) - -// fixVolumePathPrefix does platform specific processing to ensure that if -// the path being passed in is not in a volume path format, convert it to one. -func fixVolumePathPrefix(srcPath string) string { - return srcPath -} - -// getWalkRoot calculates the root path when performing a TarWithOptions. -// We use a separate function as this is platform specific. On Linux, we -// can't use filepath.Join(srcPath,include) because this will clean away -// a trailing "." or "/" which may be important. -func getWalkRoot(srcPath string, include string) string { - return srcPath + string(filepath.Separator) + include -} - -// CanonicalTarNameForPath returns platform-specific filepath -// to canonical posix-style path for tar archival. p is relative -// path. -func CanonicalTarNameForPath(p string) (string, error) { - return p, nil // already unix-style -} - -// chmodTarEntry is used to adjust the file permissions used in tar header based -// on the platform the archival is done. 
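-//
-// For example (an illustrative comparison, not code from this file): on unix
-// chmodTarEntry(0644) returns 0644 unchanged, while the Windows variant of
-// this helper (later in this patch) forces +x and masks to 0755, so the same
-// input becomes 0755 there.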
- -func chmodTarEntry(perm os.FileMode) os.FileMode { - return perm // noop for unix as golang APIs provide perm bits correctly -} - -func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { - s, ok := stat.(*syscall.Stat_t) - - if ok { - // Currently go does not fill in the major/minors - if s.Mode&unix.S_IFBLK != 0 || - s.Mode&unix.S_IFCHR != 0 { - hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) // nolint: unconvert - hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) // nolint: unconvert - } - } - - return -} - -func getInodeFromStat(stat interface{}) (inode uint64, err error) { - s, ok := stat.(*syscall.Stat_t) - - if ok { - inode = s.Ino - } - - return -} - -func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { - s, ok := stat.(*syscall.Stat_t) - - if !ok { - return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t") - } - return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - if rsystem.RunningInUserNS() { - // cannot create a device if running in user namespace - return nil - } - - mode := uint32(hdr.Mode & 07777) - switch hdr.Typeflag { - case tar.TypeBlock: - mode |= unix.S_IFBLK - case tar.TypeChar: - mode |= unix.S_IFCHR - case tar.TypeFifo: - mode |= unix.S_IFIFO - } - - return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))) -} - -func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { - if hdr.Typeflag == tar.TypeLink { - if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := os.Chmod(path, hdrInfo.Mode()); err != nil { - return err - } - } - } else if hdr.Typeflag != tar.TypeSymlink { - if err := os.Chmod(path, hdrInfo.Mode()); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go deleted file mode 100644 index 69aadd823..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go +++ /dev/null @@ -1,77 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/longpath" -) - -// fixVolumePathPrefix does platform specific processing to ensure that if -// the path being passed in is not in a volume path format, convert it to one. -func fixVolumePathPrefix(srcPath string) string { - return longpath.AddPrefix(srcPath) -} - -// getWalkRoot calculates the root path when performing a TarWithOptions. -// We use a separate function as this is platform specific. -func getWalkRoot(srcPath string, include string) string { - return filepath.Join(srcPath, include) -} - -// CanonicalTarNameForPath returns platform-specific filepath -// to canonical posix-style path for tar archival. p is relative -// path. -func CanonicalTarNameForPath(p string) (string, error) { - // windows: convert windows style relative path with backslashes - // into forward slashes. 
Since windows does not allow '/' or '\' - // in file names, it is mostly safe to replace; however, we must - // check just in case - if strings.Contains(p, "/") { - return "", fmt.Errorf("Windows path contains forward slash: %s", p) - } - return strings.Replace(p, string(os.PathSeparator), "/", -1), nil - -} - -// chmodTarEntry is used to adjust the file permissions used in tar header based -// on the platform the archival is done. -func chmodTarEntry(perm os.FileMode) os.FileMode { - //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) - permPart := perm & os.ModePerm - noPermPart := perm &^ os.ModePerm - // Add the x bit: make everything +x from windows - permPart |= 0111 - permPart &= 0755 - - return noPermPart | permPart -} - -func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { - // do nothing. no notion of Rdev, Nlink in stat on Windows - return -} - -func getInodeFromStat(stat interface{}) (inode uint64, err error) { - // do nothing. no notion of Inode in stat on Windows - return -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - return nil -} - -func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { - return nil -} - -func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { - // no notion of file ownership mapping yet on Windows - return idtools.IDPair{UID: 0, GID: 0}, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go deleted file mode 100644 index 43734db5b..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes.go +++ /dev/null @@ -1,441 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "sort" - "strings" - "syscall" - "time" - - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" -) - -// ChangeType represents the change type. -type ChangeType int - -const ( - // ChangeModify represents the modify operation. - ChangeModify = iota - // ChangeAdd represents the add operation. - ChangeAdd - // ChangeDelete represents the delete operation. - ChangeDelete -) - -func (c ChangeType) String() string { - switch c { - case ChangeModify: - return "C" - case ChangeAdd: - return "A" - case ChangeDelete: - return "D" - } - return "" -} - -// Change represents a change; it wraps the change type and path. -// It describes changes of the files in the path with respect to the -// parent layers. The change could be modify, add, delete. -// This is used for layer diff.
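-//
-// For example (values are illustrative only), a layer that edits /etc/hosts,
-// adds /opt/app, and deletes /tmp/scratch could be described as:
-//
-//	[]Change{
-//		{Path: "/etc/hosts", Kind: ChangeModify},   // prints "C /etc/hosts"
-//		{Path: "/opt/app", Kind: ChangeAdd},        // prints "A /opt/app"
-//		{Path: "/tmp/scratch", Kind: ChangeDelete}, // prints "D /tmp/scratch"
-//	}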
-type Change struct { - Path string - Kind ChangeType -} - -func (change *Change) String() string { - return fmt.Sprintf("%s %s", change.Kind, change.Path) -} - -// for sort.Sort -type changesByPath []Change - -func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } -func (c changesByPath) Len() int { return len(c) } -func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } - -// GNU tar and the Go tar writer don't have sub-second mtime -// precision, which is problematic when we apply changes via tar -// files. We handle this by comparing for exact times, *or* the same -// second count with either a or b having exactly 0 nanoseconds -func sameFsTime(a, b time.Time) bool { - return a == b || - (a.Unix() == b.Unix() && - (a.Nanosecond() == 0 || b.Nanosecond() == 0)) -} - -func sameFsTimeSpec(a, b syscall.Timespec) bool { - return a.Sec == b.Sec && - (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) -} - -// Changes walks the path rw and determines changes for the files in the path, -// with respect to the parent layers -func Changes(layers []string, rw string) ([]Change, error) { - return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip) -} - -func aufsMetadataSkip(path string) (skip bool, err error) { - skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) - if err != nil { - skip = true - } - return -} - -func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { - f := filepath.Base(path) - - // If there is a whiteout, then the file was removed - if strings.HasPrefix(f, WhiteoutPrefix) { - originalFile := f[len(WhiteoutPrefix):] - return filepath.Join(filepath.Dir(path), originalFile), nil - } - - return "", nil -} - -type skipChange func(string) (bool, error) -type deleteChange func(string, string, os.FileInfo) (string, error) - -func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) { - var ( - changes []Change - changedDirs = make(map[string]struct{}) - ) - - err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - path, err = filepath.Rel(rw, path) - if err != nil { - return err - } - - // As this runs on the daemon side, file paths are OS specific. - path = filepath.Join(string(os.PathSeparator), path) - - // Skip root - if path == string(os.PathSeparator) { - return nil - } - - if sc != nil { - if skip, err := sc(path); skip { - return err - } - } - - change := Change{ - Path: path, - } - - deletedFile, err := dc(rw, path, f) - if err != nil { - return err - } - - // Find out what kind of modification happened - if deletedFile != "" { - change.Path = deletedFile - change.Kind = ChangeDelete - } else { - // Otherwise, the file was added - change.Kind = ChangeAdd - - // ...Unless it already existed in a top layer, in which case, it's a modification - for _, layer := range layers { - stat, err := os.Stat(filepath.Join(layer, path)) - if err != nil && !os.IsNotExist(err) { - return err - } - if err == nil { - // The file existed in the top layer, so that's a modification - - // However, if it's a directory, maybe it wasn't actually modified.
- // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar - if stat.IsDir() && f.IsDir() { - if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { - // Both directories are the same, don't record the change - return nil - } - } - change.Kind = ChangeModify - break - } - } - } - - // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. - // This block is here to ensure the change is recorded even if the - // modify time, mode and size of the parent directory in the rw and ro layers are all equal. - // Check https://github.com/docker/docker/pull/13590 for details. - if f.IsDir() { - changedDirs[path] = struct{}{} - } - if change.Kind == ChangeAdd || change.Kind == ChangeDelete { - parent := filepath.Dir(path) - if _, ok := changedDirs[parent]; !ok && parent != "/" { - changes = append(changes, Change{Path: parent, Kind: ChangeModify}) - changedDirs[parent] = struct{}{} - } - } - - // Record change - changes = append(changes, change) - return nil - }) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - return changes, nil -} - -// FileInfo describes the information of a file. -type FileInfo struct { - parent *FileInfo - name string - stat *system.StatT - children map[string]*FileInfo - capability []byte - added bool -} - -// LookUp looks up the file information of a file. -func (info *FileInfo) LookUp(path string) *FileInfo { - // As this runs on the daemon side, file paths are OS specific. - parent := info - if path == string(os.PathSeparator) { - return info - } - - pathElements := strings.Split(path, string(os.PathSeparator)) - for _, elem := range pathElements { - if elem != "" { - child := parent.children[elem] - if child == nil { - return nil - } - parent = child - } - } - return parent -} - -func (info *FileInfo) path() string { - if info.parent == nil { - // As this runs on the daemon side, file paths are OS specific. - return string(os.PathSeparator) - } - return filepath.Join(info.parent.path(), info.name) -} - -func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { - - sizeAtEntry := len(*changes) - - if oldInfo == nil { - // add - change := Change{ - Path: info.path(), - Kind: ChangeAdd, - } - *changes = append(*changes, change) - info.added = true - } - - // We make a copy so we can modify it to detect additions - // also, we only recurse on the old dir if the new info is a directory - // otherwise any previous delete/change is considered recursive - oldChildren := make(map[string]*FileInfo) - if oldInfo != nil && info.isDir() { - for k, v := range oldInfo.children { - oldChildren[k] = v - } - } - - for name, newChild := range info.children { - oldChild := oldChildren[name] - if oldChild != nil { - // change? - oldStat := oldChild.stat - newStat := newChild.stat - // Note: We can't compare inode or ctime or blocksize here, because these change - // when copying a file into a container. However, that is not generally a problem - // because any content change will change mtime, and any status change should - // be visible when actually comparing the stat fields. 
The only time this - // breaks down is if some code intentionally hides a change by setting - // back mtime - if statDifferent(oldStat, newStat) || - !bytes.Equal(oldChild.capability, newChild.capability) { - change := Change{ - Path: newChild.path(), - Kind: ChangeModify, - } - *changes = append(*changes, change) - newChild.added = true - } - - // Remove from copy so we can detect deletions - delete(oldChildren, name) - } - - newChild.addChanges(oldChild, changes) - } - for _, oldChild := range oldChildren { - // delete - change := Change{ - Path: oldChild.path(), - Kind: ChangeDelete, - } - *changes = append(*changes, change) - } - - // If there were changes inside this directory, we need to add it, even if the directory - // itself wasn't changed. This is needed to properly save and restore filesystem permissions. - // As this runs on the daemon side, file paths are OS specific. - if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { - change := Change{ - Path: info.path(), - Kind: ChangeModify, - } - // Let's insert the directory entry before the recently added entries located inside this dir - *changes = append(*changes, change) // just to resize the slice, will be overwritten - copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) - (*changes)[sizeAtEntry] = change - } - -} - -// Changes add changes to file information. -func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { - var changes []Change - - info.addChanges(oldInfo, &changes) - - return changes -} - -func newRootFileInfo() *FileInfo { - // As this runs on the daemon side, file paths are OS specific. - root := &FileInfo{ - name: string(os.PathSeparator), - children: make(map[string]*FileInfo), - } - return root -} - -// ChangesDirs compares two directories and generates an array of Change objects describing the changes. -// If oldDir is "", then all files in newDir will be Add-Changes. -func ChangesDirs(newDir, oldDir string) ([]Change, error) { - var ( - oldRoot, newRoot *FileInfo - ) - if oldDir == "" { - emptyDir, err := ioutil.TempDir("", "empty") - if err != nil { - return nil, err - } - defer os.Remove(emptyDir) - oldDir = emptyDir - } - oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) - if err != nil { - return nil, err - } - - return newRoot.Changes(oldRoot), nil -} - -// ChangesSize calculates the size in bytes of the provided changes, based on newDir. -func ChangesSize(newDir string, changes []Change) int64 { - var ( - size int64 - sf = make(map[uint64]struct{}) - ) - for _, change := range changes { - if change.Kind == ChangeModify || change.Kind == ChangeAdd { - file := filepath.Join(newDir, change.Path) - fileInfo, err := os.Lstat(file) - if err != nil { - logrus.Errorf("Can not stat %q: %s", file, err) - continue - } - - if fileInfo != nil && !fileInfo.IsDir() { - if hasHardlinks(fileInfo) { - inode := getIno(fileInfo) - if _, ok := sf[inode]; !ok { - size += fileInfo.Size() - sf[inode] = struct{}{} - } - } else { - size += fileInfo.Size() - } - } - } - } - return size -} - -// ExportChanges produces an Archive from the provided changes, relative to dir. 
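-//
-// A sketch of intended use (the paths are invented for illustration; nil ID
-// maps mean no UID/GID remapping):
-//
-//	changes, err := ChangesDirs("/layers/new", "/layers/old")
-//	if err != nil {
-//		return err
-//	}
-//	rc, err := ExportChanges("/layers/new", changes, nil, nil)
-//	if err != nil {
-//		return err
-//	}
-//	defer rc.Close()
-//	// rc streams a tar layer in which deletions appear as whiteout files.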
-func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) { - reader, writer := io.Pipe() - go func() { - ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil) - - // this buffer is needed for the duration of this piped stream - defer pools.BufioWriter32KPool.Put(ta.Buffer) - - sort.Sort(changesByPath(changes)) - - // In general we log errors here but ignore them because - // during e.g. a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this - for _, change := range changes { - if change.Kind == ChangeDelete { - whiteOutDir := filepath.Dir(change.Path) - whiteOutBase := filepath.Base(change.Path) - whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) - timestamp := time.Now() - hdr := &tar.Header{ - Name: whiteOut[1:], - Size: 0, - ModTime: timestamp, - AccessTime: timestamp, - ChangeTime: timestamp, - } - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - logrus.Debugf("Can't write whiteout header: %s", err) - } - } else { - path := filepath.Join(dir, change.Path) - if err := ta.addTarFile(path, change.Path[1:]); err != nil { - logrus.Debugf("Can't add file %s to tar: %s", path, err) - } - } - } - - // Make sure to check the error on Close. - if err := ta.TarWriter.Close(); err != nil { - logrus.Debugf("Can't close layer: %s", err) - } - if err := writer.Close(); err != nil { - logrus.Debugf("failed close Changes writer: %s", err) - } - }() - return reader, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go deleted file mode 100644 index 78a5393c8..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go +++ /dev/null @@ -1,313 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - "sort" - "syscall" - "unsafe" - - "github.com/docker/docker/pkg/system" - "golang.org/x/sys/unix" -) - -// walker is used to implement collectFileInfoForChanges on linux. Where this - // method in general returns the entire contents of two directory trees, we - // optimize some FS calls out on linux. In particular, we take advantage of the - // fact that getdents(2) returns the inode of each file in the directory being - // walked, which, when walking two trees in parallel to generate a list of - // changes, can be used to prune subtrees without ever having to lstat(2) them - // directly. Eliminating stat calls in this way can save seconds on large - // images. -type walker struct { - dir1 string - dir2 string - root1 *FileInfo - root2 *FileInfo -} - -// collectFileInfoForChanges returns a complete representation of the trees -// rooted at dir1 and dir2, with one important exception: any subtree or -// leaf where the inode and device numbers are an exact match between dir1 -// and dir2 will be pruned from the results. This method is *only* to be used -// for generating a list of changes between the two directories, as it does not -// reflect the full contents.
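-//
-// The pruning test is essentially the following (an illustrative sketch of
-// the idea, not a quote of this file's code; fi1 and fi2 stand for the
-// os.FileInfo of the same relative path under dir1 and dir2):
-//
-//	si1 := fi1.Sys().(*syscall.Stat_t)
-//	si2 := fi2.Sys().(*syscall.Stat_t)
-//	if si1.Dev == si2.Dev && si1.Ino == si2.Ino {
-//		// same inode on the same device: the subtree cannot differ,
-//		// so neither side records it
-//	}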
-func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) { - w := &walker{ - dir1: dir1, - dir2: dir2, - root1: newRootFileInfo(), - root2: newRootFileInfo(), - } - - i1, err := os.Lstat(w.dir1) - if err != nil { - return nil, nil, err - } - i2, err := os.Lstat(w.dir2) - if err != nil { - return nil, nil, err - } - - if err := w.walk("/", i1, i2); err != nil { - return nil, nil, err - } - - return w.root1, w.root2, nil -} - -// Given a FileInfo, its path info, and a reference to the root of the tree -// being constructed, register this file with the tree. -func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { - if fi == nil { - return nil - } - parent := root.LookUp(filepath.Dir(path)) - if parent == nil { - return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path) - } - info := &FileInfo{ - name: filepath.Base(path), - children: make(map[string]*FileInfo), - parent: parent, - } - cpath := filepath.Join(dir, path) - stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) - if err != nil { - return err - } - info.stat = stat - info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access - parent.children[info.name] = info - return nil -} - -// Walk a subtree rooted at the same path in both trees being iterated. For -// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d -func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { - // Register these nodes with the return trees, unless we're still at the - // (already-created) roots: - if path != "/" { - if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { - return err - } - if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { - return err - } - } - - is1Dir := i1 != nil && i1.IsDir() - is2Dir := i2 != nil && i2.IsDir() - - sameDevice := false - if i1 != nil && i2 != nil { - si1 := i1.Sys().(*syscall.Stat_t) - si2 := i2.Sys().(*syscall.Stat_t) - if si1.Dev == si2.Dev { - sameDevice = true - } - } - - // If these files are both non-existent, or leaves (non-dirs), we are done. - if !is1Dir && !is2Dir { - return nil - } - - // Fetch the names of all the files contained in both directories being walked: - var names1, names2 []nameIno - if is1Dir { - names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access - if err != nil { - return err - } - } - if is2Dir { - names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access - if err != nil { - return err - } - } - - // We have lists of the files contained in both parallel directories, sorted - // in the same order. Walk them in parallel, generating a unique merged list - // of all items present in either or both directories. 
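-	// For example (illustrative): names1 = [a, c, d] and names2 = [b, c],
-	// where both "c" entries share an inode on the same device, merge to
-	// [a, b, d]; the matching "c" is pruned and never lstat'ed or recursed.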
- var names []string - ix1 := 0 - ix2 := 0 - - for { - if ix1 >= len(names1) { - break - } - if ix2 >= len(names2) { - break - } - - ni1 := names1[ix1] - ni2 := names2[ix2] - - switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { - case -1: // ni1 < ni2 -- advance ni1 - // we will not encounter ni1 in names2 - names = append(names, ni1.name) - ix1++ - case 0: // ni1 == ni2 - if ni1.ino != ni2.ino || !sameDevice { - names = append(names, ni1.name) - } - ix1++ - ix2++ - case 1: // ni1 > ni2 -- advance ni2 - // we will not encounter ni2 in names1 - names = append(names, ni2.name) - ix2++ - } - } - for ix1 < len(names1) { - names = append(names, names1[ix1].name) - ix1++ - } - for ix2 < len(names2) { - names = append(names, names2[ix2].name) - ix2++ - } - - // For each of the names present in either or both of the directories being - // iterated, stat the name under each root, and recurse the pair of them: - for _, name := range names { - fname := filepath.Join(path, name) - var cInfo1, cInfo2 os.FileInfo - if is1Dir { - cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access - if err != nil && !os.IsNotExist(err) { - return err - } - } - if is2Dir { - cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access - if err != nil && !os.IsNotExist(err) { - return err - } - } - if err = w.walk(fname, cInfo1, cInfo2); err != nil { - return err - } - } - return nil -} - -// {name,inode} pairs used to support the early-pruning logic of the walker type -type nameIno struct { - name string - ino uint64 -} - -type nameInoSlice []nameIno - -func (s nameInoSlice) Len() int { return len(s) } -func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } - -// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode -// numbers further up the stack when reading directory contents. Unlike -// os.Readdirnames, which returns a list of filenames, this function returns a -// list of {filename,inode} pairs. -func readdirnames(dirname string) (names []nameIno, err error) { - var ( - size = 100 - buf = make([]byte, 4096) - nbuf int - bufp int - nb int - ) - - f, err := os.Open(dirname) - if err != nil { - return nil, err - } - defer f.Close() - - names = make([]nameIno, 0, size) // Empty with room to grow. - for { - // Refill the buffer if necessary - if bufp >= nbuf { - bufp = 0 - nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux - if nbuf < 0 { - nbuf = 0 - } - if err != nil { - return nil, os.NewSyscallError("readdirent", err) - } - if nbuf <= 0 { - break // EOF - } - } - - // Drain the buffer - nb, names = parseDirent(buf[bufp:nbuf], names) - bufp += nb - } - - sl := nameInoSlice(names) - sort.Sort(sl) - return sl, nil -} - -// parseDirent is a minor modification of unix.ParseDirent (linux version) -// which returns {name,inode} pairs instead of just names. -func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { - origlen := len(buf) - for len(buf) > 0 { - dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) - buf = buf[dirent.Reclen:] - if dirent.Ino == 0 { // File absent in directory. - continue - } - bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) - var name = string(bytes[0:clen(bytes[:])]) - if name == "." || name == ".." 
{ // Useless names - continue - } - names = append(names, nameIno{name, dirent.Ino}) - } - return origlen - len(buf), names -} - -func clen(n []byte) int { - for i := 0; i < len(n); i++ { - if n[i] == 0 { - return i - } - } - return len(n) -} - -// OverlayChanges walks the path rw and determines changes for the files in the path, -// with respect to the parent layers -func OverlayChanges(layers []string, rw string) ([]Change, error) { - return changes(layers, rw, overlayDeletedFile, nil) -} - -func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) { - if fi.Mode()&os.ModeCharDevice != 0 { - s := fi.Sys().(*syscall.Stat_t) - if unix.Major(uint64(s.Rdev)) == 0 && unix.Minor(uint64(s.Rdev)) == 0 { // nolint: unconvert - return path, nil - } - } - if fi.Mode()&os.ModeDir != 0 { - opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque") - if err != nil { - return "", err - } - if len(opaque) == 1 && opaque[0] == 'y' { - return path, nil - } - } - - return "", nil - -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go deleted file mode 100644 index ba744741c..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_other.go +++ /dev/null @@ -1,97 +0,0 @@ -// +build !linux - -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "fmt" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/docker/docker/pkg/system" -) - -func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { - var ( - oldRoot, newRoot *FileInfo - err1, err2 error - errs = make(chan error, 2) - ) - go func() { - oldRoot, err1 = collectFileInfo(oldDir) - errs <- err1 - }() - go func() { - newRoot, err2 = collectFileInfo(newDir) - errs <- err2 - }() - - // block until both routines have returned - for i := 0; i < 2; i++ { - if err := <-errs; err != nil { - return nil, nil, err - } - } - - return oldRoot, newRoot, nil -} - -func collectFileInfo(sourceDir string) (*FileInfo, error) { - root := newRootFileInfo() - - err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - relPath, err := filepath.Rel(sourceDir, path) - if err != nil { - return err - } - - // As this runs on the daemon side, file paths are OS specific. - relPath = filepath.Join(string(os.PathSeparator), relPath) - - // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. - // Temporary workaround. If the returned path starts with two backslashes, - // trim it down to a single backslash. Only relevant on Windows. 
- if runtime.GOOS == "windows" { - if strings.HasPrefix(relPath, `\\`) { - relPath = relPath[1:] - } - } - - if relPath == string(os.PathSeparator) { - return nil - } - - parent := root.LookUp(filepath.Dir(relPath)) - if parent == nil { - return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) - } - - info := &FileInfo{ - name: filepath.Base(relPath), - children: make(map[string]*FileInfo), - parent: parent, - } - - s, err := system.Lstat(path) - if err != nil { - return err - } - info.stat = s - - info.capability, _ = system.Lgetxattr(path, "security.capability") - - parent.children[info.name] = info - - return nil - }) - if err != nil { - return nil, err - } - return root, nil -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go deleted file mode 100644 index c06a209d8..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build !windows - -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "os" - "syscall" - - "github.com/docker/docker/pkg/system" - "golang.org/x/sys/unix" -) - -func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { - // Don't look at size for dirs, it's not a good measure of change - if oldStat.Mode() != newStat.Mode() || - oldStat.UID() != newStat.UID() || - oldStat.GID() != newStat.GID() || - oldStat.Rdev() != newStat.Rdev() || - // Don't look at size for dirs, it's not a good measure of change - (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR && - (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { - return true - } - return false -} - -func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0 -} - -func getIno(fi os.FileInfo) uint64 { - return fi.Sys().(*syscall.Stat_t).Ino -} - -func hasHardlinks(fi os.FileInfo) bool { - return fi.Sys().(*syscall.Stat_t).Nlink > 1 -} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go deleted file mode 100644 index 6555c0136..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go +++ /dev/null @@ -1,30 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "os" - - "github.com/docker/docker/pkg/system" -) - -func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { - - // Don't look at size for dirs, it's not a good measure of change - if oldStat.Mtim() != newStat.Mtim() || - oldStat.Mode() != newStat.Mode() || - oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() { - return true - } - return false -} - -func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.Mode().IsDir() -} - -func getIno(fi os.FileInfo) (inode uint64) { - return -} - -func hasHardlinks(fi os.FileInfo) bool { - return false -} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go deleted file mode 100644 index d0f13ca79..000000000 --- a/vendor/github.com/docker/docker/pkg/archive/copy.go +++ /dev/null @@ -1,472 +0,0 @@ -package archive // import "github.com/docker/docker/pkg/archive" - -import ( - "archive/tar" - "errors" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" -) - -// Errors used or returned by this file.
-var ( - ErrNotDirectory = errors.New("not a directory") - ErrDirNotExists = errors.New("no such directory") - ErrCannotCopyDir = errors.New("cannot copy directory") - ErrInvalidCopySource = errors.New("invalid copy source content") -) - -// PreserveTrailingDotOrSeparator returns the given cleaned path (after -// processing using any utility functions from the path or filepath stdlib -// packages) and appends a trailing `/.` or `/` if its corresponding original -// path (from before being processed by utility functions from the path or -// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned -// path already ends in a `.` path segment, then another is not added. If the -// clean path already ends in the separator, then another is not added. -func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string { - // Ensure paths are in platform semantics - cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1) - originalPath = strings.Replace(originalPath, "/", string(sep), -1) - - if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { - if !hasTrailingPathSeparator(cleanedPath, sep) { - // Add a separator if it doesn't already end with one (a cleaned - // path would only end in a separator if it is the root). - cleanedPath += string(sep) - } - cleanedPath += "." - } - - if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) { - cleanedPath += string(sep) - } - - return cleanedPath -} - -// assertsDirectory returns whether the given path is -// asserted to be a directory, i.e., the path ends with -// a trailing '/' or `/.`, assuming a path separator of `/`. -func assertsDirectory(path string, sep byte) bool { - return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path) -} - -// hasTrailingPathSeparator returns whether the given -// path ends with the system's path separator character. -func hasTrailingPathSeparator(path string, sep byte) bool { - return len(path) > 0 && path[len(path)-1] == sep -} - -// specifiesCurrentDir returns whether the given path specifies -// a "current directory", i.e., the last path segment is `.`. -func specifiesCurrentDir(path string) bool { - return filepath.Base(path) == "." -} - -// SplitPathDirEntry splits the given path between its directory name and its -// basename by first cleaning the path but preserves a trailing "." if the -// original path specified the current directory. -func SplitPathDirEntry(path string) (dir, base string) { - cleanedPath := filepath.Clean(filepath.FromSlash(path)) - - if specifiesCurrentDir(path) { - cleanedPath += string(os.PathSeparator) + "." - } - - return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) -} - -// TarResource archives the resource described by the given CopyInfo to a Tar -// archive. A non-nil error is returned if sourcePath does not exist or is -// asserted to be a directory but exists as another type of file. -// -// This function acts as a convenient wrapper around TarWithOptions, which -// requires a directory as the source path. TarResource accepts either a -// directory or a file path and correctly sets the Tar options. -func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { - return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) -} - -// TarResourceRebase is like TarResource but renames the first path element of -// items in the resulting tar archive to match the given rebaseName if not "". 
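-//
-// Sketch of use (the paths and the rebase name are assumptions for
-// illustration):
-//
-//	rc, err := TarResourceRebase("/var/lib/app/config", "settings")
-//	if err != nil {
-//		return err
-//	}
-//	defer rc.Close()
-//	// entries that began with "config" now begin with "settings"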
-func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) { - sourcePath = normalizePath(sourcePath) - if _, err = os.Lstat(sourcePath); err != nil { - // Catches the case where the source does not exist or is not a - // directory if asserted to be a directory, as this also causes an - // error. - return - } - - // Separate the source path between its directory and - // the entry in that directory which we are archiving. - sourceDir, sourceBase := SplitPathDirEntry(sourcePath) - opts := TarResourceRebaseOpts(sourceBase, rebaseName) - - logrus.Debugf("copying %q from %q", sourceBase, sourceDir) - return TarWithOptions(sourceDir, opts) -} - -// TarResourceRebaseOpts does not perform the Tar, but instead just creates the rebase - // parameters to be sent to TarWithOptions (the TarOptions struct) -func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions { - filter := []string{sourceBase} - return &TarOptions{ - Compression: Uncompressed, - IncludeFiles: filter, - IncludeSourceDir: true, - RebaseNames: map[string]string{ - sourceBase: rebaseName, - }, - } -} - -// CopyInfo holds basic info about the source -// or destination path of a copy operation. -type CopyInfo struct { - Path string - Exists bool - IsDir bool - RebaseName string -} - -// CopyInfoSourcePath stats the given path to create a CopyInfo -// struct representing that resource for the source of an archive copy -// operation. The given path should be an absolute local path. A source path -// has all symlinks evaluated that appear before the last path separator ("/" -// on Unix). As it is to be a copy source, the path must exist. -func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) { - // normalize the file path and then evaluate the symlink; - // we will use the target file instead of the symlink if - // followLink is set - path = normalizePath(path) - - resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink) - if err != nil { - return CopyInfo{}, err - } - - stat, err := os.Lstat(resolvedPath) - if err != nil { - return CopyInfo{}, err - } - - return CopyInfo{ - Path: resolvedPath, - Exists: true, - IsDir: stat.IsDir(), - RebaseName: rebaseName, - }, nil -} - -// CopyInfoDestinationPath stats the given path to create a CopyInfo -// struct representing that resource for the destination of an archive copy -// operation. The given path should be an absolute local path. -func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { - maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot. - path = normalizePath(path) - originalPath := path - - stat, err := os.Lstat(path) - - if err == nil && stat.Mode()&os.ModeSymlink == 0 { - // The path exists and is not a symlink. - return CopyInfo{ - Path: path, - Exists: true, - IsDir: stat.IsDir(), - }, nil - } - - // While the path is a symlink. - for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { - if n > maxSymlinkIter { - // Don't follow symlinks more than this arbitrary number of times. - return CopyInfo{}, errors.New("too many symlinks in " + originalPath) - } - - // The path is a symbolic link. We need to evaluate it so that the - // destination of the copy operation is the link target and not the - // link itself. This is notably different than CopyInfoSourcePath which - // only evaluates symlinks before the last appearing path separator.
- // Also note that it is okay if the last path element is a broken - // symlink as the copy operation should create the target. - var linkTarget string - - linkTarget, err = os.Readlink(path) - if err != nil { - return CopyInfo{}, err - } - - if !system.IsAbs(linkTarget) { - // Join with the parent directory. - dstParent, _ := SplitPathDirEntry(path) - linkTarget = filepath.Join(dstParent, linkTarget) - } - - path = linkTarget - stat, err = os.Lstat(path) - } - - if err != nil { - // It's okay if the destination path doesn't exist. We can still - // continue the copy operation if the parent directory exists. - if !os.IsNotExist(err) { - return CopyInfo{}, err - } - - // Ensure destination parent dir exists. - dstParent, _ := SplitPathDirEntry(path) - - parentDirStat, err := os.Stat(dstParent) - if err != nil { - return CopyInfo{}, err - } - if !parentDirStat.IsDir() { - return CopyInfo{}, ErrNotDirectory - } - - return CopyInfo{Path: path}, nil - } - - // The path exists after resolving symlinks. - return CopyInfo{ - Path: path, - Exists: true, - IsDir: stat.IsDir(), - }, nil -} - -// PrepareArchiveCopy prepares the given srcContent archive, which should -// contain the archived resource described by srcInfo, to the destination -// described by dstInfo. Returns the possibly modified content archive along -// with the path to the destination directory which it should be extracted to. -func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) { - // Ensure in platform semantics - srcInfo.Path = normalizePath(srcInfo.Path) - dstInfo.Path = normalizePath(dstInfo.Path) - - // Separate the destination path between its directory and base - // components in case the source archive contents need to be rebased. - dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) - _, srcBase := SplitPathDirEntry(srcInfo.Path) - - switch { - case dstInfo.Exists && dstInfo.IsDir: - // The destination exists as a directory. No alteration - // to srcContent is needed as its contents can be - // simply extracted to the destination directory. - return dstInfo.Path, ioutil.NopCloser(srcContent), nil - case dstInfo.Exists && srcInfo.IsDir: - // The destination exists as some type of file and the source - // content is a directory. This is an error condition since - // you cannot copy a directory to an existing file location. - return "", nil, ErrCannotCopyDir - case dstInfo.Exists: - // The destination exists as some type of file and the source content - // is also a file. The source content entry will have to be renamed to - // have a basename which matches the destination path's basename. - if len(srcInfo.RebaseName) != 0 { - srcBase = srcInfo.RebaseName - } - return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - case srcInfo.IsDir: - // The destination does not exist and the source content is an archive - // of a directory. The archive should be extracted to the parent of - // the destination path instead, and when it is, the directory that is - // created as a result should take the name of the destination path. - // The source content entries will have to be renamed to have a - // basename which matches the destination path's basename. 
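-		// For example (illustrative): copying directory /src/data to a
-		// non-existent /dst/backup extracts into /dst, with entries rebased
-		// from "data/..." to "backup/...".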
- if len(srcInfo.RebaseName) != 0 { - srcBase = srcInfo.RebaseName - } - return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - case assertsDirectory(dstInfo.Path, os.PathSeparator): - // The destination does not exist and is asserted to be created as a - // directory, but the source content is not a directory. This is an - // error condition since you cannot create a directory from a file - // source. - return "", nil, ErrDirNotExists - default: - // The last remaining case is when the destination does not exist, is - // not asserted to be a directory, and the source content is not an - // archive of a directory. In this case, the destination file will need - // to be created when the archive is extracted and the source content - // entry will have to be renamed to have a basename which matches the - // destination path's basename. - if len(srcInfo.RebaseName) != 0 { - srcBase = srcInfo.RebaseName - } - return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - } - -} - -// RebaseArchiveEntries rewrites the given srcContent archive replacing -// an occurrence of oldBase with newBase at the beginning of entry names. -func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser { - if oldBase == string(os.PathSeparator) { - // If oldBase specifies the root directory, use an empty string as - // oldBase instead so that newBase doesn't replace the path separator - // that all paths will start with. - oldBase = "" - } - - rebased, w := io.Pipe() - - go func() { - srcTar := tar.NewReader(srcContent) - rebasedTar := tar.NewWriter(w) - - for { - hdr, err := srcTar.Next() - if err == io.EOF { - // Signals end of archive. - rebasedTar.Close() - w.Close() - return - } - if err != nil { - w.CloseWithError(err) - return - } - - hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) - if hdr.Typeflag == tar.TypeLink { - hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1) - } - - if err = rebasedTar.WriteHeader(hdr); err != nil { - w.CloseWithError(err) - return - } - - if _, err = io.Copy(rebasedTar, srcTar); err != nil { - w.CloseWithError(err) - return - } - } - }() - - return rebased -} - -// TODO @gupta-ak. These might have to be changed in the future to be -// continuity driver aware as well to support LCOW. - -// CopyResource performs an archive copy from the given source path to the -// given destination path. The source path MUST exist and the destination -// path's parent directory must exist. -func CopyResource(srcPath, dstPath string, followLink bool) error { - var ( - srcInfo CopyInfo - err error - ) - - // Ensure in platform semantics - srcPath = normalizePath(srcPath) - dstPath = normalizePath(dstPath) - - // Clean the source and destination paths. - srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator) - dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator) - - if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil { - return err - } - - content, err := TarResource(srcInfo) - if err != nil { - return err - } - defer content.Close() - - return CopyTo(content, srcInfo, dstPath) -} - -// CopyTo handles extracting the given content whose -// entries should be sourced from srcInfo to dstPath. -func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error { - // The destination path need not exist, but CopyInfoDestinationPath will - // ensure that at least the parent directory exists.
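-	// For example (illustrative): CopyTo(content, srcInfo, "/dst/dir/") only
-	// requires /dst/dir's parent to exist; the Untar options below then skip
-	// lchown and refuse to replace a directory with a non-directory (and
-	// vice versa).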
- dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath)) - if err != nil { - return err - } - - dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo) - if err != nil { - return err - } - defer copyArchive.Close() - - options := &TarOptions{ - NoLchown: true, - NoOverwriteDirNonDir: true, - } - - return Untar(copyArchive, dstDir, options) -} - -// ResolveHostSourcePath decides which real path needs to be copied, based on -// whether symlinks should be followed. If followLink is true, resolvedPath is -// the link target of any symlink file; otherwise only symlinks in the parent -// directory are resolved, and a symlink file itself is returned unresolved. -func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) { - if followLink { - resolvedPath, err = filepath.EvalSymlinks(path) - if err != nil { - return - } - - resolvedPath, rebaseName = GetRebaseName(path, resolvedPath) - } else { - dirPath, basePath := filepath.Split(path) - - // if not following symlinks, resolve only the symlink of the parent dir - var resolvedDirPath string - resolvedDirPath, err = filepath.EvalSymlinks(dirPath) - if err != nil { - return - } - // resolvedDirPath will have been cleaned (no trailing path separators) so - // we can manually join it with the base path element. - resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath - if hasTrailingPathSeparator(path, os.PathSeparator) && - filepath.Base(path) != filepath.Base(resolvedPath) { - rebaseName = filepath.Base(path) - } - } - return resolvedPath, rebaseName, nil -} - -// GetRebaseName normalizes and compares path and resolvedPath, and -// returns the completed resolved path and the rebased file name. -func GetRebaseName(path, resolvedPath string) (string, string) { - // linkTarget will have been cleaned (no trailing path separators and dot) so - // we can manually join it with them - var rebaseName string - if specifiesCurrentDir(path) && - !specifiesCurrentDir(resolvedPath) { - resolvedPath += string(filepath.Separator) + "." - } - - if hasTrailingPathSeparator(path, os.PathSeparator) && - !hasTrailingPathSeparator(resolvedPath, os.PathSeparator) { - resolvedPath += string(filepath.Separator) - } - - if filepath.Base(path) != filepath.Base(resolvedPath) { - // In the case where the path had a trailing separator and a symlink - // evaluation has changed the last path component, we will need to - // rebase the name in the archive that is being copied to match the - // originally requested name.
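-		// For example (illustrative): if "link/" resolves to "target/",
-		// entries archived under "target" are rebased back to "link" so the
-		// copy matches the name the caller used.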
-		rebaseName = filepath.Base(path)
-	}
-	return resolvedPath, rebaseName
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go
deleted file mode 100644
index 3958364f5..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build !windows
-
-package archive // import "github.com/docker/docker/pkg/archive"
-
-import (
-	"path/filepath"
-)
-
-func normalizePath(path string) string {
-	return filepath.ToSlash(path)
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go
deleted file mode 100644
index a878d1bac..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package archive // import "github.com/docker/docker/pkg/archive"
-
-import (
-	"path/filepath"
-)
-
-func normalizePath(path string) string {
-	return filepath.FromSlash(path)
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go
deleted file mode 100644
index fae4b9de0..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/diff.go
+++ /dev/null
@@ -1,258 +0,0 @@
-package archive // import "github.com/docker/docker/pkg/archive"
-
-import (
-	"archive/tar"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"runtime"
-	"strings"
-
-	"github.com/docker/docker/pkg/idtools"
-	"github.com/docker/docker/pkg/pools"
-	"github.com/docker/docker/pkg/system"
-	"github.com/sirupsen/logrus"
-)
-
-// UnpackLayer unpacks `layer` into `dest`. The stream `layer` can be
-// compressed or uncompressed.
-// Returns the size in bytes of the contents of the layer.
-func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
-	tr := tar.NewReader(layer)
-	trBuf := pools.BufioReader32KPool.Get(tr)
-	defer pools.BufioReader32KPool.Put(trBuf)
-
-	var dirs []*tar.Header
-	unpackedPaths := make(map[string]struct{})
-
-	if options == nil {
-		options = &TarOptions{}
-	}
-	if options.ExcludePatterns == nil {
-		options.ExcludePatterns = []string{}
-	}
-	idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
-
-	aufsTempdir := ""
-	aufsHardlinks := make(map[string]*tar.Header)
-
-	// Iterate through the files in the archive.
-	for {
-		hdr, err := tr.Next()
-		if err == io.EOF {
-			// end of tar archive
-			break
-		}
-		if err != nil {
-			return 0, err
-		}
-
-		size += hdr.Size
-
-		// Normalize name, for safety and for a simple is-root check
-		hdr.Name = filepath.Clean(hdr.Name)
-
-		// Windows does not support filenames with colons in them. Ignore
-		// these files. This is not a problem though (although it might
-		// appear that it is). Let's suppose a client is running docker pull.
-		// The daemon it points to is Windows. Would it make sense for the
-		// client to be doing a docker pull Ubuntu for example (which has files
-		// with colons in the name under /usr/share/man/man3)? No, absolutely
-		// not as it would really only make sense that they were pulling a
-		// Windows image. However, for development, it is necessary to be able
-		// to pull Linux images which are in the repository.
-		//
-		// TODO Windows. Once the registry is aware of what images are Windows-
-		// specific or Linux-specific, this warning should be changed to an error
-		// to cater for the situation where someone does manage to upload a Linux
-		// image but have it tagged as Windows inadvertently.
-		if runtime.GOOS == "windows" {
-			if strings.Contains(hdr.Name, ":") {
-				logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
-				continue
-			}
-		}
-
-		// Note: since these operations are platform-specific, so must the slash be.
-		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
-			// Not the root directory, ensure that the parent directory exists.
-			// This happened in some tests where an image had a tarfile without any
-			// parent directories.
-			parent := filepath.Dir(hdr.Name)
-			parentPath := filepath.Join(dest, parent)
-
-			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
-				err = system.MkdirAll(parentPath, 0600, "")
-				if err != nil {
-					return 0, err
-				}
-			}
-		}
-
-		// Skip AUFS metadata dirs
-		if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
-			// Regular files inside /.wh..wh.plnk can be used as hardlink targets
-			// We don't want this directory, but we need the files in them so that
-			// such hardlinks can be resolved.
-			if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
-				basename := filepath.Base(hdr.Name)
-				aufsHardlinks[basename] = hdr
-				if aufsTempdir == "" {
-					if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
-						return 0, err
-					}
-					defer os.RemoveAll(aufsTempdir)
-				}
-				if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil {
-					return 0, err
-				}
-			}
-
-			if hdr.Name != WhiteoutOpaqueDir {
-				continue
-			}
-		}
-		path := filepath.Join(dest, hdr.Name)
-		rel, err := filepath.Rel(dest, path)
-		if err != nil {
-			return 0, err
-		}
-
-		// Note: since these operations are platform-specific, so must the slash be.
-		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
-			return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
-		}
-		base := filepath.Base(path)
-
-		if strings.HasPrefix(base, WhiteoutPrefix) {
-			dir := filepath.Dir(path)
-			if base == WhiteoutOpaqueDir {
-				_, err := os.Lstat(dir)
-				if err != nil {
-					return 0, err
-				}
-				err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
-					if err != nil {
-						if os.IsNotExist(err) {
-							err = nil // parent was deleted
-						}
-						return err
-					}
-					if path == dir {
-						return nil
-					}
-					if _, exists := unpackedPaths[path]; !exists {
-						err := os.RemoveAll(path)
-						return err
-					}
-					return nil
-				})
-				if err != nil {
-					return 0, err
-				}
-			} else {
-				originalBase := base[len(WhiteoutPrefix):]
-				originalPath := filepath.Join(dir, originalBase)
-				if err := os.RemoveAll(originalPath); err != nil {
-					return 0, err
-				}
-			}
-		} else {
-			// If the path exists we almost always just want to remove and replace it.
-			// The only exception is when it is a directory *and* the file from
-			// the layer is also a directory. Then we want to merge them (i.e.
-			// just apply the metadata from the layer).
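The whiteout handling above is easier to follow with the naming convention spelled out. Here is a small, self-contained sketch (helper names hypothetical) of how a layer entry's base name maps to an action, using the `.wh.` conventions that whiteouts.go, deleted later in this patch, defines:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// AUFS whiteout conventions, mirroring pkg/archive/whiteouts.go.
const (
	whPrefix    = ".wh."
	whOpaqueDir = ".wh..wh..opq"
)

// interpretEntry is a hypothetical helper showing how a layer entry name
// translates into a deletion, an opaque-directory marker, or a normal file.
func interpretEntry(name string) string {
	dir, base := filepath.Dir(name), filepath.Base(name)
	switch {
	case base == whOpaqueDir:
		return fmt.Sprintf("make %q opaque (hide lower-layer contents)", dir)
	case strings.HasPrefix(base, whPrefix):
		orig := filepath.Join(dir, strings.TrimPrefix(base, whPrefix))
		return fmt.Sprintf("delete %q", orig)
	default:
		return fmt.Sprintf("extract %q", name)
	}
}

func main() {
	for _, n := range []string{"etc/passwd", "etc/.wh.shadow", "tmp/.wh..wh..opq"} {
		fmt.Println(interpretEntry(n))
	}
}
```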
-			if fi, err := os.Lstat(path); err == nil {
-				if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
-					if err := os.RemoveAll(path); err != nil {
-						return 0, err
-					}
-				}
-			}
-
-			trBuf.Reset(tr)
-			srcData := io.Reader(trBuf)
-			srcHdr := hdr
-
-			// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
-			// we manually retarget these into the temporary files we extracted them into
-			if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
-				linkBasename := filepath.Base(hdr.Linkname)
-				srcHdr = aufsHardlinks[linkBasename]
-				if srcHdr == nil {
-					return 0, fmt.Errorf("Invalid aufs hardlink")
-				}
-				tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
-				if err != nil {
-					return 0, err
-				}
-				defer tmpFile.Close()
-				srcData = tmpFile
-			}
-
-			if err := remapIDs(idMappings, srcHdr); err != nil {
-				return 0, err
-			}
-
-			if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil {
-				return 0, err
-			}
-
-			// Directory mtimes must be handled at the end to prevent later
-			// file creation inside them from modifying the directory mtime
-			if hdr.Typeflag == tar.TypeDir {
-				dirs = append(dirs, hdr)
-			}
-			unpackedPaths[path] = struct{}{}
-		}
-	}
-
-	for _, hdr := range dirs {
-		path := filepath.Join(dest, hdr.Name)
-		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
-			return 0, err
-		}
-	}
-
-	return size, nil
-}
-
-// ApplyLayer parses a diff in the standard layer format from `layer`,
-// and applies it to the directory `dest`. The stream `layer` can be
-// compressed or uncompressed.
-// Returns the size in bytes of the contents of the layer.
-func ApplyLayer(dest string, layer io.Reader) (int64, error) {
-	return applyLayerHandler(dest, layer, &TarOptions{}, true)
-}
-
-// ApplyUncompressedLayer parses a diff in the standard layer format from
-// `layer`, and applies it to the directory `dest`. The stream `layer`
-// can only be uncompressed.
-// Returns the size in bytes of the contents of the layer.
-func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
-	return applyLayerHandler(dest, layer, options, false)
-}
-
-// applyLayerHandler does the bulk of the work for ApplyLayer, but allows for not calling DecompressStream
-func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) {
-	dest = filepath.Clean(dest)
-
-	// We need to be able to set any perms
-	oldmask, err := system.Umask(0)
-	if err != nil {
-		return 0, err
-	}
-	defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
-
-	if decompress {
-		decompLayer, err := DecompressStream(layer)
-		if err != nil {
-			return 0, err
-		}
-		defer decompLayer.Close()
-		layer = decompLayer
-	}
-	return UnpackLayer(dest, layer, options)
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/example_changes.go b/vendor/github.com/docker/docker/pkg/archive/example_changes.go
deleted file mode 100644
index 495db809e..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/example_changes.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// +build ignore
-
-// Simple tool to create an archive stream from an old and new directory
-//
-// By default it will stream the comparison of two temporary directories with junk files
-package main
-
-import (
-	"flag"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"path"
-
-	"github.com/docker/docker/pkg/archive"
-	"github.com/sirupsen/logrus"
-)
-
-var (
-	flDebug  = flag.Bool("D", false, "debugging output")
-	flNewDir = flag.String("newdir", "", "")
-	flOldDir = flag.String("olddir", "", "")
-	log      = logrus.New()
-)
-
-func main() {
-	flag.Usage = func() {
-		fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
-		fmt.Printf("%s [OPTIONS]\n", os.Args[0])
-		flag.PrintDefaults()
-	}
-	flag.Parse()
-	log.Out = os.Stderr
-	if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
-		logrus.SetLevel(logrus.DebugLevel)
-	}
-	var newDir, oldDir string
-
-	if len(*flNewDir) == 0 {
-		var err error
-		newDir, err = ioutil.TempDir("", "docker-test-newDir")
-		if err != nil {
-			log.Fatal(err)
-		}
-		defer os.RemoveAll(newDir)
-		if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
-			log.Fatal(err)
-		}
-	} else {
-		newDir = *flNewDir
-	}
-
-	if len(*flOldDir) == 0 {
-		var err error; oldDir, err = ioutil.TempDir("", "docker-test-oldDir") // assign the outer oldDir; `:=` would shadow it
-		if err != nil {
-			log.Fatal(err)
-		}
-		defer os.RemoveAll(oldDir)
-	} else {
-		oldDir = *flOldDir
-	}
-
-	changes, err := archive.ChangesDirs(newDir, oldDir)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	a, err := archive.ExportChanges(newDir, changes)
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer a.Close()
-
-	i, err := io.Copy(os.Stdout, a)
-	if err != nil && err != io.EOF {
-		log.Fatal(err)
-	}
-	fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
-}
-
-func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
-	fileData := []byte("fooo")
-	for n := 0; n < numberOfFiles; n++ {
-		fileName := fmt.Sprintf("file-%d", n)
-		if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
-			return 0, err
-		}
-		if makeLinks {
-			if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
-				return 0, err
-			}
-		}
-	}
-	totalSize := numberOfFiles * len(fileData)
-	return totalSize, nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/docker/docker/pkg/archive/time_linux.go
deleted file mode 100644
index 797143ee8..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/time_linux.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package archive // import "github.com/docker/docker/pkg/archive"
-
-import (
-	"syscall"
-	"time"
-)
-
-func timeToTimespec(time time.Time) (ts syscall.Timespec) {
-	if time.IsZero() {
-		// Return UTIME_OMIT special value
-		ts.Sec = 0
-		ts.Nsec = (1 << 30) - 2
-		return
-	}
-	return syscall.NsecToTimespec(time.UnixNano())
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
deleted file mode 100644
index f58bf227f..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build !linux
-
-package archive // import "github.com/docker/docker/pkg/archive"
-
-import (
-	"syscall"
-	"time"
-)
-
-func timeToTimespec(time time.Time) (ts syscall.Timespec) {
-	nsec := int64(0)
-	if !time.IsZero() {
-		nsec = time.UnixNano()
-	}
-	return syscall.NsecToTimespec(nsec)
-}
diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go
deleted file mode 100644
index 4c072a87e..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package archive // import "github.com/docker/docker/pkg/archive"
-
-// Whiteouts are files with a special meaning for the layered filesystem.
-// Docker uses AUFS whiteout files inside exported archives. In other
-// filesystems these files are generated/handled on tar creation/extraction.
-
-// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
-// filename this means that file has been removed from the base layer.
-const WhiteoutPrefix = ".wh."
-
-// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
-// for removing an actual file. Normally these files are excluded from exported
-// archives.
-const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
-
-// WhiteoutLinkDir is a directory AUFS uses for storing hard links to other
-// layers. Normally these should not go into exported archives and all changed
-// hardlinks should be copied to the top layer.
-const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"
-
-// WhiteoutOpaqueDir file means directory has been made opaque - meaning
-// readdir calls to this directory do not follow to lower layers.
-const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go
deleted file mode 100644
index 85435694c..000000000
--- a/vendor/github.com/docker/docker/pkg/archive/wrap.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package archive // import "github.com/docker/docker/pkg/archive"
-
-import (
-	"archive/tar"
-	"bytes"
-	"io"
-)
-
-// Generate generates a new archive from the content provided
-// as input.
-//
-// `files` is a sequence of path/content pairs. A new file is
-// added to the archive for each pair.
-// If the last pair is incomplete, the file is created with
-// empty content. For example:
-//
-// Generate("foo.txt", "hello world", "emptyfile")
-//
-// The above call will return an archive with 2 files:
-//  * ./foo.txt with content "hello world"
-//  * ./emptyfile with empty content
-//
-// FIXME: stream content instead of buffering
-// FIXME: specify permissions and other archive metadata
-func Generate(input ...string) (io.Reader, error) {
-	files := parseStringPairs(input...)
-	buf := new(bytes.Buffer)
-	tw := tar.NewWriter(buf)
-	for _, file := range files {
-		name, content := file[0], file[1]
-		hdr := &tar.Header{
-			Name: name,
-			Size: int64(len(content)),
-		}
-		if err := tw.WriteHeader(hdr); err != nil {
-			return nil, err
-		}
-		if _, err := tw.Write([]byte(content)); err != nil {
-			return nil, err
-		}
-	}
-	if err := tw.Close(); err != nil {
-		return nil, err
-	}
-	return buf, nil
-}
-
-func parseStringPairs(input ...string) (output [][2]string) {
-	output = make([][2]string, 0, len(input)/2+1)
-	for i := 0; i < len(input); i += 2 {
-		var pair [2]string
-		pair[0] = input[i]
-		if i+1 < len(input) {
-			pair[1] = input[i+1]
-		}
-		output = append(output, pair)
-	}
-	return
-}
diff --git a/vendor/github.com/docker/docker/pkg/authorization/api.go b/vendor/github.com/docker/docker/pkg/authorization/api.go
deleted file mode 100644
index cc0c12d50..000000000
--- a/vendor/github.com/docker/docker/pkg/authorization/api.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package authorization // import "github.com/docker/docker/pkg/authorization"
-
-import (
-	"crypto/x509"
-	"encoding/json"
-	"encoding/pem"
-)
-
-const (
-	// AuthZApiRequest is the name of the plugin call for daemon request authorization
-	AuthZApiRequest = "AuthZPlugin.AuthZReq"
-
-	// AuthZApiResponse is the name of the plugin call for daemon response authorization
-	AuthZApiResponse = "AuthZPlugin.AuthZRes"
-
-	// AuthZApiImplements is the name of the interface all AuthZ plugins implement
-	AuthZApiImplements = "authz"
-)
-
-// PeerCertificate is a wrapper around x509.Certificate which provides a sane
-// encoding/decoding to/from PEM format and JSON.
-type PeerCertificate x509.Certificate
-
-// MarshalJSON returns the JSON-encoded PEM bytes of a PeerCertificate.
-func (pc *PeerCertificate) MarshalJSON() ([]byte, error) {
-	b := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: pc.Raw})
-	return json.Marshal(b)
-}
-
-// UnmarshalJSON populates a new PeerCertificate struct from JSON data.
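For reference, the buffering approach used by Generate above boils down to the standard archive/tar pattern. A hedged, self-contained sketch of the same idea (buildArchive is an illustrative name, not the vendored function):

```go
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"log"
)

// buildArchive mirrors what Generate does for complete pairs: one tar
// entry per (name, content) pair, buffered in memory.
func buildArchive(pairs map[string]string) (io.Reader, error) {
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	for name, content := range pairs {
		hdr := &tar.Header{Name: name, Mode: 0644, Size: int64(len(content))}
		if err := tw.WriteHeader(hdr); err != nil {
			return nil, err
		}
		if _, err := io.WriteString(tw, content); err != nil {
			return nil, err
		}
	}
	if err := tw.Close(); err != nil {
		return nil, err
	}
	return buf, nil
}

func main() {
	r, err := buildArchive(map[string]string{"foo.txt": "hello world"})
	if err != nil {
		log.Fatal(err)
	}
	// Read the entries back to show the archive is well-formed.
	tr := tar.NewReader(r)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(hdr.Name)
	}
}
```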
-func (pc *PeerCertificate) UnmarshalJSON(b []byte) error {
-	var buf []byte
-	if err := json.Unmarshal(b, &buf); err != nil {
-		return err
-	}
-	derBytes, _ := pem.Decode(buf)
-	c, err := x509.ParseCertificate(derBytes.Bytes)
-	if err != nil {
-		return err
-	}
-	*pc = PeerCertificate(*c)
-	return nil
-}
-
-// Request holds data required for authZ plugins
-type Request struct {
-	// User holds the user extracted by AuthN mechanism
-	User string `json:"User,omitempty"`
-
-	// UserAuthNMethod holds the mechanism used to extract user details (e.g., krb)
-	UserAuthNMethod string `json:"UserAuthNMethod,omitempty"`
-
-	// RequestMethod holds the HTTP method (GET/POST/PUT)
-	RequestMethod string `json:"RequestMethod,omitempty"`
-
-	// RequestURI holds the full HTTP URI (e.g., /v1.21/version)
-	RequestURI string `json:"RequestUri,omitempty"`
-
-	// RequestBody stores the raw request body sent to the docker daemon
-	RequestBody []byte `json:"RequestBody,omitempty"`
-
-	// RequestHeaders stores the raw request headers sent to the docker daemon
-	RequestHeaders map[string]string `json:"RequestHeaders,omitempty"`
-
-	// RequestPeerCertificates stores the request's TLS peer certificates in PEM format
-	RequestPeerCertificates []*PeerCertificate `json:"RequestPeerCertificates,omitempty"`
-
-	// ResponseStatusCode stores the status code returned from docker daemon
-	ResponseStatusCode int `json:"ResponseStatusCode,omitempty"`
-
-	// ResponseBody stores the raw response body sent from docker daemon
-	ResponseBody []byte `json:"ResponseBody,omitempty"`
-
-	// ResponseHeaders stores the response headers sent to the docker daemon
-	ResponseHeaders map[string]string `json:"ResponseHeaders,omitempty"`
-}
-
-// Response represents authZ plugin response
-type Response struct {
-	// Allow indicates whether the user is allowed or not
-	Allow bool `json:"Allow"`
-
-	// Msg stores the authorization message
-	Msg string `json:"Msg,omitempty"`
-
-	// Err stores a message in case there's an error
-	Err string `json:"Err,omitempty"`
-}
diff --git a/vendor/github.com/docker/docker/pkg/authorization/authz.go b/vendor/github.com/docker/docker/pkg/authorization/authz.go
deleted file mode 100644
index a1edbcd89..000000000
--- a/vendor/github.com/docker/docker/pkg/authorization/authz.go
+++ /dev/null
@@ -1,189 +0,0 @@
-package authorization // import "github.com/docker/docker/pkg/authorization"
-
-import (
-	"bufio"
-	"bytes"
-	"fmt"
-	"io"
-	"mime"
-	"net/http"
-	"strings"
-
-	"github.com/docker/docker/pkg/ioutils"
-	"github.com/sirupsen/logrus"
-)
-
-const maxBodySize = 1048576 // 1MB
-
-// NewCtx creates a new authZ context; it is used to store authorization
-// information related to a specific docker REST HTTP session.
-// A context provides two methods:
-// Authenticate Request:
-// Call authZ plugins with current REST request and AuthN response
-// Request contains full HTTP packet sent to the docker daemon
-// https://docs.docker.com/engine/reference/api/
-//
-// Authenticate Response:
-// Call authZ plugins with full info about current REST request, REST response and AuthN response
-// The response from this method may contain content that overrides the daemon response
-// This allows authZ plugins to filter privileged content
-//
-// If multiple authZ plugins are specified, the block/allow decision is based on ANDing all plugin results
-// For response manipulation, the response from each plugin is piped between plugins. Plugin execution order
-// is determined according to daemon parameters
-func NewCtx(authZPlugins []Plugin, user, userAuthNMethod, requestMethod, requestURI string) *Ctx {
-	return &Ctx{
-		plugins:         authZPlugins,
-		user:            user,
-		userAuthNMethod: userAuthNMethod,
-		requestMethod:   requestMethod,
-		requestURI:      requestURI,
-	}
-}
-
-// Ctx stores a single request-response interaction context
-type Ctx struct {
-	user            string
-	userAuthNMethod string
-	requestMethod   string
-	requestURI      string
-	plugins         []Plugin
-	// authReq stores the cached request object for the current transaction
-	authReq *Request
-}
-
-// AuthZRequest authorizes the request to the docker daemon using authZ plugins
-func (ctx *Ctx) AuthZRequest(w http.ResponseWriter, r *http.Request) error {
-	var body []byte
-	if sendBody(ctx.requestURI, r.Header) && r.ContentLength > 0 && r.ContentLength < maxBodySize {
-		var err error
-		body, r.Body, err = drainBody(r.Body)
-		if err != nil {
-			return err
-		}
-	}
-
-	var h bytes.Buffer
-	if err := r.Header.Write(&h); err != nil {
-		return err
-	}
-
-	ctx.authReq = &Request{
-		User:            ctx.user,
-		UserAuthNMethod: ctx.userAuthNMethod,
-		RequestMethod:   ctx.requestMethod,
-		RequestURI:      ctx.requestURI,
-		RequestBody:     body,
-		RequestHeaders:  headers(r.Header),
-	}
-
-	if r.TLS != nil {
-		for _, c := range r.TLS.PeerCertificates {
-			pc := PeerCertificate(*c)
-			ctx.authReq.RequestPeerCertificates = append(ctx.authReq.RequestPeerCertificates, &pc)
-		}
-	}
-
-	for _, plugin := range ctx.plugins {
-		logrus.Debugf("AuthZ request using plugin %s", plugin.Name())
-
-		authRes, err := plugin.AuthZRequest(ctx.authReq)
-		if err != nil {
-			return fmt.Errorf("plugin %s failed with error: %s", plugin.Name(), err)
-		}
-
-		if !authRes.Allow {
-			return newAuthorizationError(plugin.Name(), authRes.Msg)
-		}
-	}
-
-	return nil
-}
-
-// AuthZResponse authorizes and manipulates the response from the docker daemon using authZ plugins
-func (ctx *Ctx) AuthZResponse(rm ResponseModifier, r *http.Request) error {
-	ctx.authReq.ResponseStatusCode = rm.StatusCode()
-	ctx.authReq.ResponseHeaders = headers(rm.Header())
-
-	if sendBody(ctx.requestURI, rm.Header()) {
-		ctx.authReq.ResponseBody = rm.RawBody()
-	}
-
-	for _, plugin := range ctx.plugins {
-		logrus.Debugf("AuthZ response using plugin %s", plugin.Name())
-
-		authRes, err := plugin.AuthZResponse(ctx.authReq)
-		if err != nil {
-			return fmt.Errorf("plugin %s failed with error: %s", plugin.Name(), err)
-		}
-
-		if !authRes.Allow {
-			return newAuthorizationError(plugin.Name(), authRes.Msg)
-		}
-	}
-
-	rm.FlushAll()
-
-	return nil
-}
-
-// drainBody dumps the body (if its length is less than 1MB) without modifying the request state
-func drainBody(body io.ReadCloser) ([]byte, io.ReadCloser, error) {
-	bufReader := bufio.NewReaderSize(body, maxBodySize)
-	newBody := ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() })
-
-	data, err := bufReader.Peek(maxBodySize)
-	// Body size exceeds max body size
-	if err == nil {
-		logrus.Warnf("Request body is larger than: '%d' skipping body", maxBodySize)
-		return nil, newBody, nil
-	}
-	// Body size is less than maximum size
-	if err == io.EOF {
-		return data, newBody, nil
-	}
-	// Unknown error
-	return nil, newBody, err
-}
-
-// sendBody returns true when request/response body should be sent to AuthZPlugin
-func sendBody(url string, header http.Header) bool {
-	// Skip body for auth endpoint
-	if strings.HasSuffix(url, "/auth") {
-		return false
-	}
-
-	// body is sent only for text or json messages
-	contentType, _, err := mime.ParseMediaType(header.Get("Content-Type"))
-	if err != nil {
-		return false
-	}
-
-	return contentType == "application/json"
-}
-
-// headers returns a flattened version of the http headers excluding authorization
-func headers(header http.Header) map[string]string {
-	v := make(map[string]string)
-	for k, values := range header {
-		// Skip authorization headers
-		if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "X-Registry-Config") || strings.EqualFold(k, "X-Registry-Auth") {
-			continue
-		}
-		for _, val := range values {
-			v[k] = val
-		}
-	}
-	return v
-}
-
-// authorizationError represents an authorization deny error
-type authorizationError struct {
-	error
-}
-
-func (authorizationError) Forbidden() {}
-
-func newAuthorizationError(plugin, msg string) authorizationError {
-	return authorizationError{error: fmt.Errorf("authorization denied by plugin %s: %s", plugin, msg)}
-}
diff --git a/vendor/github.com/docker/docker/pkg/authorization/middleware.go b/vendor/github.com/docker/docker/pkg/authorization/middleware.go
deleted file mode 100644
index 39c2dce85..000000000
--- a/vendor/github.com/docker/docker/pkg/authorization/middleware.go
+++ /dev/null
@@ -1,110 +0,0 @@
-package authorization // import "github.com/docker/docker/pkg/authorization"
-
-import (
-	"context"
-	"net/http"
-	"sync"
-
-	"github.com/docker/docker/pkg/plugingetter"
-	"github.com/sirupsen/logrus"
-)
-
-// Middleware uses a list of plugins to
-// handle authorization in the API requests.
-type Middleware struct {
-	mu      sync.Mutex
-	plugins []Plugin
-}
-
-// NewMiddleware creates a new Middleware
-// with a slice of plugin names.
-func NewMiddleware(names []string, pg plugingetter.PluginGetter) *Middleware {
-	SetPluginGetter(pg)
-	return &Middleware{
-		plugins: newPlugins(names),
-	}
-}
-
-func (m *Middleware) getAuthzPlugins() []Plugin {
-	m.mu.Lock()
-	defer m.mu.Unlock()
-	return m.plugins
-}
-
-// SetPlugins sets the plugin used for authorization
-func (m *Middleware) SetPlugins(names []string) {
-	m.mu.Lock()
-	m.plugins = newPlugins(names)
-	m.mu.Unlock()
-}
-
-// RemovePlugin removes a single plugin from this authz middleware chain
-func (m *Middleware) RemovePlugin(name string) {
-	m.mu.Lock()
-	defer m.mu.Unlock()
-	plugins := m.plugins[:0]
-	for _, authPlugin := range m.plugins {
-		if authPlugin.Name() != name {
-			plugins = append(plugins, authPlugin)
-		}
-	}
-	m.plugins = plugins
-}
-
-// WrapHandler returns a new handler function wrapping the previous one in the request chain.
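The ANDed block/allow semantics described in the NewCtx comment above reduce to a short loop. A minimal, hypothetical sketch; the decision and authorizer types are stand-ins for the package's Response and Plugin types:

```go
package main

import (
	"errors"
	"fmt"
)

// decision is a trimmed-down stand-in for the plugin Response type.
type decision struct {
	Allow bool
	Msg   string
}

// authorizer is a hypothetical, minimal version of the Plugin interface.
type authorizer interface {
	Name() string
	Authorize(uri string) (decision, error)
}

// authorizeAll mirrors the ANDed block/allow semantics: the first plugin
// that denies (or errors) short-circuits the whole chain.
func authorizeAll(plugins []authorizer, uri string) error {
	for _, p := range plugins {
		d, err := p.Authorize(uri)
		if err != nil {
			return fmt.Errorf("plugin %s failed: %w", p.Name(), err)
		}
		if !d.Allow {
			return errors.New("authorization denied by plugin " + p.Name() + ": " + d.Msg)
		}
	}
	return nil
}

// allowAll is a trivial plugin used only to exercise the sketch.
type allowAll struct{}

func (allowAll) Name() string                         { return "allow-all" }
func (allowAll) Authorize(string) (decision, error)   { return decision{Allow: true}, nil }

func main() {
	if err := authorizeAll([]authorizer{allowAll{}}, "/v1.21/version"); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("request allowed")
}
```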
-func (m *Middleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-	return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
-		plugins := m.getAuthzPlugins()
-		if len(plugins) == 0 {
-			return handler(ctx, w, r, vars)
-		}
-
-		user := ""
-		userAuthNMethod := ""
-
-		// Default authorization using existing TLS connection credentials
-		// FIXME: Non-trivial authorization mechanisms (such as advanced certificate validations, kerberos support
-		// and ldap) will be extracted using AuthN feature, which is tracked under:
-		// https://github.com/docker/docker/pull/20883
-		if r.TLS != nil && len(r.TLS.PeerCertificates) > 0 {
-			user = r.TLS.PeerCertificates[0].Subject.CommonName
-			userAuthNMethod = "TLS"
-		}
-
-		authCtx := NewCtx(plugins, user, userAuthNMethod, r.Method, r.RequestURI)
-
-		if err := authCtx.AuthZRequest(w, r); err != nil {
-			logrus.Errorf("AuthZRequest for %s %s returned error: %s", r.Method, r.RequestURI, err)
-			return err
-		}
-
-		rw := NewResponseModifier(w)
-
-		var errD error
-
-		if errD = handler(ctx, rw, r, vars); errD != nil {
-			logrus.Errorf("Handler for %s %s returned error: %s", r.Method, r.RequestURI, errD)
-		}
-
-		// There's a chance that the authCtx.plugins were updated. One of the reasons
-		// this can happen is when an authz plugin is disabled.
-		plugins = m.getAuthzPlugins()
-		if len(plugins) == 0 {
-			logrus.Debug("There are no authz plugins in the chain")
-			return nil
-		}
-
-		authCtx.plugins = plugins
-
-		if err := authCtx.AuthZResponse(rw, r); errD == nil && err != nil {
-			logrus.Errorf("AuthZResponse for %s %s returned error: %s", r.Method, r.RequestURI, err)
-			return err
-		}
-
-		if errD != nil {
-			return errD
-		}
-
-		return nil
-	}
-}
diff --git a/vendor/github.com/docker/docker/pkg/authorization/plugin.go b/vendor/github.com/docker/docker/pkg/authorization/plugin.go
deleted file mode 100644
index 3316fd870..000000000
--- a/vendor/github.com/docker/docker/pkg/authorization/plugin.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package authorization // import "github.com/docker/docker/pkg/authorization"
-
-import (
-	"sync"
-
-	"github.com/docker/docker/pkg/plugingetter"
-	"github.com/docker/docker/pkg/plugins"
-)
-
-// Plugin allows third party plugins to authorize requests and responses
-// in the context of docker API
-type Plugin interface {
-	// Name returns the registered plugin name
-	Name() string
-
-	// AuthZRequest authorizes the request from the client to the daemon
-	AuthZRequest(*Request) (*Response, error)
-
-	// AuthZResponse authorizes the response from the daemon to the client
-	AuthZResponse(*Request) (*Response, error)
-}
-
-// newPlugins constructs and initializes the authorization plugins based on plugin names
-func newPlugins(names []string) []Plugin {
-	plugins := []Plugin{}
-	pluginsMap := make(map[string]struct{})
-	for _, name := range names {
-		if _, ok := pluginsMap[name]; ok {
-			continue
-		}
-		pluginsMap[name] = struct{}{}
-		plugins = append(plugins, newAuthorizationPlugin(name))
-	}
-	return plugins
-}
-
-var getter plugingetter.PluginGetter
-
-// SetPluginGetter sets the plugingetter
-func SetPluginGetter(pg plugingetter.PluginGetter) {
-	getter = pg
-}
-
-// GetPluginGetter gets the plugingetter
-func GetPluginGetter() plugingetter.PluginGetter {
-	return getter
-}
-
-// authorizationPlugin is an internal adapter to the docker plugin system
-type authorizationPlugin struct {
-	initErr error
-	plugin  *plugins.Client
-	name    string
-	once    sync.Once
-}
-
-func newAuthorizationPlugin(name string) Plugin {
-	return &authorizationPlugin{name: name}
-}
-
-func (a *authorizationPlugin) Name() string {
-	return a.name
-}
-
-// SetName sets the remote name for an authz plugin (v2)
-func (a *authorizationPlugin) SetName(remote string) {
-	a.name = remote
-}
-
-func (a *authorizationPlugin) AuthZRequest(authReq *Request) (*Response, error) {
-	if err := a.initPlugin(); err != nil {
-		return nil, err
-	}
-
-	authRes := &Response{}
-	if err := a.plugin.Call(AuthZApiRequest, authReq, authRes); err != nil {
-		return nil, err
-	}
-
-	return authRes, nil
-}
-
-func (a *authorizationPlugin) AuthZResponse(authReq *Request) (*Response, error) {
-	if err := a.initPlugin(); err != nil {
-		return nil, err
-	}
-
-	authRes := &Response{}
-	if err := a.plugin.Call(AuthZApiResponse, authReq, authRes); err != nil {
-		return nil, err
-	}
-
-	return authRes, nil
-}
-
-// initPlugin initializes the authorization plugin if needed
-func (a *authorizationPlugin) initPlugin() error {
-	// Lazy loading of plugins
-	a.once.Do(func() {
-		if a.plugin == nil {
-			var plugin plugingetter.CompatPlugin
-			var e error
-
-			if pg := GetPluginGetter(); pg != nil {
-				plugin, e = pg.Get(a.name, AuthZApiImplements, plugingetter.Lookup)
-				if e == nil { a.SetName(plugin.Name()) } // guard against a nil plugin on lookup error
-			} else {
-				plugin, e = plugins.Get(a.name, AuthZApiImplements)
-			}
-			if e != nil {
-				a.initErr = e
-				return
-			}
-			a.plugin = plugin.Client()
-		}
-	})
-	return a.initErr
-}
diff --git a/vendor/github.com/docker/docker/pkg/authorization/response.go b/vendor/github.com/docker/docker/pkg/authorization/response.go
deleted file mode 100644
index 6b674bc29..000000000
--- a/vendor/github.com/docker/docker/pkg/authorization/response.go
+++ /dev/null
@@ -1,210 +0,0 @@
-package authorization // import "github.com/docker/docker/pkg/authorization"
-
-import (
-	"bufio"
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"net"
-	"net/http"
-
-	"github.com/sirupsen/logrus"
-)
-
-// ResponseModifier allows authorization plugins to read and modify the content of the http.response
-type ResponseModifier interface {
-	http.ResponseWriter
-	http.Flusher
-	http.CloseNotifier
-
-	// RawBody returns the current http content
-	RawBody() []byte
-
-	// RawHeaders returns the current content of the http headers
-	RawHeaders() ([]byte, error)
-
-	// StatusCode returns the current status code
-	StatusCode() int
-
-	// OverrideBody replaces the body of the HTTP reply
-	OverrideBody(b []byte)
-
-	// OverrideHeader replaces the headers of the HTTP reply
-	OverrideHeader(b []byte) error
-
-	// OverrideStatusCode replaces the status code of the HTTP reply
-	OverrideStatusCode(statusCode int)
-
-	// FlushAll flushes all data to the HTTP response
-	FlushAll() error
-
-	// Hijacked indicates the response has been hijacked by the Docker daemon
-	Hijacked() bool
-}
-
-// NewResponseModifier creates a wrapper to an http.ResponseWriter to allow inspecting and modifying the content
-func NewResponseModifier(rw http.ResponseWriter) ResponseModifier {
-	return &responseModifier{rw: rw, header: make(http.Header)}
-}
-
-const maxBufferSize = 64 * 1024
-
-// responseModifier is used as an adapter to http.ResponseWriter in order to manipulate and explore
-// the http request/response from docker daemon
-type responseModifier struct {
-	// The original response writer
-	rw http.ResponseWriter
-	// body holds the response body
-	body []byte
-	// header holds the response header
-	header http.Header
-	// statusCode holds the response status code
-	statusCode int
-	// hijacked indicates the request has been hijacked
-	hijacked bool
-}
-
-func (rm *responseModifier) Hijacked() bool {
-	return rm.hijacked
-}
-
-// WriteHeader stores the HTTP status code
-func (rm *responseModifier) WriteHeader(s int) {
-
-	// Use original request if hijacked
-	if rm.hijacked {
-		rm.rw.WriteHeader(s)
-		return
-	}
-
-	rm.statusCode = s
-}
-
-// Header returns the internal http header
-func (rm *responseModifier) Header() http.Header {
-
-	// Use original header if hijacked
-	if rm.hijacked {
-		return rm.rw.Header()
-	}
-
-	return rm.header
-}
-
-// StatusCode returns the http status code
-func (rm *responseModifier) StatusCode() int {
-	return rm.statusCode
-}
-
-// OverrideBody replaces the body of the HTTP response
-func (rm *responseModifier) OverrideBody(b []byte) {
-	rm.body = b
-}
-
-// OverrideStatusCode replaces the status code of the HTTP response
-func (rm *responseModifier) OverrideStatusCode(statusCode int) {
-	rm.statusCode = statusCode
-}
-
-// OverrideHeader replaces the headers of the HTTP response
-func (rm *responseModifier) OverrideHeader(b []byte) error {
-	header := http.Header{}
-	if err := json.Unmarshal(b, &header); err != nil {
-		return err
-	}
-	rm.header = header
-	return nil
-}
-
-// Write stores the byte array inside content
-func (rm *responseModifier) Write(b []byte) (int, error) {
-	if rm.hijacked {
-		return rm.rw.Write(b)
-	}
-
-	if len(rm.body)+len(b) > maxBufferSize {
-		rm.Flush()
-	}
-	rm.body = append(rm.body, b...)
-	return len(b), nil
-}
-
-// RawBody returns the response body
-func (rm *responseModifier) RawBody() []byte {
-	return rm.body
-}
-
-func (rm *responseModifier) RawHeaders() ([]byte, error) {
-	var b bytes.Buffer
-	if err := rm.header.Write(&b); err != nil {
-		return nil, err
-	}
-	return b.Bytes(), nil
-}
-
-// Hijack returns the internal connection of the wrapped http.ResponseWriter
-func (rm *responseModifier) Hijack() (net.Conn, *bufio.ReadWriter, error) {
-
-	rm.hijacked = true
-	rm.FlushAll()
-
-	hijacker, ok := rm.rw.(http.Hijacker)
-	if !ok {
-		return nil, nil, fmt.Errorf("Internal response writer doesn't support the Hijacker interface")
-	}
-	return hijacker.Hijack()
-}
-
-// CloseNotify uses the internal close notify API of the wrapped http.ResponseWriter
-func (rm *responseModifier) CloseNotify() <-chan bool {
-	closeNotifier, ok := rm.rw.(http.CloseNotifier)
-	if !ok {
-		logrus.Error("Internal response writer doesn't support the CloseNotifier interface")
-		return nil
-	}
-	return closeNotifier.CloseNotify()
-}
-
-// Flush uses the internal flush API of the wrapped http.ResponseWriter
-func (rm *responseModifier) Flush() {
-	flusher, ok := rm.rw.(http.Flusher)
-	if !ok {
-		logrus.Error("Internal response writer doesn't support the Flusher interface")
-		return
-	}
-
-	rm.FlushAll()
-	flusher.Flush()
-}
-
-// FlushAll flushes all data to the HTTP response
-func (rm *responseModifier) FlushAll() error {
-	// Copy the header
-	for k, vv := range rm.header {
-		for _, v := range vv {
-			rm.rw.Header().Add(k, v)
-		}
-	}
-
-	// Copy the status code
-	// Also WriteHeader needs to be done after all the headers
-	// have been copied (above).
-	if rm.statusCode > 0 {
-		rm.rw.WriteHeader(rm.statusCode)
-	}
-
-	var err error
-	if len(rm.body) > 0 {
-		// Write body
-		var n int
-		n, err = rm.rw.Write(rm.body)
-		// TODO(@cpuguy83): there is now a relatively small buffer limit, instead of discarding our buffer here and
-		// allocating again later this should just keep using the same buffer and track the buffer position (like a bytes.Buffer with a fixed size)
-		rm.body = rm.body[n:]
-	}
-
-	// Clean previous data
-	rm.statusCode = 0
-	rm.header = http.Header{}
-	return err
-}
diff --git a/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go b/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go
deleted file mode 100644
index 6bb285123..000000000
--- a/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package broadcaster // import "github.com/docker/docker/pkg/broadcaster"
-
-import (
-	"io"
-	"sync"
-)
-
-// Unbuffered fans out each Write to multiple io.WriteCloser streams.
-type Unbuffered struct {
-	mu      sync.Mutex
-	writers []io.WriteCloser
-}
-
-// Add adds a new io.WriteCloser.
-func (w *Unbuffered) Add(writer io.WriteCloser) {
-	w.mu.Lock()
-	w.writers = append(w.writers, writer)
-	w.mu.Unlock()
-}
-
-// Write writes bytes to all writers. Failed writers will be evicted during
-// this call.
-func (w *Unbuffered) Write(p []byte) (n int, err error) {
-	w.mu.Lock()
-	var evict []int
-	for i, sw := range w.writers {
-		if n, err := sw.Write(p); err != nil || n != len(p) {
-			// On error, evict the writer
-			evict = append(evict, i)
-		}
-	}
-	for n, i := range evict {
-		w.writers = append(w.writers[:i-n], w.writers[i-n+1:]...)
-	}
-	w.mu.Unlock()
-	return len(p), nil
-}
-
-// Clean closes and removes all writers. After a call to Clean the
-// broadcaster is empty and can be reused.
-func (w *Unbuffered) Clean() error {
-	w.mu.Lock()
-	for _, sw := range w.writers {
-		sw.Close()
-	}
-	w.writers = nil
-	w.mu.Unlock()
-	return nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go
deleted file mode 100644
index 47c9a2b94..000000000
--- a/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
-
-import (
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/idtools"
-)
-
-// NewArchiver returns a new Archiver which uses chrootarchive.Untar
-func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver {
-	if idMappings == nil {
-		idMappings = &idtools.IDMappings{}
-	}
-	return &archive.Archiver{
-		Untar:         Untar,
-		IDMappingsVar: idMappings,
-	}
-}
-
-// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
-// and unpacks it into the directory at `dest`.
-// The archive may be compressed with one of the following algorithms:
-// identity (uncompressed), gzip, bzip2, xz.
-func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
-	return untarHandler(tarArchive, dest, options, true)
-}
-
-// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
-// and unpacks it into the directory at `dest`.
-// The archive must be an uncompressed stream.
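Unbuffered above is essentially a mutable, failure-tolerant io.MultiWriter. A self-contained sketch of the same fan-out idea; fanOut is a hypothetical simplification, not the vendored type:

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"
	"sync"
)

// fanOut duplicates each Write to every registered writer, like
// io.MultiWriter, but writers can be added after creation and a failing
// writer is dropped instead of failing the whole Write.
type fanOut struct {
	mu      sync.Mutex
	writers []io.Writer
}

func (f *fanOut) Add(w io.Writer) {
	f.mu.Lock()
	f.writers = append(f.writers, w)
	f.mu.Unlock()
}

func (f *fanOut) Write(p []byte) (int, error) {
	f.mu.Lock()
	defer f.mu.Unlock()
	kept := f.writers[:0]
	for _, w := range f.writers {
		// Keep only writers that accepted the full payload.
		if n, err := w.Write(p); err == nil && n == len(p) {
			kept = append(kept, w)
		}
	}
	f.writers = kept
	return len(p), nil
}

func main() {
	var buf strings.Builder
	f := &fanOut{}
	f.Add(os.Stdout)
	f.Add(&buf)
	fmt.Fprintln(f, "hello")
	fmt.Print(buf.String()) // the same line was captured in the buffer
}
```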
-func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error { - return untarHandler(tarArchive, dest, options, false) -} - -// Handler for teasing out the automatic decompression -func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error { - if tarArchive == nil { - return fmt.Errorf("Empty archive") - } - if options == nil { - options = &archive.TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - - idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) - rootIDs := idMappings.RootPair() - - dest = filepath.Clean(dest) - if _, err := os.Stat(dest); os.IsNotExist(err) { - if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil { - return err - } - } - - r := ioutil.NopCloser(tarArchive) - if decompress { - decompressedArchive, err := archive.DecompressStream(tarArchive) - if err != nil { - return err - } - defer decompressedArchive.Close() - r = decompressedArchive - } - - return invokeUnpack(r, dest, options) -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go deleted file mode 100644 index 5df8afd66..000000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go +++ /dev/null @@ -1,88 +0,0 @@ -// +build !windows - -package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" - -import ( - "bytes" - "encoding/json" - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "runtime" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/reexec" -) - -// untar is the entry-point for docker-untar on re-exec. This is not used on -// Windows as it does not support chroot, hence no point sandboxing through -// chroot and rexec. -func untar() { - runtime.LockOSThread() - flag.Parse() - - var options *archive.TarOptions - - //read the options from the pipe "ExtraFiles" - if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil { - fatal(err) - } - - if err := chroot(flag.Arg(0)); err != nil { - fatal(err) - } - - if err := archive.Unpack(os.Stdin, "/", options); err != nil { - fatal(err) - } - // fully consume stdin in case it is zero padded - if _, err := flush(os.Stdin); err != nil { - fatal(err) - } - - os.Exit(0) -} - -func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error { - - // We can't pass a potentially large exclude list directly via cmd line - // because we easily overrun the kernel's max argument/environment size - // when the full image list is passed (e.g. when this is used by - // `docker load`). 
We will marshal the options via a pipe to the
-	// child
-	r, w, err := os.Pipe()
-	if err != nil {
-		return fmt.Errorf("Untar pipe failure: %v", err)
-	}
-
-	cmd := reexec.Command("docker-untar", dest)
-	cmd.Stdin = decompressedArchive
-
-	cmd.ExtraFiles = append(cmd.ExtraFiles, r)
-	output := bytes.NewBuffer(nil)
-	cmd.Stdout = output
-	cmd.Stderr = output
-
-	if err := cmd.Start(); err != nil {
-		w.Close()
-		return fmt.Errorf("Untar error on re-exec cmd: %v", err)
-	}
-	// write the options to the pipe for the untar exec to read
-	if err := json.NewEncoder(w).Encode(options); err != nil {
-		w.Close()
-		return fmt.Errorf("Untar json encode to pipe failed: %v", err)
-	}
-	w.Close()
-
-	if err := cmd.Wait(); err != nil {
-		// when `xz -d -c -q | docker-untar ...` failed on docker-untar side,
-		// we need to exhaust `xz`'s output, otherwise the `xz` side will be
-		// pending on write pipe forever
-		io.Copy(ioutil.Discard, decompressedArchive)
-
-		return fmt.Errorf("Error processing tar file(%v): %s", err, output)
-	}
-	return nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go
deleted file mode 100644
index f2973132a..000000000
--- a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
-
-import (
-	"io"
-
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/longpath"
-)
-
-// chroot is not supported by Windows
-func chroot(path string) error {
-	return nil
-}
-
-func invokeUnpack(decompressedArchive io.ReadCloser,
-	dest string,
-	options *archive.TarOptions) error {
-	// Windows is different to Linux here because Windows does not support
-	// chroot. Hence there is no point sandboxing a chrooted process to
-	// do the unpack. We call inline instead within the daemon process.
-	return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options)
-}
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go
deleted file mode 100644
index 9802fad51..000000000
--- a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
-
-import (
-	"fmt"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-
-	"github.com/docker/docker/pkg/mount"
-	rsystem "github.com/opencontainers/runc/libcontainer/system"
-	"golang.org/x/sys/unix"
-)
-
-// chroot on linux uses pivot_root instead of chroot
-// pivot_root takes a new root and an old root.
-// Old root must be a sub-dir of new root, it is where the current rootfs will reside after the call to pivot_root.
-// New root is where the new rootfs is set to.
-// Old root is removed after the call to pivot_root so it is no longer available under the new root.
-// This is similar to how libcontainer sets up a container's rootfs
-func chroot(path string) (err error) {
-	// if the engine is running in a user namespace we need to use actual chroot
-	if rsystem.RunningInUserNS() {
-		return realChroot(path)
-	}
-	if err := unix.Unshare(unix.CLONE_NEWNS); err != nil {
-		return fmt.Errorf("Error creating mount namespace before pivot: %v", err)
-	}
-
-	// Make everything in new ns slave.
-	// Don't use `private` here, as this could race: the mountns could get a
-	// reference to a mount while an unmount from the host does not propagate,
-	// which could potentially cause transient errors for other operations.
-	// Even though the window is relatively small here, `slave` avoids this
-	// and should not cause any problems.
-	if err := mount.MakeRSlave("/"); err != nil {
-		return err
-	}
-
-	if mounted, _ := mount.Mounted(path); !mounted {
-		if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil {
-			return realChroot(path)
-		}
-	}
-
-	// setup oldRoot for pivot_root
-	pivotDir, err := ioutil.TempDir(path, ".pivot_root")
-	if err != nil {
-		return fmt.Errorf("Error setting up pivot dir: %v", err)
-	}
-
-	var mounted bool
-	defer func() {
-		if mounted {
-			// make sure pivotDir is not mounted before we try to remove it
-			if errCleanup := unix.Unmount(pivotDir, unix.MNT_DETACH); errCleanup != nil {
-				if err == nil {
-					err = errCleanup
-				}
-				return
-			}
-		}
-
-		errCleanup := os.Remove(pivotDir)
-		// pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful
-		// because we already cleaned it up on failed pivot_root
-		if errCleanup != nil && !os.IsNotExist(errCleanup) {
-			errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup)
-			if err == nil {
-				err = errCleanup
-			}
-		}
-	}()
-
-	if err := unix.PivotRoot(path, pivotDir); err != nil {
-		// If pivot fails, fall back to the normal chroot after cleaning up temp dir
-		if err := os.Remove(pivotDir); err != nil {
-			return fmt.Errorf("Error cleaning up after failed pivot: %v", err)
-		}
-		return realChroot(path)
-	}
-	mounted = true
-
-	// This is the new path for where the old root (prior to the pivot) has been moved to
-	// This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction
-	pivotDir = filepath.Join("/", filepath.Base(pivotDir))
-
-	if err := unix.Chdir("/"); err != nil {
-		return fmt.Errorf("Error changing to new root: %v", err)
-	}
-
-	// Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host
-	if err := unix.Mount("", pivotDir, "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil {
-		return fmt.Errorf("Error making old root private after pivot: %v", err)
-	}
-
-	// Now unmount the old root so it's no longer visible from the new root
-	if err := unix.Unmount(pivotDir, unix.MNT_DETACH); err != nil {
-		return fmt.Errorf("Error while unmounting old root after pivot: %v", err)
-	}
-	mounted = false
-
-	return nil
-}
-
-func realChroot(path string) error {
-	if err := unix.Chroot(path); err != nil {
-		return fmt.Errorf("Error after fallback to chroot: %v", err)
-	}
-	if err := unix.Chdir("/"); err != nil {
-		return fmt.Errorf("Error changing to new root after chroot: %v", err)
-	}
-	return nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go
deleted file mode 100644
index 9a1ee5875..000000000
--- a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build !windows,!linux
-
-package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
-
-import "golang.org/x/sys/unix"
-
-func chroot(path string) error {
-	if err := unix.Chroot(path); err != nil {
-		return err
-	}
-	return unix.Chdir("/")
-}
diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go
deleted file mode 100644
index 7712cc17c..000000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go +++ /dev/null @@ -1,23 +0,0 @@ -package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" - -import ( - "io" - - "github.com/docker/docker/pkg/archive" -) - -// ApplyLayer parses a diff in the standard layer format from `layer`, -// and applies it to the directory `dest`. The stream `layer` can only be -// uncompressed. -// Returns the size in bytes of the contents of the layer. -func ApplyLayer(dest string, layer io.Reader) (size int64, err error) { - return applyLayerHandler(dest, layer, &archive.TarOptions{}, true) -} - -// ApplyUncompressedLayer parses a diff in the standard layer format from -// `layer`, and applies it to the directory `dest`. The stream `layer` -// can only be uncompressed. -// Returns the size in bytes of the contents of the layer. -func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) { - return applyLayerHandler(dest, layer, options, false) -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go deleted file mode 100644 index d96a09f8f..000000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go +++ /dev/null @@ -1,130 +0,0 @@ -//+build !windows - -package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" - -import ( - "bytes" - "encoding/json" - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/reexec" - "github.com/docker/docker/pkg/system" - rsystem "github.com/opencontainers/runc/libcontainer/system" -) - -type applyLayerResponse struct { - LayerSize int64 `json:"layerSize"` -} - -// applyLayer is the entry-point for docker-applylayer on re-exec. This is not -// used on Windows as it does not support chroot, hence no point sandboxing -// through chroot and rexec. -func applyLayer() { - - var ( - tmpDir string - err error - options *archive.TarOptions - ) - runtime.LockOSThread() - flag.Parse() - - inUserns := rsystem.RunningInUserNS() - if err := chroot(flag.Arg(0)); err != nil { - fatal(err) - } - - // We need to be able to set any perms - oldmask, err := system.Umask(0) - defer system.Umask(oldmask) - if err != nil { - fatal(err) - } - - if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil { - fatal(err) - } - - if inUserns { - options.InUserNS = true - } - - if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil { - fatal(err) - } - - os.Setenv("TMPDIR", tmpDir) - size, err := archive.UnpackLayer("/", os.Stdin, options) - os.RemoveAll(tmpDir) - if err != nil { - fatal(err) - } - - encoder := json.NewEncoder(os.Stdout) - if err := encoder.Encode(applyLayerResponse{size}); err != nil { - fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err)) - } - - if _, err := flush(os.Stdin); err != nil { - fatal(err) - } - - os.Exit(0) -} - -// applyLayerHandler parses a diff in the standard layer format from `layer`, and -// applies it to the directory `dest`. Returns the size in bytes of the -// contents of the layer. 
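Both docker-untar and docker-applyLayer above rely on the pkg/reexec pattern: register a named entry-point at init time, then re-execute the current binary under that name so the child can be sandboxed (chroot) before doing any work. A hedged sketch of the pattern; the handler name my-worker is hypothetical, while Register/Init/Command is the API the deleted files use:

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/reexec"
)

func init() {
	// Register the sandboxed entry-point under a well-known name; when the
	// binary is re-executed with os.Args[0] == "my-worker", reexec.Init
	// runs this function instead of main.
	reexec.Register("my-worker", func() {
		fmt.Println("running re-exec'd in child, pid", os.Getpid())
		os.Exit(0)
	})
}

func main() {
	if reexec.Init() {
		// We were the re-exec'd child; Init already ran the handler.
		return
	}
	// Parent path: spawn ourselves again under the registered name.
	cmd := reexec.Command("my-worker")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, "re-exec failed:", err)
		os.Exit(1)
	}
}
```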
-func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { - dest = filepath.Clean(dest) - if decompress { - decompressed, err := archive.DecompressStream(layer) - if err != nil { - return 0, err - } - defer decompressed.Close() - - layer = decompressed - } - if options == nil { - options = &archive.TarOptions{} - if rsystem.RunningInUserNS() { - options.InUserNS = true - } - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - - data, err := json.Marshal(options) - if err != nil { - return 0, fmt.Errorf("ApplyLayer json encode: %v", err) - } - - cmd := reexec.Command("docker-applyLayer", dest) - cmd.Stdin = layer - cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) - - outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) - cmd.Stdout, cmd.Stderr = outBuf, errBuf - - if err = cmd.Run(); err != nil { - return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) - } - - // Stdout should be a valid JSON struct representing an applyLayerResponse. - response := applyLayerResponse{} - decoder := json.NewDecoder(outBuf) - if err = decoder.Decode(&response); err != nil { - return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) - } - - return response.LayerSize, nil -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go deleted file mode 100644 index 8f3f3a4a8..000000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go +++ /dev/null @@ -1,45 +0,0 @@ -package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/longpath" -) - -// applyLayerHandler parses a diff in the standard layer format from `layer`, and -// applies it to the directory `dest`. Returns the size in bytes of the -// contents of the layer. -func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { - dest = filepath.Clean(dest) - - // Ensure it is a Windows-style volume path - dest = longpath.AddPrefix(dest) - - if decompress { - decompressed, err := archive.DecompressStream(layer) - if err != nil { - return 0, err - } - defer decompressed.Close() - - layer = decompressed - } - - tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract") - if err != nil { - return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. 
%s", dest, err) - } - - s, err := archive.UnpackLayer(dest, layer, nil) - os.RemoveAll(tmpDir) - if err != nil { - return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err) - } - - return s, nil -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go deleted file mode 100644 index a15e4bb83..000000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build !windows - -package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" - -import ( - "fmt" - "io" - "io/ioutil" - "os" - - "github.com/docker/docker/pkg/reexec" -) - -func init() { - reexec.Register("docker-applyLayer", applyLayer) - reexec.Register("docker-untar", untar) -} - -func fatal(err error) { - fmt.Fprint(os.Stderr, err) - os.Exit(1) -} - -// flush consumes all the bytes from the reader discarding -// any errors -func flush(r io.Reader) (bytes int64, err error) { - return io.Copy(ioutil.Discard, r) -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go deleted file mode 100644 index 15ed874e7..000000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go +++ /dev/null @@ -1,4 +0,0 @@ -package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" - -func init() { -} diff --git a/vendor/github.com/docker/docker/pkg/containerfs/archiver.go b/vendor/github.com/docker/docker/pkg/containerfs/archiver.go deleted file mode 100644 index 1fb7ff7bd..000000000 --- a/vendor/github.com/docker/docker/pkg/containerfs/archiver.go +++ /dev/null @@ -1,203 +0,0 @@ -package containerfs // import "github.com/docker/docker/pkg/containerfs" - -import ( - "archive/tar" - "fmt" - "io" - "os" - "path/filepath" - "time" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" -) - -// TarFunc provides a function definition for a custom Tar function -type TarFunc func(string, *archive.TarOptions) (io.ReadCloser, error) - -// UntarFunc provides a function definition for a custom Untar function -type UntarFunc func(io.Reader, string, *archive.TarOptions) error - -// Archiver provides a similar implementation of the archive.Archiver package with the rootfs abstraction -type Archiver struct { - SrcDriver Driver - DstDriver Driver - Tar TarFunc - Untar UntarFunc - IDMappingsVar *idtools.IDMappings -} - -// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. -// If either Tar or Untar fails, TarUntar aborts and returns the error. -func (archiver *Archiver) TarUntar(src, dst string) error { - logrus.Debugf("TarUntar(%s %s)", src, dst) - tarArchive, err := archiver.Tar(src, &archive.TarOptions{Compression: archive.Uncompressed}) - if err != nil { - return err - } - defer tarArchive.Close() - options := &archive.TarOptions{ - UIDMaps: archiver.IDMappingsVar.UIDs(), - GIDMaps: archiver.IDMappingsVar.GIDs(), - } - return archiver.Untar(tarArchive, dst, options) -} - -// UntarPath untar a file from path to a destination, src is the source tar file path. 
-func (archiver *Archiver) UntarPath(src, dst string) error { - tarArchive, err := archiver.SrcDriver.Open(src) - if err != nil { - return err - } - defer tarArchive.Close() - options := &archive.TarOptions{ - UIDMaps: archiver.IDMappingsVar.UIDs(), - GIDMaps: archiver.IDMappingsVar.GIDs(), - } - return archiver.Untar(tarArchive, dst, options) -} - -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. -// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. -func (archiver *Archiver) CopyWithTar(src, dst string) error { - srcSt, err := archiver.SrcDriver.Stat(src) - if err != nil { - return err - } - if !srcSt.IsDir() { - return archiver.CopyFileWithTar(src, dst) - } - - // if this archiver is set up with ID mapping we need to create - // the new destination directory with the remapped root UID/GID pair - // as owner - rootIDs := archiver.IDMappingsVar.RootPair() - // Create dst, copy src's content into it - if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { - return err - } - logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) - return archiver.TarUntar(src, dst) -} - -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. -func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { - logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) - srcDriver := archiver.SrcDriver - dstDriver := archiver.DstDriver - - srcSt, err := srcDriver.Stat(src) - if err != nil { - return err - } - - if srcSt.IsDir() { - return fmt.Errorf("Can't copy a directory") - } - - // Clean up the trailing slash. This must be done in an operating - // system specific manner. - if dst[len(dst)-1] == dstDriver.Separator() { - dst = dstDriver.Join(dst, srcDriver.Base(src)) - } - - // The original call was system.MkdirAll, which is just - // os.MkdirAll on not-Windows and changed for Windows. - if dstDriver.OS() == "windows" { - // Now we are WCOW - if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil { - return err - } - } else { - // We can just use the driver.MkdirAll function - if err := dstDriver.MkdirAll(dstDriver.Dir(dst), 0700); err != nil { - return err - } - } - - r, w := io.Pipe() - errC := make(chan error, 1) - - go func() { - defer close(errC) - errC <- func() error { - defer w.Close() - - srcF, err := srcDriver.Open(src) - if err != nil { - return err - } - defer srcF.Close() - - hdr, err := tar.FileInfoHeader(srcSt, "") - if err != nil { - return err - } - hdr.Format = tar.FormatPAX - hdr.ModTime = hdr.ModTime.Truncate(time.Second) - hdr.AccessTime = time.Time{} - hdr.ChangeTime = time.Time{} - hdr.Name = dstDriver.Base(dst) - if dstDriver.OS() == "windows" { - hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - } else { - hdr.Mode = int64(os.FileMode(hdr.Mode)) - } - - if err := remapIDs(archiver.IDMappingsVar, hdr); err != nil { - return err - } - - tw := tar.NewWriter(w) - defer tw.Close() - if err := tw.WriteHeader(hdr); err != nil { - return err - } - if _, err := io.Copy(tw, srcF); err != nil { - return err - } - return nil - }() - }() - defer func() { - if er := <-errC; err == nil && er != nil { - err = er - } - }() - - err = archiver.Untar(r, dstDriver.Dir(dst), nil) - if err != nil { - r.CloseWithError(err) - } - return err -} - -// IDMappings returns the IDMappings of the archiver. 
-func (archiver *Archiver) IDMappings() *idtools.IDMappings { - return archiver.IDMappingsVar -} - -func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error { - ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}) - hdr.Uid, hdr.Gid = ids.UID, ids.GID - return err -} - -// chmodTarEntry is used to adjust the file permissions used in tar header based -// on the platform the archival is done. -func chmodTarEntry(perm os.FileMode) os.FileMode { - //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) - permPart := perm & os.ModePerm - noPermPart := perm &^ os.ModePerm - // Add the x bit: make everything +x from windows - permPart |= 0111 - permPart &= 0755 - - return noPermPart | permPart -} diff --git a/vendor/github.com/docker/docker/pkg/containerfs/containerfs.go b/vendor/github.com/docker/docker/pkg/containerfs/containerfs.go deleted file mode 100644 index 7bb1d8c36..000000000 --- a/vendor/github.com/docker/docker/pkg/containerfs/containerfs.go +++ /dev/null @@ -1,87 +0,0 @@ -package containerfs // import "github.com/docker/docker/pkg/containerfs" - -import ( - "path/filepath" - "runtime" - - "github.com/containerd/continuity/driver" - "github.com/containerd/continuity/pathdriver" - "github.com/docker/docker/pkg/symlink" -) - -// ContainerFS is that represents a root file system -type ContainerFS interface { - // Path returns the path to the root. Note that this may not exist - // on the local system, so the continuity operations must be used - Path() string - - // ResolveScopedPath evaluates the given path scoped to the root. - // For example, if root=/a, and path=/b/c, then this function would return /a/b/c. - // If rawPath is true, then the function will not preform any modifications - // before path resolution. Otherwise, the function will clean the given path - // by making it an absolute path. - ResolveScopedPath(path string, rawPath bool) (string, error) - - Driver -} - -// Driver combines both continuity's Driver and PathDriver interfaces with a Platform -// field to determine the OS. -type Driver interface { - // OS returns the OS where the rootfs is located. Essentially, - // runtime.GOOS for everything aside from LCOW, which is "linux" - OS() string - - // Architecture returns the hardware architecture where the - // container is located. - Architecture() string - - // Driver & PathDriver provide methods to manipulate files & paths - driver.Driver - pathdriver.PathDriver -} - -// NewLocalContainerFS is a helper function to implement daemon's Mount interface -// when the graphdriver mount point is a local path on the machine. -func NewLocalContainerFS(path string) ContainerFS { - return &local{ - path: path, - Driver: driver.LocalDriver, - PathDriver: pathdriver.LocalPathDriver, - } -} - -// NewLocalDriver provides file and path drivers for a local file system. They are -// essentially a wrapper around the `os` and `filepath` functions. 
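The CopyFileWithTar implementation above streams a single file through an in-memory tar archive: a goroutine writes the header and body into one end of an io.Pipe while the consumer unpacks from the other end, so no intermediate file ever touches disk. A stripped-down, stdlib-only sketch of that producer/consumer shape; the file name and contents are made up.

```go
package main

import (
	"archive/tar"
	"io"
	"os"
	"strings"
)

// streamAsTar writes a single in-memory file into a tar stream through a
// pipe, mirroring the shape of the removed CopyFileWithTar: one goroutine
// produces the archive while the caller consumes it.
func streamAsTar(name, contents string) io.ReadCloser {
	r, w := io.Pipe()
	go func() {
		tw := tar.NewWriter(w)
		hdr := &tar.Header{
			Name:   name,
			Mode:   0644,
			Size:   int64(len(contents)),
			Format: tar.FormatPAX,
		}
		if err := tw.WriteHeader(hdr); err != nil {
			w.CloseWithError(err)
			return
		}
		if _, err := io.Copy(tw, strings.NewReader(contents)); err != nil {
			w.CloseWithError(err)
			return
		}
		// Close the tar writer first so the trailer is flushed, then the
		// pipe (CloseWithError(nil) surfaces as EOF to the reader).
		w.CloseWithError(tw.Close())
	}()
	return r
}

func main() {
	tr := tar.NewReader(streamAsTar("hello.txt", "hello world\n"))
	hdr, err := tr.Next()
	if err != nil {
		panic(err)
	}
	os.Stdout.WriteString(hdr.Name + ": ")
	io.Copy(os.Stdout, tr)
}
```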
-func NewLocalDriver() Driver { - return &local{ - Driver: driver.LocalDriver, - PathDriver: pathdriver.LocalPathDriver, - } -} - -type local struct { - path string - driver.Driver - pathdriver.PathDriver -} - -func (l *local) Path() string { - return l.path -} - -func (l *local) ResolveScopedPath(path string, rawPath bool) (string, error) { - cleanedPath := path - if !rawPath { - cleanedPath = cleanScopedPath(path) - } - return symlink.FollowSymlinkInScope(filepath.Join(l.path, cleanedPath), l.path) -} - -func (l *local) OS() string { - return runtime.GOOS -} - -func (l *local) Architecture() string { - return runtime.GOARCH -} diff --git a/vendor/github.com/docker/docker/pkg/containerfs/containerfs_unix.go b/vendor/github.com/docker/docker/pkg/containerfs/containerfs_unix.go deleted file mode 100644 index 6a9945951..000000000 --- a/vendor/github.com/docker/docker/pkg/containerfs/containerfs_unix.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !windows - -package containerfs // import "github.com/docker/docker/pkg/containerfs" - -import "path/filepath" - -// cleanScopedPath preappends a to combine with a mnt path. -func cleanScopedPath(path string) string { - return filepath.Join(string(filepath.Separator), path) -} diff --git a/vendor/github.com/docker/docker/pkg/containerfs/containerfs_windows.go b/vendor/github.com/docker/docker/pkg/containerfs/containerfs_windows.go deleted file mode 100644 index 9fb708462..000000000 --- a/vendor/github.com/docker/docker/pkg/containerfs/containerfs_windows.go +++ /dev/null @@ -1,15 +0,0 @@ -package containerfs // import "github.com/docker/docker/pkg/containerfs" - -import "path/filepath" - -// cleanScopedPath removes the C:\ syntax, and prepares to combine -// with a volume path -func cleanScopedPath(path string) string { - if len(path) >= 2 { - c := path[0] - if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') { - path = path[2:] - } - } - return filepath.Join(string(filepath.Separator), path) -} diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper.go b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper.go deleted file mode 100644 index 63243637a..000000000 --- a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper.go +++ /dev/null @@ -1,826 +0,0 @@ -// +build linux,cgo - -package devicemapper // import "github.com/docker/docker/pkg/devicemapper" - -import ( - "errors" - "fmt" - "os" - "runtime" - "unsafe" - - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -// Same as DM_DEVICE_* enum values from libdevmapper.h -// nolint: deadcode -const ( - deviceCreate TaskType = iota - deviceReload - deviceRemove - deviceRemoveAll - deviceSuspend - deviceResume - deviceInfo - deviceDeps - deviceRename - deviceVersion - deviceStatus - deviceTable - deviceWaitevent - deviceList - deviceClear - deviceMknodes - deviceListVersions - deviceTargetMsg - deviceSetGeometry -) - -const ( - addNodeOnResume AddNodeType = iota - addNodeOnCreate -) - -// List of errors returned when using devicemapper. 
-var ( - ErrTaskRun = errors.New("dm_task_run failed") - ErrTaskSetName = errors.New("dm_task_set_name failed") - ErrTaskSetMessage = errors.New("dm_task_set_message failed") - ErrTaskSetAddNode = errors.New("dm_task_set_add_node failed") - ErrTaskSetRo = errors.New("dm_task_set_ro failed") - ErrTaskAddTarget = errors.New("dm_task_add_target failed") - ErrTaskSetSector = errors.New("dm_task_set_sector failed") - ErrTaskGetDeps = errors.New("dm_task_get_deps failed") - ErrTaskGetInfo = errors.New("dm_task_get_info failed") - ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed") - ErrTaskDeferredRemove = errors.New("dm_task_deferred_remove failed") - ErrTaskSetCookie = errors.New("dm_task_set_cookie failed") - ErrNilCookie = errors.New("cookie ptr can't be nil") - ErrGetBlockSize = errors.New("Can't get block size") - ErrUdevWait = errors.New("wait on udev cookie failed") - ErrSetDevDir = errors.New("dm_set_dev_dir failed") - ErrGetLibraryVersion = errors.New("dm_get_library_version failed") - ErrCreateRemoveTask = errors.New("Can't create task of type deviceRemove") - ErrRunRemoveDevice = errors.New("running RemoveDevice failed") - ErrInvalidAddNode = errors.New("Invalid AddNode type") - ErrBusy = errors.New("Device is Busy") - ErrDeviceIDExists = errors.New("Device Id Exists") - ErrEnxio = errors.New("No such device or address") - ErrEnoData = errors.New("No data available") -) - -var ( - dmSawBusy bool - dmSawExist bool - dmSawEnxio bool // No Such Device or Address - dmSawEnoData bool // No data available -) - -type ( - // Task represents a devicemapper task (like lvcreate, etc.) ; a task is needed for each ioctl - // command to execute. - Task struct { - unmanaged *cdmTask - } - // Deps represents dependents (layer) of a device. - Deps struct { - Count uint32 - Filler uint32 - Device []uint64 - } - // Info represents information about a device. - Info struct { - Exists int - Suspended int - LiveTable int - InactiveTable int - OpenCount int32 - EventNr uint32 - Major uint32 - Minor uint32 - ReadOnly int - TargetCount int32 - DeferredRemove int - } - // TaskType represents a type of task - TaskType int - // AddNodeType represents a type of node to be added - AddNodeType int -) - -// DeviceIDExists returns whether error conveys the information about device Id already -// exist or not. This will be true if device creation or snap creation -// operation fails if device or snap device already exists in pool. -// Current implementation is little crude as it scans the error string -// for exact pattern match. Replacing it with more robust implementation -// is desirable. 
-func DeviceIDExists(err error) bool { - return fmt.Sprint(err) == fmt.Sprint(ErrDeviceIDExists) -} - -func (t *Task) destroy() { - if t != nil { - DmTaskDestroy(t.unmanaged) - runtime.SetFinalizer(t, nil) - } -} - -// TaskCreateNamed is a convenience function for TaskCreate when a name -// will be set on the task as well -func TaskCreateNamed(t TaskType, name string) (*Task, error) { - task := TaskCreate(t) - if task == nil { - return nil, fmt.Errorf("devicemapper: Can't create task of type %d", int(t)) - } - if err := task.setName(name); err != nil { - return nil, fmt.Errorf("devicemapper: Can't set task name %s", name) - } - return task, nil -} - -// TaskCreate initializes a devicemapper task of tasktype -func TaskCreate(tasktype TaskType) *Task { - Ctask := DmTaskCreate(int(tasktype)) - if Ctask == nil { - return nil - } - task := &Task{unmanaged: Ctask} - runtime.SetFinalizer(task, (*Task).destroy) - return task -} - -func (t *Task) run() error { - if res := DmTaskRun(t.unmanaged); res != 1 { - return ErrTaskRun - } - runtime.KeepAlive(t) - return nil -} - -func (t *Task) setName(name string) error { - if res := DmTaskSetName(t.unmanaged, name); res != 1 { - return ErrTaskSetName - } - return nil -} - -func (t *Task) setMessage(message string) error { - if res := DmTaskSetMessage(t.unmanaged, message); res != 1 { - return ErrTaskSetMessage - } - return nil -} - -func (t *Task) setSector(sector uint64) error { - if res := DmTaskSetSector(t.unmanaged, sector); res != 1 { - return ErrTaskSetSector - } - return nil -} - -func (t *Task) setCookie(cookie *uint, flags uint16) error { - if cookie == nil { - return ErrNilCookie - } - if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 { - return ErrTaskSetCookie - } - return nil -} - -func (t *Task) setAddNode(addNode AddNodeType) error { - if addNode != addNodeOnResume && addNode != addNodeOnCreate { - return ErrInvalidAddNode - } - if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 { - return ErrTaskSetAddNode - } - return nil -} - -func (t *Task) setRo() error { - if res := DmTaskSetRo(t.unmanaged); res != 1 { - return ErrTaskSetRo - } - return nil -} - -func (t *Task) addTarget(start, size uint64, ttype, params string) error { - if res := DmTaskAddTarget(t.unmanaged, start, size, - ttype, params); res != 1 { - return ErrTaskAddTarget - } - return nil -} - -func (t *Task) getDeps() (*Deps, error) { - var deps *Deps - if deps = DmTaskGetDeps(t.unmanaged); deps == nil { - return nil, ErrTaskGetDeps - } - return deps, nil -} - -func (t *Task) getInfo() (*Info, error) { - info := &Info{} - if res := DmTaskGetInfo(t.unmanaged, info); res != 1 { - return nil, ErrTaskGetInfo - } - return info, nil -} - -func (t *Task) getInfoWithDeferred() (*Info, error) { - info := &Info{} - if res := DmTaskGetInfoWithDeferred(t.unmanaged, info); res != 1 { - return nil, ErrTaskGetInfo - } - return info, nil -} - -func (t *Task) getDriverVersion() (string, error) { - res := DmTaskGetDriverVersion(t.unmanaged) - if res == "" { - return "", ErrTaskGetDriverVersion - } - return res, nil -} - -func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start uint64, - length uint64, targetType string, params string) { - - return DmGetNextTarget(t.unmanaged, next, &start, &length, - &targetType, ¶ms), - start, length, targetType, params -} - -// UdevWait waits for any processes that are waiting for udev to complete the specified cookie. 
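Every helper in the removed devmapper.go follows the same task pipeline: TaskCreateNamed builds a named task, the setters configure it, run() issues the ioctl, and a getter extracts the result. The exported wrappers can be exercised directly; the sketch below is only illustrative — building it requires linux, cgo, and the libdevmapper headers, running it typically requires root, and the device name is invented.

```go
// +build linux,cgo

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/devicemapper"
)

func main() {
	// The version getters exercise the same task pipeline with no device
	// involved.
	if v, err := devicemapper.GetLibraryVersion(); err == nil {
		fmt.Println("libdm version:", v)
	}

	// GetInfo wraps a deviceInfo task; a nonexistent name (invented here)
	// comes back with Exists == 0 rather than an error.
	info, err := devicemapper.GetInfo("no-such-device")
	if err != nil {
		fmt.Println("info failed:", err)
		return
	}
	fmt.Printf("exists=%d suspended=%d openCount=%d\n",
		info.Exists, info.Suspended, info.OpenCount)
}
```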
-func UdevWait(cookie *uint) error { - if res := DmUdevWait(*cookie); res != 1 { - logrus.Debugf("devicemapper: Failed to wait on udev cookie %d, %d", *cookie, res) - return ErrUdevWait - } - return nil -} - -// SetDevDir sets the dev folder for the device mapper library (usually /dev). -func SetDevDir(dir string) error { - if res := DmSetDevDir(dir); res != 1 { - logrus.Debug("devicemapper: Error dm_set_dev_dir") - return ErrSetDevDir - } - return nil -} - -// GetLibraryVersion returns the device mapper library version. -func GetLibraryVersion() (string, error) { - var version string - if res := DmGetLibraryVersion(&version); res != 1 { - return "", ErrGetLibraryVersion - } - return version, nil -} - -// UdevSyncSupported returns whether device-mapper is able to sync with udev -// -// This is essential otherwise race conditions can arise where both udev and -// device-mapper attempt to create and destroy devices. -func UdevSyncSupported() bool { - return DmUdevGetSyncSupport() != 0 -} - -// UdevSetSyncSupport allows setting whether the udev sync should be enabled. -// The return bool indicates the state of whether the sync is enabled. -func UdevSetSyncSupport(enable bool) bool { - if enable { - DmUdevSetSyncSupport(1) - } else { - DmUdevSetSyncSupport(0) - } - - return UdevSyncSupported() -} - -// CookieSupported returns whether the version of device-mapper supports the -// use of cookie's in the tasks. -// This is largely a lower level call that other functions use. -func CookieSupported() bool { - return DmCookieSupported() != 0 -} - -// RemoveDevice is a useful helper for cleaning up a device. -func RemoveDevice(name string) error { - task, err := TaskCreateNamed(deviceRemove, name) - if task == nil { - return err - } - - cookie := new(uint) - if err := task.setCookie(cookie, 0); err != nil { - return fmt.Errorf("devicemapper: Can not set cookie: %s", err) - } - defer UdevWait(cookie) - - dmSawBusy = false // reset before the task is run - dmSawEnxio = false - if err = task.run(); err != nil { - if dmSawBusy { - return ErrBusy - } - if dmSawEnxio { - return ErrEnxio - } - return fmt.Errorf("devicemapper: Error running RemoveDevice %s", err) - } - - return nil -} - -// RemoveDeviceDeferred is a useful helper for cleaning up a device, but deferred. -func RemoveDeviceDeferred(name string) error { - logrus.Debugf("devicemapper: RemoveDeviceDeferred START(%s)", name) - defer logrus.Debugf("devicemapper: RemoveDeviceDeferred END(%s)", name) - task, err := TaskCreateNamed(deviceRemove, name) - if task == nil { - return err - } - - if err := DmTaskDeferredRemove(task.unmanaged); err != 1 { - return ErrTaskDeferredRemove - } - - // set a task cookie and disable library fallback, or else libdevmapper will - // disable udev dm rules and delete the symlink under /dev/mapper by itself, - // even if the removal is deferred by the kernel. - cookie := new(uint) - flags := uint16(DmUdevDisableLibraryFallback) - if err := task.setCookie(cookie, flags); err != nil { - return fmt.Errorf("devicemapper: Can not set cookie: %s", err) - } - - // libdevmapper and udev relies on System V semaphore for synchronization, - // semaphores created in `task.setCookie` will be cleaned up in `UdevWait`. - // So these two function call must come in pairs, otherwise semaphores will - // be leaked, and the limit of number of semaphores defined in `/proc/sys/kernel/sem` - // will be reached, which will eventually make all following calls to 'task.SetCookie' - // fail. 
- // this call will not wait for the deferred removal's final executing, since no - // udev event will be generated, and the semaphore's value will not be incremented - // by udev, what UdevWait is just cleaning up the semaphore. - defer UdevWait(cookie) - - dmSawEnxio = false - if err = task.run(); err != nil { - if dmSawEnxio { - return ErrEnxio - } - return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err) - } - - return nil -} - -// CancelDeferredRemove cancels a deferred remove for a device. -func CancelDeferredRemove(deviceName string) error { - task, err := TaskCreateNamed(deviceTargetMsg, deviceName) - if task == nil { - return err - } - - if err := task.setSector(0); err != nil { - return fmt.Errorf("devicemapper: Can't set sector %s", err) - } - - if err := task.setMessage(fmt.Sprintf("@cancel_deferred_remove")); err != nil { - return fmt.Errorf("devicemapper: Can't set message %s", err) - } - - dmSawBusy = false - dmSawEnxio = false - if err := task.run(); err != nil { - // A device might be being deleted already - if dmSawBusy { - return ErrBusy - } else if dmSawEnxio { - return ErrEnxio - } - return fmt.Errorf("devicemapper: Error running CancelDeferredRemove %s", err) - - } - return nil -} - -// GetBlockDeviceSize returns the size of a block device identified by the specified file. -func GetBlockDeviceSize(file *os.File) (uint64, error) { - size, err := ioctlBlkGetSize64(file.Fd()) - if err != nil { - logrus.Errorf("devicemapper: Error getblockdevicesize: %s", err) - return 0, ErrGetBlockSize - } - return uint64(size), nil -} - -// BlockDeviceDiscard runs discard for the given path. -// This is used as a workaround for the kernel not discarding block so -// on the thin pool when we remove a thinp device, so we do it -// manually -func BlockDeviceDiscard(path string) error { - file, err := os.OpenFile(path, os.O_RDWR, 0) - if err != nil { - return err - } - defer file.Close() - - size, err := GetBlockDeviceSize(file) - if err != nil { - return err - } - - if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil { - return err - } - - // Without this sometimes the remove of the device that happens after - // discard fails with EBUSY. - unix.Sync() - - return nil -} - -// CreatePool is the programmatic example of "dmsetup create". -// It creates a device with the specified poolName, data and metadata file and block size. -func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { - task, err := TaskCreateNamed(deviceCreate, poolName) - if task == nil { - return err - } - - size, err := GetBlockDeviceSize(dataFile) - if err != nil { - return fmt.Errorf("devicemapper: Can't get data size %s", err) - } - - params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) - if err := task.addTarget(0, size/512, "thin-pool", params); err != nil { - return fmt.Errorf("devicemapper: Can't add target %s", err) - } - - cookie := new(uint) - flags := uint16(DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag) - if err := task.setCookie(cookie, flags); err != nil { - return fmt.Errorf("devicemapper: Can't set cookie %s", err) - } - defer UdevWait(cookie) - - if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running deviceCreate (CreatePool) %s", err) - } - - return nil -} - -// ReloadPool is the programmatic example of "dmsetup reload". 
-// It reloads the table with the specified poolName, data and metadata file and block size. -func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { - task, err := TaskCreateNamed(deviceReload, poolName) - if task == nil { - return err - } - - size, err := GetBlockDeviceSize(dataFile) - if err != nil { - return fmt.Errorf("devicemapper: Can't get data size %s", err) - } - - params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) - if err := task.addTarget(0, size/512, "thin-pool", params); err != nil { - return fmt.Errorf("devicemapper: Can't add target %s", err) - } - - if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running ReloadPool %s", err) - } - - return nil -} - -// GetDeps is the programmatic example of "dmsetup deps". -// It outputs a list of devices referenced by the live table for the specified device. -func GetDeps(name string) (*Deps, error) { - task, err := TaskCreateNamed(deviceDeps, name) - if task == nil { - return nil, err - } - if err := task.run(); err != nil { - return nil, err - } - return task.getDeps() -} - -// GetInfo is the programmatic example of "dmsetup info". -// It outputs some brief information about the device. -func GetInfo(name string) (*Info, error) { - task, err := TaskCreateNamed(deviceInfo, name) - if task == nil { - return nil, err - } - if err := task.run(); err != nil { - return nil, err - } - return task.getInfo() -} - -// GetInfoWithDeferred is the programmatic example of "dmsetup info", but deferred. -// It outputs some brief information about the device. -func GetInfoWithDeferred(name string) (*Info, error) { - task, err := TaskCreateNamed(deviceInfo, name) - if task == nil { - return nil, err - } - if err := task.run(); err != nil { - return nil, err - } - return task.getInfoWithDeferred() -} - -// GetDriverVersion is the programmatic example of "dmsetup version". -// It outputs version information of the driver. -func GetDriverVersion() (string, error) { - task := TaskCreate(deviceVersion) - if task == nil { - return "", fmt.Errorf("devicemapper: Can't create deviceVersion task") - } - if err := task.run(); err != nil { - return "", err - } - return task.getDriverVersion() -} - -// GetStatus is the programmatic example of "dmsetup status". -// It outputs status information for the specified device name. -func GetStatus(name string) (uint64, uint64, string, string, error) { - task, err := TaskCreateNamed(deviceStatus, name) - if task == nil { - logrus.Debugf("devicemapper: GetStatus() Error TaskCreateNamed: %s", err) - return 0, 0, "", "", err - } - if err := task.run(); err != nil { - logrus.Debugf("devicemapper: GetStatus() Error Run: %s", err) - return 0, 0, "", "", err - } - - devinfo, err := task.getInfo() - if err != nil { - logrus.Debugf("devicemapper: GetStatus() Error GetInfo: %s", err) - return 0, 0, "", "", err - } - if devinfo.Exists == 0 { - logrus.Debugf("devicemapper: GetStatus() Non existing device %s", name) - return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) - } - - _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) - return start, length, targetType, params, nil -} - -// GetTable is the programmatic example for "dmsetup table". -// It outputs the current table for the specified device name. 
-func GetTable(name string) (uint64, uint64, string, string, error) { - task, err := TaskCreateNamed(deviceTable, name) - if task == nil { - logrus.Debugf("devicemapper: GetTable() Error TaskCreateNamed: %s", err) - return 0, 0, "", "", err - } - if err := task.run(); err != nil { - logrus.Debugf("devicemapper: GetTable() Error Run: %s", err) - return 0, 0, "", "", err - } - - devinfo, err := task.getInfo() - if err != nil { - logrus.Debugf("devicemapper: GetTable() Error GetInfo: %s", err) - return 0, 0, "", "", err - } - if devinfo.Exists == 0 { - logrus.Debugf("devicemapper: GetTable() Non existing device %s", name) - return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) - } - - _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) - return start, length, targetType, params, nil -} - -// SetTransactionID sets a transaction id for the specified device name. -func SetTransactionID(poolName string, oldID uint64, newID uint64) error { - task, err := TaskCreateNamed(deviceTargetMsg, poolName) - if task == nil { - return err - } - - if err := task.setSector(0); err != nil { - return fmt.Errorf("devicemapper: Can't set sector %s", err) - } - - if err := task.setMessage(fmt.Sprintf("set_transaction_id %d %d", oldID, newID)); err != nil { - return fmt.Errorf("devicemapper: Can't set message %s", err) - } - - if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running SetTransactionID %s", err) - } - return nil -} - -// SuspendDevice is the programmatic example of "dmsetup suspend". -// It suspends the specified device. -func SuspendDevice(name string) error { - task, err := TaskCreateNamed(deviceSuspend, name) - if task == nil { - return err - } - if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running deviceSuspend %s", err) - } - return nil -} - -// ResumeDevice is the programmatic example of "dmsetup resume". -// It un-suspends the specified device. -func ResumeDevice(name string) error { - task, err := TaskCreateNamed(deviceResume, name) - if task == nil { - return err - } - - cookie := new(uint) - if err := task.setCookie(cookie, 0); err != nil { - return fmt.Errorf("devicemapper: Can't set cookie %s", err) - } - defer UdevWait(cookie) - - if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running deviceResume %s", err) - } - - return nil -} - -// CreateDevice creates a device with the specified poolName with the specified device id. -func CreateDevice(poolName string, deviceID int) error { - logrus.Debugf("devicemapper: CreateDevice(poolName=%v, deviceID=%v)", poolName, deviceID) - task, err := TaskCreateNamed(deviceTargetMsg, poolName) - if task == nil { - return err - } - - if err := task.setSector(0); err != nil { - return fmt.Errorf("devicemapper: Can't set sector %s", err) - } - - if err := task.setMessage(fmt.Sprintf("create_thin %d", deviceID)); err != nil { - return fmt.Errorf("devicemapper: Can't set message %s", err) - } - - dmSawExist = false // reset before the task is run - if err := task.run(); err != nil { - // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. - if dmSawExist { - return ErrDeviceIDExists - } - - return fmt.Errorf("devicemapper: Error running CreateDevice %s", err) - - } - return nil -} - -// DeleteDevice deletes a device with the specified poolName with the specified device id. 
-func DeleteDevice(poolName string, deviceID int) error { - task, err := TaskCreateNamed(deviceTargetMsg, poolName) - if task == nil { - return err - } - - if err := task.setSector(0); err != nil { - return fmt.Errorf("devicemapper: Can't set sector %s", err) - } - - if err := task.setMessage(fmt.Sprintf("delete %d", deviceID)); err != nil { - return fmt.Errorf("devicemapper: Can't set message %s", err) - } - - dmSawBusy = false - dmSawEnoData = false - if err := task.run(); err != nil { - if dmSawBusy { - return ErrBusy - } - if dmSawEnoData { - logrus.Debugf("devicemapper: Device(id: %d) from pool(%s) does not exist", deviceID, poolName) - return nil - } - return fmt.Errorf("devicemapper: Error running DeleteDevice %s", err) - } - return nil -} - -// ActivateDevice activates the device identified by the specified -// poolName, name and deviceID with the specified size. -func ActivateDevice(poolName string, name string, deviceID int, size uint64) error { - return activateDevice(poolName, name, deviceID, size, "") -} - -// ActivateDeviceWithExternal activates the device identified by the specified -// poolName, name and deviceID with the specified size. -func ActivateDeviceWithExternal(poolName string, name string, deviceID int, size uint64, external string) error { - return activateDevice(poolName, name, deviceID, size, external) -} - -func activateDevice(poolName string, name string, deviceID int, size uint64, external string) error { - task, err := TaskCreateNamed(deviceCreate, name) - if task == nil { - return err - } - - var params string - if len(external) > 0 { - params = fmt.Sprintf("%s %d %s", poolName, deviceID, external) - } else { - params = fmt.Sprintf("%s %d", poolName, deviceID) - } - if err := task.addTarget(0, size/512, "thin", params); err != nil { - return fmt.Errorf("devicemapper: Can't add target %s", err) - } - if err := task.setAddNode(addNodeOnCreate); err != nil { - return fmt.Errorf("devicemapper: Can't add node %s", err) - } - - cookie := new(uint) - if err := task.setCookie(cookie, 0); err != nil { - return fmt.Errorf("devicemapper: Can't set cookie %s", err) - } - - defer UdevWait(cookie) - - if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running deviceCreate (ActivateDevice) %s", err) - } - - return nil -} - -// CreateSnapDeviceRaw creates a snapshot device. Caller needs to suspend and resume the origin device if it is active. -func CreateSnapDeviceRaw(poolName string, deviceID int, baseDeviceID int) error { - task, err := TaskCreateNamed(deviceTargetMsg, poolName) - if task == nil { - return err - } - - if err := task.setSector(0); err != nil { - return fmt.Errorf("devicemapper: Can't set sector %s", err) - } - - if err := task.setMessage(fmt.Sprintf("create_snap %d %d", deviceID, baseDeviceID)); err != nil { - return fmt.Errorf("devicemapper: Can't set message %s", err) - } - - dmSawExist = false // reset before the task is run - if err := task.run(); err != nil { - // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. 
- if dmSawExist { - return ErrDeviceIDExists - } - return fmt.Errorf("devicemapper: Error running deviceCreate (CreateSnapDeviceRaw) %s", err) - } - - return nil -} - -// CreateSnapDevice creates a snapshot based on the device identified by the baseName and baseDeviceId, -func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDeviceID int) error { - devinfo, _ := GetInfo(baseName) - doSuspend := devinfo != nil && devinfo.Exists != 0 - - if doSuspend { - if err := SuspendDevice(baseName); err != nil { - return err - } - } - - if err := CreateSnapDeviceRaw(poolName, deviceID, baseDeviceID); err != nil { - if doSuspend { - if err2 := ResumeDevice(baseName); err2 != nil { - return fmt.Errorf("CreateSnapDeviceRaw Error: (%v): ResumeDevice Error: (%v)", err, err2) - } - } - return err - } - - if doSuspend { - if err := ResumeDevice(baseName); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_log.go b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_log.go deleted file mode 100644 index 5a5773d44..000000000 --- a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_log.go +++ /dev/null @@ -1,124 +0,0 @@ -// +build linux,cgo - -package devicemapper // import "github.com/docker/docker/pkg/devicemapper" - -import "C" - -import ( - "fmt" - "strings" - - "github.com/sirupsen/logrus" -) - -// DevmapperLogger defines methods required to register as a callback for -// logging events received from devicemapper. Note that devicemapper will send -// *all* logs regardless to callbacks (including debug logs) so it's -// recommended to not spam the console with the outputs. -type DevmapperLogger interface { - // DMLog is the logging callback containing all of the information from - // devicemapper. The interface is identical to the C libdm counterpart. - DMLog(level int, file string, line int, dmError int, message string) -} - -// dmLogger is the current logger in use that is being forwarded our messages. -var dmLogger DevmapperLogger - -// LogInit changes the logging callback called after processing libdm logs for -// error message information. The default logger simply forwards all logs to -// logrus. Calling LogInit(nil) disables the calling of callbacks. -func LogInit(logger DevmapperLogger) { - dmLogger = logger -} - -// Due to the way cgo works this has to be in a separate file, as devmapper.go has -// definitions in the cgo block, which is incompatible with using "//export" - -// DevmapperLogCallback exports the devmapper log callback for cgo. Note that -// because we are using callbacks, this function will be called for *every* log -// in libdm (even debug ones because there's no way of setting the verbosity -// level for an external logging callback). -//export DevmapperLogCallback -func DevmapperLogCallback(level C.int, file *C.char, line, dmErrnoOrClass C.int, message *C.char) { - msg := C.GoString(message) - - // Track what errno libdm saw, because the library only gives us 0 or 1. - if level < LogLevelDebug { - if strings.Contains(msg, "busy") { - dmSawBusy = true - } - - if strings.Contains(msg, "File exists") { - dmSawExist = true - } - - if strings.Contains(msg, "No such device or address") { - dmSawEnxio = true - } - if strings.Contains(msg, "No data available") { - dmSawEnoData = true - } - } - - if dmLogger != nil { - dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dmErrnoOrClass), msg) - } -} - -// DefaultLogger is the default logger used by pkg/devicemapper. 
It forwards -// all logs that are of higher or equal priority to the given level to the -// corresponding logrus level. -type DefaultLogger struct { - // Level corresponds to the highest libdm level that will be forwarded to - // logrus. In order to change this, register a new DefaultLogger. - Level int -} - -// DMLog is the logging callback containing all of the information from -// devicemapper. The interface is identical to the C libdm counterpart. -func (l DefaultLogger) DMLog(level int, file string, line, dmError int, message string) { - if level <= l.Level { - // Forward the log to the correct logrus level, if allowed by dmLogLevel. - logMsg := fmt.Sprintf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) - switch level { - case LogLevelFatal, LogLevelErr: - logrus.Error(logMsg) - case LogLevelWarn: - logrus.Warn(logMsg) - case LogLevelNotice, LogLevelInfo: - logrus.Info(logMsg) - case LogLevelDebug: - logrus.Debug(logMsg) - default: - // Don't drop any "unknown" levels. - logrus.Info(logMsg) - } - } -} - -// registerLogCallback registers our own logging callback function for libdm -// (which is DevmapperLogCallback). -// -// Because libdm only gives us {0,1} error codes we need to parse the logs -// produced by libdm (to set dmSawBusy and so on). Note that by registering a -// callback using DevmapperLogCallback, libdm will no longer output logs to -// stderr so we have to log everything ourselves. None of this handling is -// optional because we depend on log callbacks to parse the logs, and if we -// don't forward the log information we'll be in a lot of trouble when -// debugging things. -func registerLogCallback() { - LogWithErrnoInit() -} - -func init() { - // Use the default logger by default. We only allow LogLevelFatal by - // default, because internally we mask a lot of libdm errors by retrying - // and similar tricks. Also, libdm is very chatty and we don't want to - // worry users for no reason. - dmLogger = DefaultLogger{ - Level: LogLevelFatal, - } - - // Register as early as possible so we don't miss anything. - registerLogCallback() -} diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper.go b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper.go deleted file mode 100644 index 0b88f4969..000000000 --- a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper.go +++ /dev/null @@ -1,252 +0,0 @@ -// +build linux,cgo - -package devicemapper // import "github.com/docker/docker/pkg/devicemapper" - -/* -#define _GNU_SOURCE -#include -#include // FIXME: present only for BLKGETSIZE64, maybe we can remove it? - -// FIXME: Can't we find a way to do the logging in pure Go? -extern void DevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str); - -static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...) -{ - char *buffer = NULL; - va_list ap; - int ret; - - va_start(ap, f); - ret = vasprintf(&buffer, f, ap); - va_end(ap); - if (ret < 0) { - // memory allocation failed -- should never happen? - return; - } - - DevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); - free(buffer); -} - -static void log_with_errno_init() -{ - dm_log_with_errno_init(log_cb); -} -*/ -import "C" - -import ( - "reflect" - "unsafe" -) - -type ( - cdmTask C.struct_dm_task -) - -// IOCTL consts -const ( - BlkGetSize64 = C.BLKGETSIZE64 - BlkDiscard = C.BLKDISCARD -) - -// Devicemapper cookie flags. 
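The logging callback machinery removed above exists because libdm reports only 0/1 from its calls; the package recovers the real failure cause by parsing the log text (setting dmSawBusy and friends). Consumers can still hook those logs by registering their own DevmapperLogger. A small sketch under the same linux,cgo constraints; the logger type name is invented.

```go
// +build linux,cgo

package main

import (
	"log"

	"github.com/docker/docker/pkg/devicemapper"
)

// stderrLogger forwards every libdm message to the standard logger. It
// satisfies the DevmapperLogger interface from the removed devmapper_log.go.
type stderrLogger struct{}

func (stderrLogger) DMLog(level int, file string, line, dmError int, message string) {
	log.Printf("libdm[%d] %s:%d (%d): %s", level, file, line, dmError, message)
}

func main() {
	// Replaces the default logger, which only forwards LogLevelFatal;
	// LogInit(nil) disables callback forwarding again.
	devicemapper.LogInit(stderrLogger{})
}
```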
-const ( - DmUdevDisableSubsystemRulesFlag = C.DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG - DmUdevDisableDiskRulesFlag = C.DM_UDEV_DISABLE_DISK_RULES_FLAG - DmUdevDisableOtherRulesFlag = C.DM_UDEV_DISABLE_OTHER_RULES_FLAG - DmUdevDisableLibraryFallback = C.DM_UDEV_DISABLE_LIBRARY_FALLBACK -) - -// DeviceMapper mapped functions. -var ( - DmGetLibraryVersion = dmGetLibraryVersionFct - DmGetNextTarget = dmGetNextTargetFct - DmSetDevDir = dmSetDevDirFct - DmTaskAddTarget = dmTaskAddTargetFct - DmTaskCreate = dmTaskCreateFct - DmTaskDestroy = dmTaskDestroyFct - DmTaskGetDeps = dmTaskGetDepsFct - DmTaskGetInfo = dmTaskGetInfoFct - DmTaskGetDriverVersion = dmTaskGetDriverVersionFct - DmTaskRun = dmTaskRunFct - DmTaskSetAddNode = dmTaskSetAddNodeFct - DmTaskSetCookie = dmTaskSetCookieFct - DmTaskSetMessage = dmTaskSetMessageFct - DmTaskSetName = dmTaskSetNameFct - DmTaskSetRo = dmTaskSetRoFct - DmTaskSetSector = dmTaskSetSectorFct - DmUdevWait = dmUdevWaitFct - DmUdevSetSyncSupport = dmUdevSetSyncSupportFct - DmUdevGetSyncSupport = dmUdevGetSyncSupportFct - DmCookieSupported = dmCookieSupportedFct - LogWithErrnoInit = logWithErrnoInitFct - DmTaskDeferredRemove = dmTaskDeferredRemoveFct - DmTaskGetInfoWithDeferred = dmTaskGetInfoWithDeferredFct -) - -func free(p *C.char) { - C.free(unsafe.Pointer(p)) -} - -func dmTaskDestroyFct(task *cdmTask) { - C.dm_task_destroy((*C.struct_dm_task)(task)) -} - -func dmTaskCreateFct(taskType int) *cdmTask { - return (*cdmTask)(C.dm_task_create(C.int(taskType))) -} - -func dmTaskRunFct(task *cdmTask) int { - ret, _ := C.dm_task_run((*C.struct_dm_task)(task)) - return int(ret) -} - -func dmTaskSetNameFct(task *cdmTask, name string) int { - Cname := C.CString(name) - defer free(Cname) - - return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname)) -} - -func dmTaskSetMessageFct(task *cdmTask, message string) int { - Cmessage := C.CString(message) - defer free(Cmessage) - - return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage)) -} - -func dmTaskSetSectorFct(task *cdmTask, sector uint64) int { - return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector))) -} - -func dmTaskSetCookieFct(task *cdmTask, cookie *uint, flags uint16) int { - cCookie := C.uint32_t(*cookie) - defer func() { - *cookie = uint(cCookie) - }() - return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags))) -} - -func dmTaskSetAddNodeFct(task *cdmTask, addNode AddNodeType) int { - return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode))) -} - -func dmTaskSetRoFct(task *cdmTask) int { - return int(C.dm_task_set_ro((*C.struct_dm_task)(task))) -} - -func dmTaskAddTargetFct(task *cdmTask, - start, size uint64, ttype, params string) int { - - Cttype := C.CString(ttype) - defer free(Cttype) - - Cparams := C.CString(params) - defer free(Cparams) - - return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams)) -} - -func dmTaskGetDepsFct(task *cdmTask) *Deps { - Cdeps := C.dm_task_get_deps((*C.struct_dm_task)(task)) - if Cdeps == nil { - return nil - } - - // golang issue: https://github.com/golang/go/issues/11925 - hdr := reflect.SliceHeader{ - Data: uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps))), - Len: int(Cdeps.count), - Cap: int(Cdeps.count), - } - devices := *(*[]C.uint64_t)(unsafe.Pointer(&hdr)) - - deps := &Deps{ - Count: uint32(Cdeps.count), - Filler: uint32(Cdeps.filler), - } - for _, device := range devices { - deps.Device 
= append(deps.Device, uint64(device)) - } - return deps -} - -func dmTaskGetInfoFct(task *cdmTask, info *Info) int { - Cinfo := C.struct_dm_info{} - defer func() { - info.Exists = int(Cinfo.exists) - info.Suspended = int(Cinfo.suspended) - info.LiveTable = int(Cinfo.live_table) - info.InactiveTable = int(Cinfo.inactive_table) - info.OpenCount = int32(Cinfo.open_count) - info.EventNr = uint32(Cinfo.event_nr) - info.Major = uint32(Cinfo.major) - info.Minor = uint32(Cinfo.minor) - info.ReadOnly = int(Cinfo.read_only) - info.TargetCount = int32(Cinfo.target_count) - }() - return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) -} - -func dmTaskGetDriverVersionFct(task *cdmTask) string { - buffer := C.malloc(128) - defer C.free(buffer) - res := C.dm_task_get_driver_version((*C.struct_dm_task)(task), (*C.char)(buffer), 128) - if res == 0 { - return "" - } - return C.GoString((*C.char)(buffer)) -} - -func dmGetNextTargetFct(task *cdmTask, next unsafe.Pointer, start, length *uint64, target, params *string) unsafe.Pointer { - var ( - Cstart, Clength C.uint64_t - CtargetType, Cparams *C.char - ) - defer func() { - *start = uint64(Cstart) - *length = uint64(Clength) - *target = C.GoString(CtargetType) - *params = C.GoString(Cparams) - }() - - nextp := C.dm_get_next_target((*C.struct_dm_task)(task), next, &Cstart, &Clength, &CtargetType, &Cparams) - return nextp -} - -func dmUdevSetSyncSupportFct(syncWithUdev int) { - C.dm_udev_set_sync_support(C.int(syncWithUdev)) -} - -func dmUdevGetSyncSupportFct() int { - return int(C.dm_udev_get_sync_support()) -} - -func dmUdevWaitFct(cookie uint) int { - return int(C.dm_udev_wait(C.uint32_t(cookie))) -} - -func dmCookieSupportedFct() int { - return int(C.dm_cookie_supported()) -} - -func logWithErrnoInitFct() { - C.log_with_errno_init() -} - -func dmSetDevDirFct(dir string) int { - Cdir := C.CString(dir) - defer free(Cdir) - - return int(C.dm_set_dev_dir(Cdir)) -} - -func dmGetLibraryVersionFct(version *string) int { - buffer := C.CString(string(make([]byte, 128))) - defer free(buffer) - defer func() { - *version = C.GoString(buffer) - }() - return int(C.dm_get_library_version(buffer, 128)) -} diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_dynamic.go b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_dynamic.go deleted file mode 100644 index 8a1098f7d..000000000 --- a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_dynamic.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build linux,cgo,!static_build - -package devicemapper // import "github.com/docker/docker/pkg/devicemapper" - -// #cgo pkg-config: devmapper -import "C" diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_dynamic_deferred_remove.go b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_dynamic_deferred_remove.go deleted file mode 100644 index 3d3021c4e..000000000 --- a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_dynamic_deferred_remove.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build linux,cgo,!static_build -// +build !libdm_dlsym_deferred_remove,!libdm_no_deferred_remove - -package devicemapper // import "github.com/docker/docker/pkg/devicemapper" - -/* -#include -*/ -import "C" - -// LibraryDeferredRemovalSupport tells if the feature is supported by the -// current Docker invocation. 
-const LibraryDeferredRemovalSupport = true - -func dmTaskDeferredRemoveFct(task *cdmTask) int { - return int(C.dm_task_deferred_remove((*C.struct_dm_task)(task))) -} - -func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { - Cinfo := C.struct_dm_info{} - defer func() { - info.Exists = int(Cinfo.exists) - info.Suspended = int(Cinfo.suspended) - info.LiveTable = int(Cinfo.live_table) - info.InactiveTable = int(Cinfo.inactive_table) - info.OpenCount = int32(Cinfo.open_count) - info.EventNr = uint32(Cinfo.event_nr) - info.Major = uint32(Cinfo.major) - info.Minor = uint32(Cinfo.minor) - info.ReadOnly = int(Cinfo.read_only) - info.TargetCount = int32(Cinfo.target_count) - info.DeferredRemove = int(Cinfo.deferred_remove) - }() - return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) -} diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_dynamic_dlsym_deferred_remove.go b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_dynamic_dlsym_deferred_remove.go deleted file mode 100644 index 5dfb369f1..000000000 --- a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_dynamic_dlsym_deferred_remove.go +++ /dev/null @@ -1,128 +0,0 @@ -// +build linux,cgo,!static_build -// +build libdm_dlsym_deferred_remove,!libdm_no_deferred_remove - -package devicemapper - -/* -#cgo LDFLAGS: -ldl -#include -#include -#include - -// Yes, I know this looks scary. In order to be able to fill our own internal -// dm_info with deferred_remove we need to have a struct definition that is -// correct (regardless of the version of libdm that was used to compile it). To -// this end, we define struct_backport_dm_info. This code comes from lvm2, and -// I have verified that the structure has only ever had elements *appended* to -// it (since 2001). -// -// It is also important that this structure be _larger_ than the dm_info that -// libdevmapper expected. Otherwise libdm might try to write to memory it -// shouldn't (they don't have a "known size" API). -struct backport_dm_info { - int exists; - int suspended; - int live_table; - int inactive_table; - int32_t open_count; - uint32_t event_nr; - uint32_t major; - uint32_t minor; - int read_only; - - int32_t target_count; - - int deferred_remove; - int internal_suspend; - - // Padding, purely for our own safety. This is to avoid cases where libdm - // was updated underneath us and we call into dm_task_get_info() with too - // small of a buffer. - char _[512]; -}; - -// We have to wrap this in CGo, because Go really doesn't like function pointers. -int call_dm_task_deferred_remove(void *fn, struct dm_task *task) -{ - int (*_dm_task_deferred_remove)(struct dm_task *task) = fn; - return _dm_task_deferred_remove(task); -} -*/ -import "C" - -import ( - "unsafe" - - "github.com/sirupsen/logrus" -) - -// dm_task_deferred_remove is not supported by all distributions, due to -// out-dated versions of devicemapper. However, in the case where the -// devicemapper library was updated without rebuilding Docker (which can happen -// in some distributions) then we should attempt to dynamically load the -// relevant object rather than try to link to it. - -// dmTaskDeferredRemoveFct is a "bound" version of dm_task_deferred_remove. -// It is nil if dm_task_deferred_remove was not found in the libdevmapper that -// is currently loaded. -var dmTaskDeferredRemovePtr unsafe.Pointer - -// LibraryDeferredRemovalSupport tells if the feature is supported by the -// current Docker invocation. This value is fixed during init. 
-var LibraryDeferredRemovalSupport bool - -func init() { - // Clear any errors. - var err *C.char - C.dlerror() - - // The symbol we want to fetch. - symName := C.CString("dm_task_deferred_remove") - defer C.free(unsafe.Pointer(symName)) - - // See if we can find dm_task_deferred_remove. Since we already are linked - // to libdevmapper, we can search our own address space (rather than trying - // to guess what libdevmapper is called). We use NULL here, as RTLD_DEFAULT - // is not available in CGO (even if you set _GNU_SOURCE for some reason). - // The semantics are identical on glibc. - sym := C.dlsym(nil, symName) - err = C.dlerror() - if err != nil { - logrus.Debugf("devmapper: could not load dm_task_deferred_remove: %s", C.GoString(err)) - return - } - - logrus.Debugf("devmapper: found dm_task_deferred_remove at %x", uintptr(sym)) - dmTaskDeferredRemovePtr = sym - LibraryDeferredRemovalSupport = true -} - -func dmTaskDeferredRemoveFct(task *cdmTask) int { - sym := dmTaskDeferredRemovePtr - if sym == nil || !LibraryDeferredRemovalSupport { - return -1 - } - return int(C.call_dm_task_deferred_remove(sym, (*C.struct_dm_task)(task))) -} - -func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { - if !LibraryDeferredRemovalSupport { - return -1 - } - - Cinfo := C.struct_backport_dm_info{} - defer func() { - info.Exists = int(Cinfo.exists) - info.Suspended = int(Cinfo.suspended) - info.LiveTable = int(Cinfo.live_table) - info.InactiveTable = int(Cinfo.inactive_table) - info.OpenCount = int32(Cinfo.open_count) - info.EventNr = uint32(Cinfo.event_nr) - info.Major = uint32(Cinfo.major) - info.Minor = uint32(Cinfo.minor) - info.ReadOnly = int(Cinfo.read_only) - info.TargetCount = int32(Cinfo.target_count) - info.DeferredRemove = int(Cinfo.deferred_remove) - }() - return int(C.dm_task_get_info((*C.struct_dm_task)(task), (*C.struct_dm_info)(unsafe.Pointer(&Cinfo)))) -} diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go b/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go deleted file mode 100644 index 8889f0f46..000000000 --- a/vendor/github.com/docker/docker/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build linux,cgo -// +build !libdm_dlsym_deferred_remove,libdm_no_deferred_remove - -package devicemapper // import "github.com/docker/docker/pkg/devicemapper" - -// LibraryDeferredRemovalSupport tells if the feature is supported by the -// current Docker invocation. -const LibraryDeferredRemovalSupport = false - -func dmTaskDeferredRemoveFct(task *cdmTask) int { - // Error. Nobody should be calling it. 
- return -1 -} - -func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { - return -1 -} diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/ioctl.go b/vendor/github.com/docker/docker/pkg/devicemapper/ioctl.go deleted file mode 100644 index ec5a0b33b..000000000 --- a/vendor/github.com/docker/docker/pkg/devicemapper/ioctl.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build linux,cgo - -package devicemapper // import "github.com/docker/docker/pkg/devicemapper" - -import ( - "unsafe" - - "golang.org/x/sys/unix" -) - -func ioctlBlkGetSize64(fd uintptr) (int64, error) { - var size int64 - if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { - return 0, err - } - return size, nil -} - -func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { - var r [2]uint64 - r[0] = offset - r[1] = length - - if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { - return err - } - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/devicemapper/log.go b/vendor/github.com/docker/docker/pkg/devicemapper/log.go deleted file mode 100644 index dd330ba4f..000000000 --- a/vendor/github.com/docker/docker/pkg/devicemapper/log.go +++ /dev/null @@ -1,11 +0,0 @@ -package devicemapper // import "github.com/docker/docker/pkg/devicemapper" - -// definitions from lvm2 lib/log/log.h -const ( - LogLevelFatal = 2 + iota // _LOG_FATAL - LogLevelErr // _LOG_ERR - LogLevelWarn // _LOG_WARN - LogLevelNotice // _LOG_NOTICE - LogLevelInfo // _LOG_INFO - LogLevelDebug // _LOG_DEBUG -) diff --git a/vendor/github.com/docker/docker/pkg/directory/directory.go b/vendor/github.com/docker/docker/pkg/directory/directory.go deleted file mode 100644 index 51d4a6ea2..000000000 --- a/vendor/github.com/docker/docker/pkg/directory/directory.go +++ /dev/null @@ -1,26 +0,0 @@ -package directory // import "github.com/docker/docker/pkg/directory" - -import ( - "io/ioutil" - "os" - "path/filepath" -) - -// MoveToSubdir moves all contents of a directory to a subdirectory underneath the original path -func MoveToSubdir(oldpath, subdir string) error { - - infos, err := ioutil.ReadDir(oldpath) - if err != nil { - return err - } - for _, info := range infos { - if info.Name() != subdir { - oldName := filepath.Join(oldpath, info.Name()) - newName := filepath.Join(oldpath, subdir, info.Name()) - if err := os.Rename(oldName, newName); err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/directory/directory_unix.go b/vendor/github.com/docker/docker/pkg/directory/directory_unix.go deleted file mode 100644 index f56dd7a8f..000000000 --- a/vendor/github.com/docker/docker/pkg/directory/directory_unix.go +++ /dev/null @@ -1,54 +0,0 @@ -// +build linux freebsd darwin - -package directory // import "github.com/docker/docker/pkg/directory" - -import ( - "context" - "os" - "path/filepath" - "syscall" -) - -// Size walks a directory tree and returns its total size in bytes. -func Size(ctx context.Context, dir string) (size int64, err error) { - data := make(map[uint64]struct{}) - err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { - if err != nil { - // if dir does not exist, Size() returns the error. - // if dir/x disappeared while walking, Size() ignores dir/x. 
- if os.IsNotExist(err) && d != dir { - return nil - } - return err - } - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - // Ignore directory sizes - if fileInfo == nil { - return nil - } - - s := fileInfo.Size() - if fileInfo.IsDir() || s == 0 { - return nil - } - - // Check inode to handle hard links correctly - inode := fileInfo.Sys().(*syscall.Stat_t).Ino - // inode is not a uint64 on all platforms. Cast it to avoid issues. - if _, exists := data[inode]; exists { - return nil - } - // inode is not a uint64 on all platforms. Cast it to avoid issues. - data[inode] = struct{}{} - - size += s - - return nil - }) - return -} diff --git a/vendor/github.com/docker/docker/pkg/directory/directory_windows.go b/vendor/github.com/docker/docker/pkg/directory/directory_windows.go deleted file mode 100644 index f07f24188..000000000 --- a/vendor/github.com/docker/docker/pkg/directory/directory_windows.go +++ /dev/null @@ -1,42 +0,0 @@ -package directory // import "github.com/docker/docker/pkg/directory" - -import ( - "context" - "os" - "path/filepath" -) - -// Size walks a directory tree and returns its total size in bytes. -func Size(ctx context.Context, dir string) (size int64, err error) { - err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { - if err != nil { - // if dir does not exist, Size() returns the error. - // if dir/x disappeared while walking, Size() ignores dir/x. - if os.IsNotExist(err) && d != dir { - return nil - } - return err - } - - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - // Ignore directory sizes - if fileInfo == nil { - return nil - } - - s := fileInfo.Size() - if fileInfo.IsDir() || s == 0 { - return nil - } - - size += s - - return nil - }) - return -} diff --git a/vendor/github.com/docker/docker/pkg/discovery/backends.go b/vendor/github.com/docker/docker/pkg/discovery/backends.go deleted file mode 100644 index 1d038285a..000000000 --- a/vendor/github.com/docker/docker/pkg/discovery/backends.go +++ /dev/null @@ -1,107 +0,0 @@ -package discovery // import "github.com/docker/docker/pkg/discovery" - -import ( - "fmt" - "net" - "strings" - "time" - - "github.com/sirupsen/logrus" -) - -var ( - // Backends is a global map of discovery backends indexed by their - // associated scheme. - backends = make(map[string]Backend) -) - -// Register makes a discovery backend available by the provided scheme. -// If Register is called twice with the same scheme an error is returned. 
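The Unix directory.Size above dedupes hard links by tracking inodes, and checks the context on every visited entry so a caller can abort a long walk. A short usage sketch; the path is arbitrary.

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/pkg/directory"
)

func main() {
	// A cancelled context aborts the walk early with ctx.Err(); on unix,
	// files sharing an inode are counted only once.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	n, err := directory.Size(ctx, "/var/tmp")
	if err != nil {
		fmt.Println("walk failed:", err)
		return
	}
	fmt.Printf("%d bytes\n", n)
}
```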
-func Register(scheme string, d Backend) error {
- if _, exists := backends[scheme]; exists {
- return fmt.Errorf("scheme already registered %s", scheme)
- }
- logrus.WithField("name", scheme).Debugf("Registering discovery service")
- backends[scheme] = d
- return nil
-}
-
-func parse(rawurl string) (string, string) {
- parts := strings.SplitN(rawurl, "://", 2)
-
- // node1:port,node2:port => nodes://node1:port,node2:port
- if len(parts) == 1 {
- return "nodes", parts[0]
- }
- return parts[0], parts[1]
-}
-
-// ParseAdvertise parses the --cluster-advertise daemon config which accepts
-// <ip-address>:<port> or <interface-name>:<port>
-func ParseAdvertise(advertise string) (string, error) {
- var (
- iface *net.Interface
- addrs []net.Addr
- err error
- )
-
- addr, port, err := net.SplitHostPort(advertise)
-
- if err != nil {
- return "", fmt.Errorf("invalid --cluster-advertise configuration: %s: %v", advertise, err)
- }
-
- ip := net.ParseIP(addr)
- // If it is a valid ip-address, use it as is
- if ip != nil {
- return advertise, nil
- }
-
- // If advertise is a valid interface name, get the valid IPv4 address and use it to advertise
- ifaceName := addr
- iface, err = net.InterfaceByName(ifaceName)
- if err != nil {
- return "", fmt.Errorf("invalid cluster advertise IP address or interface name (%s) : %v", advertise, err)
- }
-
- addrs, err = iface.Addrs()
- if err != nil {
- return "", fmt.Errorf("unable to get advertise IP address from interface (%s) : %v", advertise, err)
- }
-
- if len(addrs) == 0 {
- return "", fmt.Errorf("no available advertise IP address in interface (%s)", advertise)
- }
-
- addr = ""
- for _, a := range addrs {
- ip, _, err := net.ParseCIDR(a.String())
- if err != nil {
- return "", fmt.Errorf("error deriving advertise ip-address in interface (%s) : %v", advertise, err)
- }
- if ip.To4() == nil || ip.IsLoopback() {
- continue
- }
- addr = ip.String()
- break
- }
- if addr == "" {
- return "", fmt.Errorf("could not find a valid ip-address in interface %s", advertise)
- }
-
- addr = net.JoinHostPort(addr, port)
- return addr, nil
-}
-
-// New returns a new Discovery given a URL, heartbeat and ttl settings.
-// Returns an error if the URL scheme is not supported.
-func New(rawurl string, heartbeat time.Duration, ttl time.Duration, clusterOpts map[string]string) (Backend, error) {
- scheme, uri := parse(rawurl)
- if backend, exists := backends[scheme]; exists {
- logrus.WithFields(logrus.Fields{"name": scheme, "uri": uri}).Debugf("Initializing discovery service")
- err := backend.Initialize(uri, heartbeat, ttl, clusterOpts)
- return backend, err
- }
-
- return nil, ErrNotSupported
-}
diff --git a/vendor/github.com/docker/docker/pkg/discovery/discovery.go b/vendor/github.com/docker/docker/pkg/discovery/discovery.go
deleted file mode 100644
index 828c5ca48..000000000
--- a/vendor/github.com/docker/docker/pkg/discovery/discovery.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package discovery // import "github.com/docker/docker/pkg/discovery"
-
-import (
- "errors"
- "time"
-)
-
-var (
- // ErrNotSupported is returned when a discovery service is not supported.
- ErrNotSupported = errors.New("discovery service not supported")
-
- // ErrNotImplemented is returned when discovery feature is not implemented
- // by discovery backend.
- ErrNotImplemented = errors.New("not implemented in this discovery service")
-)
-
-// Watcher provides watching over a cluster for nodes joining and leaving.
-type Watcher interface {
- // Watch the discovery for entry changes.
- // Returns a channel that will receive changes or an error.
- // Providing a non-nil stopCh can be used to stop watching. - Watch(stopCh <-chan struct{}) (<-chan Entries, <-chan error) -} - -// Backend is implemented by discovery backends which manage cluster entries. -type Backend interface { - // Watcher must be provided by every backend. - Watcher - - // Initialize the discovery with URIs, a heartbeat, a ttl and optional settings. - Initialize(string, time.Duration, time.Duration, map[string]string) error - - // Register to the discovery. - Register(string) error -} diff --git a/vendor/github.com/docker/docker/pkg/discovery/entry.go b/vendor/github.com/docker/docker/pkg/discovery/entry.go deleted file mode 100644 index be06c7578..000000000 --- a/vendor/github.com/docker/docker/pkg/discovery/entry.go +++ /dev/null @@ -1,94 +0,0 @@ -package discovery // import "github.com/docker/docker/pkg/discovery" - -import "net" - -// NewEntry creates a new entry. -func NewEntry(url string) (*Entry, error) { - host, port, err := net.SplitHostPort(url) - if err != nil { - return nil, err - } - return &Entry{host, port}, nil -} - -// An Entry represents a host. -type Entry struct { - Host string - Port string -} - -// Equals returns true if cmp contains the same data. -func (e *Entry) Equals(cmp *Entry) bool { - return e.Host == cmp.Host && e.Port == cmp.Port -} - -// String returns the string form of an entry. -func (e *Entry) String() string { - return net.JoinHostPort(e.Host, e.Port) -} - -// Entries is a list of *Entry with some helpers. -type Entries []*Entry - -// Equals returns true if cmp contains the same data. -func (e Entries) Equals(cmp Entries) bool { - // Check if the file has really changed. - if len(e) != len(cmp) { - return false - } - for i := range e { - if !e[i].Equals(cmp[i]) { - return false - } - } - return true -} - -// Contains returns true if the Entries contain a given Entry. -func (e Entries) Contains(entry *Entry) bool { - for _, curr := range e { - if curr.Equals(entry) { - return true - } - } - return false -} - -// Diff compares two entries and returns the added and removed entries. -func (e Entries) Diff(cmp Entries) (Entries, Entries) { - added := Entries{} - for _, entry := range cmp { - if !e.Contains(entry) { - added = append(added, entry) - } - } - - removed := Entries{} - for _, entry := range e { - if !cmp.Contains(entry) { - removed = append(removed, entry) - } - } - - return added, removed -} - -// CreateEntries returns an array of entries based on the given addresses. 
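-// Entries for empty address strings are skipped, so a stray trailing comma
-// in an address list does not produce a bogus entry.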
-func CreateEntries(addrs []string) (Entries, error) {
- entries := Entries{}
- if addrs == nil {
- return entries, nil
- }
-
- for _, addr := range addrs {
- if len(addr) == 0 {
- continue
- }
- entry, err := NewEntry(addr)
- if err != nil {
- return nil, err
- }
- entries = append(entries, entry)
- }
- return entries, nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/discovery/file/file.go b/vendor/github.com/docker/docker/pkg/discovery/file/file.go
deleted file mode 100644
index 1494af485..000000000
--- a/vendor/github.com/docker/docker/pkg/discovery/file/file.go
+++ /dev/null
@@ -1,107 +0,0 @@
-package file // import "github.com/docker/docker/pkg/discovery/file"
-
-import (
- "fmt"
- "io/ioutil"
- "strings"
- "time"
-
- "github.com/docker/docker/pkg/discovery"
-)
-
-// Discovery implements a file-based discovery backend.
-type Discovery struct {
- heartbeat time.Duration
- path string
-}
-
-func init() {
- Init()
-}
-
-// Init registers the file backend with the discovery package.
-func Init() {
- discovery.Register("file", &Discovery{})
-}
-
-// Initialize records the file path to poll and the heartbeat interval.
-func (s *Discovery) Initialize(path string, heartbeat time.Duration, ttl time.Duration, _ map[string]string) error {
- s.path = path
- s.heartbeat = heartbeat
- return nil
-}
-
-func parseFileContent(content []byte) []string {
- var result []string
- for _, line := range strings.Split(strings.TrimSpace(string(content)), "\n") {
- line = strings.TrimSpace(line)
- // Ignore lines that start with #.
- if strings.HasPrefix(line, "#") {
- continue
- }
- // Inline # comments are ignored as well.
- if strings.Contains(line, "#") {
- line = line[0:strings.Index(line, "#")]
- // Trim additional spaces caused by above stripping.
- line = strings.TrimSpace(line)
- }
- result = append(result, discovery.Generate(line)...)
- }
- return result
-}
-
-func (s *Discovery) fetch() (discovery.Entries, error) {
- fileContent, err := ioutil.ReadFile(s.path)
- if err != nil {
- return nil, fmt.Errorf("failed to read '%s': %v", s.path, err)
- }
- return discovery.CreateEntries(parseFileContent(fileContent))
-}
-
-// Watch polls the file on every heartbeat tick and sends the parsed entries.
-func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) {
- ch := make(chan discovery.Entries)
- errCh := make(chan error)
- ticker := time.NewTicker(s.heartbeat)
-
- go func() {
- defer close(errCh)
- defer close(ch)
-
- // Send the initial entries if available.
- currentEntries, err := s.fetch()
- if err != nil {
- errCh <- err
- } else {
- ch <- currentEntries
- }
-
- // Periodically send updates.
- for {
- select {
- case <-ticker.C:
- newEntries, err := s.fetch()
- if err != nil {
- errCh <- err
- continue
- }
-
- // Check if the file has really changed.
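- // Only send an update when the entries differ from the previous
- // fetch, so watchers are not woken on every poll tick.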
- if !newEntries.Equals(currentEntries) { - ch <- newEntries - } - currentEntries = newEntries - case <-stopCh: - ticker.Stop() - return - } - } - }() - - return ch, errCh -} - -// Register is exported -func (s *Discovery) Register(addr string) error { - return discovery.ErrNotImplemented -} diff --git a/vendor/github.com/docker/docker/pkg/discovery/generator.go b/vendor/github.com/docker/docker/pkg/discovery/generator.go deleted file mode 100644 index 788015fe2..000000000 --- a/vendor/github.com/docker/docker/pkg/discovery/generator.go +++ /dev/null @@ -1,35 +0,0 @@ -package discovery // import "github.com/docker/docker/pkg/discovery" - -import ( - "fmt" - "regexp" - "strconv" -) - -// Generate takes care of IP generation -func Generate(pattern string) []string { - re, _ := regexp.Compile(`\[(.+):(.+)\]`) - submatch := re.FindStringSubmatch(pattern) - if submatch == nil { - return []string{pattern} - } - - from, err := strconv.Atoi(submatch[1]) - if err != nil { - return []string{pattern} - } - to, err := strconv.Atoi(submatch[2]) - if err != nil { - return []string{pattern} - } - - template := re.ReplaceAllString(pattern, "%d") - - var result []string - for val := from; val <= to; val++ { - entry := fmt.Sprintf(template, val) - result = append(result, entry) - } - - return result -} diff --git a/vendor/github.com/docker/docker/pkg/discovery/kv/kv.go b/vendor/github.com/docker/docker/pkg/discovery/kv/kv.go deleted file mode 100644 index 30fe6714c..000000000 --- a/vendor/github.com/docker/docker/pkg/discovery/kv/kv.go +++ /dev/null @@ -1,192 +0,0 @@ -package kv // import "github.com/docker/docker/pkg/discovery/kv" - -import ( - "fmt" - "path" - "strings" - "time" - - "github.com/docker/docker/pkg/discovery" - "github.com/docker/go-connections/tlsconfig" - "github.com/docker/libkv" - "github.com/docker/libkv/store" - "github.com/docker/libkv/store/consul" - "github.com/docker/libkv/store/etcd" - "github.com/docker/libkv/store/zookeeper" - "github.com/sirupsen/logrus" -) - -const ( - defaultDiscoveryPath = "docker/nodes" -) - -// Discovery is exported -type Discovery struct { - backend store.Backend - store store.Store - heartbeat time.Duration - ttl time.Duration - prefix string - path string -} - -func init() { - Init() -} - -// Init is exported -func Init() { - // Register to libkv - zookeeper.Register() - consul.Register() - etcd.Register() - - // Register to internal discovery service - discovery.Register("zk", &Discovery{backend: store.ZK}) - discovery.Register("consul", &Discovery{backend: store.CONSUL}) - discovery.Register("etcd", &Discovery{backend: store.ETCD}) -} - -// Initialize is exported -func (s *Discovery) Initialize(uris string, heartbeat time.Duration, ttl time.Duration, clusterOpts map[string]string) error { - var ( - parts = strings.SplitN(uris, "/", 2) - addrs = strings.Split(parts[0], ",") - err error - ) - - // A custom prefix to the path can be optionally used. 
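- // For example, "host1:2379,host2:2379/custom/prefix" splits into the
- // comma-separated address list and the "custom/prefix" prefix.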
- if len(parts) == 2 {
- s.prefix = parts[1]
- }
-
- s.heartbeat = heartbeat
- s.ttl = ttl
-
- // Use a custom path if specified in discovery options
- dpath := defaultDiscoveryPath
- if clusterOpts["kv.path"] != "" {
- dpath = clusterOpts["kv.path"]
- }
-
- s.path = path.Join(s.prefix, dpath)
-
- var config *store.Config
- if clusterOpts["kv.cacertfile"] != "" && clusterOpts["kv.certfile"] != "" && clusterOpts["kv.keyfile"] != "" {
- logrus.Info("Initializing discovery with TLS")
- tlsConfig, err := tlsconfig.Client(tlsconfig.Options{
- CAFile: clusterOpts["kv.cacertfile"],
- CertFile: clusterOpts["kv.certfile"],
- KeyFile: clusterOpts["kv.keyfile"],
- })
- if err != nil {
- return err
- }
- config = &store.Config{
- // Set ClientTLS to trigger https (bug in libkv/etcd)
- ClientTLS: &store.ClientTLSConfig{
- CACertFile: clusterOpts["kv.cacertfile"],
- CertFile: clusterOpts["kv.certfile"],
- KeyFile: clusterOpts["kv.keyfile"],
- },
- // The actual TLS config that will be used
- TLS: tlsConfig,
- }
- } else {
- logrus.Info("Initializing discovery without TLS")
- }
-
- // Create a new store; options that are not supported by the chosen
- // store are ignored.
- s.store, err = libkv.NewStore(s.backend, addrs, config)
- return err
-}
-
-// Watch the store until either there's a store error or we receive a stop request.
-// Returns false if we shouldn't attempt watching the store anymore (stop request received).
-func (s *Discovery) watchOnce(stopCh <-chan struct{}, watchCh <-chan []*store.KVPair, discoveryCh chan discovery.Entries, errCh chan error) bool {
- for {
- select {
- case pairs := <-watchCh:
- if pairs == nil {
- return true
- }
-
- logrus.WithField("discovery", s.backend).Debugf("Watch triggered with %d nodes", len(pairs))
-
- // Convert `KVPair` into `discovery.Entry`.
- addrs := make([]string, 0, len(pairs))
- for _, pair := range pairs {
- addrs = append(addrs, string(pair.Value))
- }
-
- entries, err := discovery.CreateEntries(addrs)
- if err != nil {
- errCh <- err
- } else {
- discoveryCh <- entries
- }
- case <-stopCh:
- // We were requested to stop watching.
- return false
- }
- }
-}
-
-// Watch watches the store path for changes and sends updated entries on the
-// returned channel until a stop is requested.
-func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) {
- ch := make(chan discovery.Entries)
- errCh := make(chan error)
-
- go func() {
- defer close(ch)
- defer close(errCh)
-
- // Forever: Create a store watch, watch until we get an error and then try again.
- // Will only stop if we receive a stopCh request.
- for {
- // Create the path to watch if it does not exist yet
- exists, err := s.store.Exists(s.path)
- if err != nil {
- errCh <- err
- }
- if !exists {
- if err := s.store.Put(s.path, []byte(""), &store.WriteOptions{IsDir: true}); err != nil {
- errCh <- err
- }
- }
-
- // Set up a watch.
- watchCh, err := s.store.WatchTree(s.path, stopCh)
- if err != nil {
- errCh <- err
- } else {
- if !s.watchOnce(stopCh, watchCh, ch, errCh) {
- return
- }
- }
-
- // If we get here it means the store watch channel was closed. This
- // is unexpected so let's retry later.
- errCh <- fmt.Errorf("unexpected watch error")
- time.Sleep(s.heartbeat)
- }
- }()
- return ch, errCh
-}
-
-// Register stores addr under the discovery path with the configured TTL.
-func (s *Discovery) Register(addr string) error {
- opts := &store.WriteOptions{TTL: s.ttl}
- return s.store.Put(path.Join(s.path, addr), []byte(addr), opts)
-}
-
-// Store returns the underlying store used by KV discovery.
-func (s *Discovery) Store() store.Store { - return s.store -} - -// Prefix returns the store prefix -func (s *Discovery) Prefix() string { - return s.prefix -} diff --git a/vendor/github.com/docker/docker/pkg/discovery/memory/memory.go b/vendor/github.com/docker/docker/pkg/discovery/memory/memory.go deleted file mode 100644 index 81f973e28..000000000 --- a/vendor/github.com/docker/docker/pkg/discovery/memory/memory.go +++ /dev/null @@ -1,93 +0,0 @@ -package memory // import "github.com/docker/docker/pkg/discovery/memory" - -import ( - "sync" - "time" - - "github.com/docker/docker/pkg/discovery" -) - -// Discovery implements a discovery backend that keeps -// data in memory. -type Discovery struct { - heartbeat time.Duration - values []string - mu sync.Mutex -} - -func init() { - Init() -} - -// Init registers the memory backend on demand. -func Init() { - discovery.Register("memory", &Discovery{}) -} - -// Initialize sets the heartbeat for the memory backend. -func (s *Discovery) Initialize(_ string, heartbeat time.Duration, _ time.Duration, _ map[string]string) error { - s.heartbeat = heartbeat - s.values = make([]string, 0) - return nil -} - -// Watch sends periodic discovery updates to a channel. -func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { - ch := make(chan discovery.Entries) - errCh := make(chan error) - ticker := time.NewTicker(s.heartbeat) - - go func() { - defer close(errCh) - defer close(ch) - - // Send the initial entries if available. - var currentEntries discovery.Entries - var err error - - s.mu.Lock() - if len(s.values) > 0 { - currentEntries, err = discovery.CreateEntries(s.values) - } - s.mu.Unlock() - - if err != nil { - errCh <- err - } else if currentEntries != nil { - ch <- currentEntries - } - - // Periodically send updates. - for { - select { - case <-ticker.C: - s.mu.Lock() - newEntries, err := discovery.CreateEntries(s.values) - s.mu.Unlock() - if err != nil { - errCh <- err - continue - } - - // Check if the file has really changed. - if !newEntries.Equals(currentEntries) { - ch <- newEntries - } - currentEntries = newEntries - case <-stopCh: - ticker.Stop() - return - } - } - }() - - return ch, errCh -} - -// Register adds a new address to the discovery. 
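-// The address is included in the update sent to watchers on the next
-// heartbeat tick.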
-func (s *Discovery) Register(addr string) error { - s.mu.Lock() - s.values = append(s.values, addr) - s.mu.Unlock() - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/discovery/nodes/nodes.go b/vendor/github.com/docker/docker/pkg/discovery/nodes/nodes.go deleted file mode 100644 index b1d45aa2e..000000000 --- a/vendor/github.com/docker/docker/pkg/discovery/nodes/nodes.go +++ /dev/null @@ -1,54 +0,0 @@ -package nodes // import "github.com/docker/docker/pkg/discovery/nodes" - -import ( - "fmt" - "strings" - "time" - - "github.com/docker/docker/pkg/discovery" -) - -// Discovery is exported -type Discovery struct { - entries discovery.Entries -} - -func init() { - Init() -} - -// Init is exported -func Init() { - discovery.Register("nodes", &Discovery{}) -} - -// Initialize is exported -func (s *Discovery) Initialize(uris string, _ time.Duration, _ time.Duration, _ map[string]string) error { - for _, input := range strings.Split(uris, ",") { - for _, ip := range discovery.Generate(input) { - entry, err := discovery.NewEntry(ip) - if err != nil { - return fmt.Errorf("%s, please check you are using the correct discovery (missing token:// ?)", err.Error()) - } - s.entries = append(s.entries, entry) - } - } - - return nil -} - -// Watch is exported -func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { - ch := make(chan discovery.Entries) - go func() { - defer close(ch) - ch <- s.entries - <-stopCh - }() - return ch, nil -} - -// Register is exported -func (s *Discovery) Register(addr string) error { - return discovery.ErrNotImplemented -} diff --git a/vendor/github.com/docker/docker/pkg/dmesg/dmesg_linux.go b/vendor/github.com/docker/docker/pkg/dmesg/dmesg_linux.go deleted file mode 100644 index bc71b5b31..000000000 --- a/vendor/github.com/docker/docker/pkg/dmesg/dmesg_linux.go +++ /dev/null @@ -1,18 +0,0 @@ -package dmesg // import "github.com/docker/docker/pkg/dmesg" - -import ( - "unsafe" - - "golang.org/x/sys/unix" -) - -// Dmesg returns last messages from the kernel log, up to size bytes -func Dmesg(size int) []byte { - t := uintptr(3) // SYSLOG_ACTION_READ_ALL - b := make([]byte, size) - amt, _, err := unix.Syscall(unix.SYS_SYSLOG, t, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b))) - if err != 0 { - return []byte{} - } - return b[:amt] -} diff --git a/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go b/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go deleted file mode 100644 index 8b6cb56f1..000000000 --- a/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go +++ /dev/null @@ -1,40 +0,0 @@ -// Package filenotify provides a mechanism for watching file(s) for changes. -// Generally leans on fsnotify, but provides a poll-based notifier which fsnotify does not support. -// These are wrapped up in a common interface so that either can be used interchangeably in your code. 
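-// Callers normally use New(), which prefers the fsnotify-based watcher and
-// falls back to the polling implementation when the watcher cannot be created.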
-package filenotify // import "github.com/docker/docker/pkg/filenotify" - -import "github.com/fsnotify/fsnotify" - -// FileWatcher is an interface for implementing file notification watchers -type FileWatcher interface { - Events() <-chan fsnotify.Event - Errors() <-chan error - Add(name string) error - Remove(name string) error - Close() error -} - -// New tries to use an fs-event watcher, and falls back to the poller if there is an error -func New() (FileWatcher, error) { - if watcher, err := NewEventWatcher(); err == nil { - return watcher, nil - } - return NewPollingWatcher(), nil -} - -// NewPollingWatcher returns a poll-based file watcher -func NewPollingWatcher() FileWatcher { - return &filePoller{ - events: make(chan fsnotify.Event), - errors: make(chan error), - } -} - -// NewEventWatcher returns an fs-event based file watcher -func NewEventWatcher() (FileWatcher, error) { - watcher, err := fsnotify.NewWatcher() - if err != nil { - return nil, err - } - return &fsNotifyWatcher{watcher}, nil -} diff --git a/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go b/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go deleted file mode 100644 index 5a737d653..000000000 --- a/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go +++ /dev/null @@ -1,18 +0,0 @@ -package filenotify // import "github.com/docker/docker/pkg/filenotify" - -import "github.com/fsnotify/fsnotify" - -// fsNotifyWatcher wraps the fsnotify package to satisfy the FileNotifier interface -type fsNotifyWatcher struct { - *fsnotify.Watcher -} - -// Events returns the fsnotify event channel receiver -func (w *fsNotifyWatcher) Events() <-chan fsnotify.Event { - return w.Watcher.Events -} - -// Errors returns the fsnotify error channel receiver -func (w *fsNotifyWatcher) Errors() <-chan error { - return w.Watcher.Errors -} diff --git a/vendor/github.com/docker/docker/pkg/filenotify/poller.go b/vendor/github.com/docker/docker/pkg/filenotify/poller.go deleted file mode 100644 index 22f189703..000000000 --- a/vendor/github.com/docker/docker/pkg/filenotify/poller.go +++ /dev/null @@ -1,204 +0,0 @@ -package filenotify // import "github.com/docker/docker/pkg/filenotify" - -import ( - "errors" - "fmt" - "os" - "sync" - "time" - - "github.com/sirupsen/logrus" - - "github.com/fsnotify/fsnotify" -) - -var ( - // errPollerClosed is returned when the poller is closed - errPollerClosed = errors.New("poller is closed") - // errNoSuchWatch is returned when trying to remove a watch that doesn't exist - errNoSuchWatch = errors.New("watch does not exist") -) - -// watchWaitTime is the time to wait between file poll loops -const watchWaitTime = 200 * time.Millisecond - -// filePoller is used to poll files for changes, especially in cases where fsnotify -// can't be run (e.g. 
when inotify handles are exhausted) -// filePoller satisfies the FileWatcher interface -type filePoller struct { - // watches is the list of files currently being polled, close the associated channel to stop the watch - watches map[string]chan struct{} - // events is the channel to listen to for watch events - events chan fsnotify.Event - // errors is the channel to listen to for watch errors - errors chan error - // mu locks the poller for modification - mu sync.Mutex - // closed is used to specify when the poller has already closed - closed bool -} - -// Add adds a filename to the list of watches -// once added the file is polled for changes in a separate goroutine -func (w *filePoller) Add(name string) error { - w.mu.Lock() - defer w.mu.Unlock() - - if w.closed { - return errPollerClosed - } - - f, err := os.Open(name) - if err != nil { - return err - } - fi, err := os.Stat(name) - if err != nil { - return err - } - - if w.watches == nil { - w.watches = make(map[string]chan struct{}) - } - if _, exists := w.watches[name]; exists { - return fmt.Errorf("watch exists") - } - chClose := make(chan struct{}) - w.watches[name] = chClose - - go w.watch(f, fi, chClose) - return nil -} - -// Remove stops and removes watch with the specified name -func (w *filePoller) Remove(name string) error { - w.mu.Lock() - defer w.mu.Unlock() - return w.remove(name) -} - -func (w *filePoller) remove(name string) error { - if w.closed { - return errPollerClosed - } - - chClose, exists := w.watches[name] - if !exists { - return errNoSuchWatch - } - close(chClose) - delete(w.watches, name) - return nil -} - -// Events returns the event channel -// This is used for notifications on events about watched files -func (w *filePoller) Events() <-chan fsnotify.Event { - return w.events -} - -// Errors returns the errors channel -// This is used for notifications about errors on watched files -func (w *filePoller) Errors() <-chan error { - return w.errors -} - -// Close closes the poller -// All watches are stopped, removed, and the poller cannot be added to -func (w *filePoller) Close() error { - w.mu.Lock() - defer w.mu.Unlock() - - if w.closed { - return nil - } - - w.closed = true - for name := range w.watches { - w.remove(name) - delete(w.watches, name) - } - return nil -} - -// sendEvent publishes the specified event to the events channel -func (w *filePoller) sendEvent(e fsnotify.Event, chClose <-chan struct{}) error { - select { - case w.events <- e: - case <-chClose: - return fmt.Errorf("closed") - } - return nil -} - -// sendErr publishes the specified error to the errors channel -func (w *filePoller) sendErr(e error, chClose <-chan struct{}) error { - select { - case w.errors <- e: - case <-chClose: - return fmt.Errorf("closed") - } - return nil -} - -// watch is responsible for polling the specified file for changes -// upon finding changes to a file or errors, sendEvent/sendErr is called -func (w *filePoller) watch(f *os.File, lastFi os.FileInfo, chClose chan struct{}) { - defer f.Close() - for { - time.Sleep(watchWaitTime) - select { - case <-chClose: - logrus.Debugf("watch for %s closed", f.Name()) - return - default: - } - - fi, err := os.Stat(f.Name()) - if err != nil { - // if we got an error here and lastFi is not set, we can presume that nothing has changed - // This should be safe since before `watch()` is called, a stat is performed, there is any error `watch` is not called - if lastFi == nil { - continue - } - // If it doesn't exist at this point, it must have been removed - // no need to send the 
error here since this is a valid operation - if os.IsNotExist(err) { - if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Remove, Name: f.Name()}, chClose); err != nil { - return - } - lastFi = nil - continue - } - // at this point, send the error - if err := w.sendErr(err, chClose); err != nil { - return - } - continue - } - - if lastFi == nil { - if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Create, Name: fi.Name()}, chClose); err != nil { - return - } - lastFi = fi - continue - } - - if fi.Mode() != lastFi.Mode() { - if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Chmod, Name: fi.Name()}, chClose); err != nil { - return - } - lastFi = fi - continue - } - - if fi.ModTime() != lastFi.ModTime() || fi.Size() != lastFi.Size() { - if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Write, Name: fi.Name()}, chClose); err != nil { - return - } - lastFi = fi - continue - } - } -} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go deleted file mode 100644 index 28cad499a..000000000 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go +++ /dev/null @@ -1,298 +0,0 @@ -package fileutils // import "github.com/docker/docker/pkg/fileutils" - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "regexp" - "strings" - "text/scanner" - - "github.com/sirupsen/logrus" -) - -// PatternMatcher allows checking paths against a list of patterns -type PatternMatcher struct { - patterns []*Pattern - exclusions bool -} - -// NewPatternMatcher creates a new matcher object for specific patterns that can -// be used later to match against patterns against paths -func NewPatternMatcher(patterns []string) (*PatternMatcher, error) { - pm := &PatternMatcher{ - patterns: make([]*Pattern, 0, len(patterns)), - } - for _, p := range patterns { - // Eliminate leading and trailing whitespace. - p = strings.TrimSpace(p) - if p == "" { - continue - } - p = filepath.Clean(p) - newp := &Pattern{} - if p[0] == '!' { - if len(p) == 1 { - return nil, errors.New("illegal exclusion pattern: \"!\"") - } - newp.exclusion = true - p = p[1:] - pm.exclusions = true - } - // Do some syntax checking on the pattern. - // filepath's Match() has some really weird rules that are inconsistent - // so instead of trying to dup their logic, just call Match() for its - // error state and if there is an error in the pattern return it. - // If this becomes an issue we can remove this since its really only - // needed in the error (syntax) case - which isn't really critical. - if _, err := filepath.Match(p, "."); err != nil { - return nil, err - } - newp.cleanedPattern = p - newp.dirs = strings.Split(p, string(os.PathSeparator)) - pm.patterns = append(pm.patterns, newp) - } - return pm, nil -} - -// Matches matches path against all the patterns. Matches is not safe to be -// called concurrently -func (pm *PatternMatcher) Matches(file string) (bool, error) { - matched := false - file = filepath.FromSlash(file) - parentPath := filepath.Dir(file) - parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) - - for _, pattern := range pm.patterns { - negative := false - - if pattern.exclusion { - negative = true - } - - match, err := pattern.match(file) - if err != nil { - return false, err - } - - if !match && parentPath != "." { - // Check to see if the pattern matches one of our parent dirs. 
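- // A pattern like "dir" should also exclude "dir/file", so try the
- // pattern against the leading components of the parent path too.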
- if len(pattern.dirs) <= len(parentPathDirs) { - match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator))) - } - } - - if match { - matched = !negative - } - } - - if matched { - logrus.Debugf("Skipping excluded path: %s", file) - } - - return matched, nil -} - -// Exclusions returns true if any of the patterns define exclusions -func (pm *PatternMatcher) Exclusions() bool { - return pm.exclusions -} - -// Patterns returns array of active patterns -func (pm *PatternMatcher) Patterns() []*Pattern { - return pm.patterns -} - -// Pattern defines a single regexp used used to filter file paths. -type Pattern struct { - cleanedPattern string - dirs []string - regexp *regexp.Regexp - exclusion bool -} - -func (p *Pattern) String() string { - return p.cleanedPattern -} - -// Exclusion returns true if this pattern defines exclusion -func (p *Pattern) Exclusion() bool { - return p.exclusion -} - -func (p *Pattern) match(path string) (bool, error) { - - if p.regexp == nil { - if err := p.compile(); err != nil { - return false, filepath.ErrBadPattern - } - } - - b := p.regexp.MatchString(path) - - return b, nil -} - -func (p *Pattern) compile() error { - regStr := "^" - pattern := p.cleanedPattern - // Go through the pattern and convert it to a regexp. - // We use a scanner so we can support utf-8 chars. - var scan scanner.Scanner - scan.Init(strings.NewReader(pattern)) - - sl := string(os.PathSeparator) - escSL := sl - if sl == `\` { - escSL += `\` - } - - for scan.Peek() != scanner.EOF { - ch := scan.Next() - - if ch == '*' { - if scan.Peek() == '*' { - // is some flavor of "**" - scan.Next() - - // Treat **/ as ** so eat the "/" - if string(scan.Peek()) == sl { - scan.Next() - } - - if scan.Peek() == scanner.EOF { - // is "**EOF" - to align with .gitignore just accept all - regStr += ".*" - } else { - // is "**" - // Note that this allows for any # of /'s (even 0) because - // the .* will eat everything, even /'s - regStr += "(.*" + escSL + ")?" - } - } else { - // is "*" so map it to anything but "/" - regStr += "[^" + escSL + "]*" - } - } else if ch == '?' { - // "?" is any char except "/" - regStr += "[^" + escSL + "]" - } else if ch == '.' || ch == '$' { - // Escape some regexp special chars that have no meaning - // in golang's filepath.Match - regStr += `\` + string(ch) - } else if ch == '\\' { - // escape next char. Note that a trailing \ in the pattern - // will be left alone (but need to escape it) - if sl == `\` { - // On windows map "\" to "\\", meaning an escaped backslash, - // and then just continue because filepath.Match on - // Windows doesn't allow escaping at all - regStr += escSL - continue - } - if scan.Peek() != scanner.EOF { - regStr += `\` + string(scan.Next()) - } else { - regStr += `\` - } - } else { - regStr += string(ch) - } - } - - regStr += "$" - - re, err := regexp.Compile(regStr) - if err != nil { - return err - } - - p.regexp = re - return nil -} - -// Matches returns true if file matches any of the patterns -// and isn't excluded by any of the subsequent patterns. -func Matches(file string, patterns []string) (bool, error) { - pm, err := NewPatternMatcher(patterns) - if err != nil { - return false, err - } - file = filepath.Clean(file) - - if file == "." { - // Don't let them exclude everything, kind of silly. - return false, nil - } - - return pm.Matches(file) -} - -// CopyFile copies from src to dst until either EOF is reached -// on src or an error occurs. It verifies src exists and removes -// the dst if it exists. 
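-// When src and dst clean to the same path, the copy is skipped and
-// (0, nil) is returned.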
-func CopyFile(src, dst string) (int64, error) { - cleanSrc := filepath.Clean(src) - cleanDst := filepath.Clean(dst) - if cleanSrc == cleanDst { - return 0, nil - } - sf, err := os.Open(cleanSrc) - if err != nil { - return 0, err - } - defer sf.Close() - if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { - return 0, err - } - df, err := os.Create(cleanDst) - if err != nil { - return 0, err - } - defer df.Close() - return io.Copy(df, sf) -} - -// ReadSymlinkedDirectory returns the target directory of a symlink. -// The target of the symbolic link may not be a file. -func ReadSymlinkedDirectory(path string) (string, error) { - var realPath string - var err error - if realPath, err = filepath.Abs(path); err != nil { - return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) - } - if realPath, err = filepath.EvalSymlinks(realPath); err != nil { - return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) - } - realPathInfo, err := os.Stat(realPath) - if err != nil { - return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) - } - if !realPathInfo.Mode().IsDir() { - return "", fmt.Errorf("canonical path points to a file '%s'", realPath) - } - return realPath, nil -} - -// CreateIfNotExists creates a file or a directory only if it does not already exist. -func CreateIfNotExists(path string, isDir bool) error { - if _, err := os.Stat(path); err != nil { - if os.IsNotExist(err) { - if isDir { - return os.MkdirAll(path, 0755) - } - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return err - } - f, err := os.OpenFile(path, os.O_CREATE, 0755) - if err != nil { - return err - } - f.Close() - } - } - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go deleted file mode 100644 index e40cc271b..000000000 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go +++ /dev/null @@ -1,27 +0,0 @@ -package fileutils // import "github.com/docker/docker/pkg/fileutils" - -import ( - "os" - "os/exec" - "strconv" - "strings" -) - -// GetTotalUsedFds returns the number of used File Descriptors by -// executing `lsof -p PID` -func GetTotalUsedFds() int { - pid := os.Getpid() - - cmd := exec.Command("lsof", "-p", strconv.Itoa(pid)) - - output, err := cmd.CombinedOutput() - if err != nil { - return -1 - } - - outputStr := strings.TrimSpace(string(output)) - - fds := strings.Split(outputStr, "\n") - - return len(fds) - 1 -} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go deleted file mode 100644 index 565396f1c..000000000 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build linux freebsd - -package fileutils // import "github.com/docker/docker/pkg/fileutils" - -import ( - "fmt" - "io/ioutil" - "os" - - "github.com/sirupsen/logrus" -) - -// GetTotalUsedFds Returns the number of used File Descriptors by -// reading it via /proc filesystem. 
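-// Each entry under /proc/<pid>/fd is one open descriptor; -1 is returned
-// if the directory cannot be read.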
-func GetTotalUsedFds() int { - if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { - logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) - } else { - return len(fds) - } - return -1 -} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go deleted file mode 100644 index 3f1ebb656..000000000 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package fileutils // import "github.com/docker/docker/pkg/fileutils" - -// GetTotalUsedFds Returns the number of used File Descriptors. Not supported -// on Windows. -func GetTotalUsedFds() int { - return -1 -} diff --git a/vendor/github.com/docker/docker/pkg/fsutils/fsutils_linux.go b/vendor/github.com/docker/docker/pkg/fsutils/fsutils_linux.go deleted file mode 100644 index 104211ade..000000000 --- a/vendor/github.com/docker/docker/pkg/fsutils/fsutils_linux.go +++ /dev/null @@ -1,86 +0,0 @@ -package fsutils // import "github.com/docker/docker/pkg/fsutils" - -import ( - "fmt" - "io/ioutil" - "os" - "unsafe" - - "golang.org/x/sys/unix" -) - -func locateDummyIfEmpty(path string) (string, error) { - children, err := ioutil.ReadDir(path) - if err != nil { - return "", err - } - if len(children) != 0 { - return "", nil - } - dummyFile, err := ioutil.TempFile(path, "fsutils-dummy") - if err != nil { - return "", err - } - name := dummyFile.Name() - err = dummyFile.Close() - return name, err -} - -// SupportsDType returns whether the filesystem mounted on path supports d_type -func SupportsDType(path string) (bool, error) { - // locate dummy so that we have at least one dirent - dummy, err := locateDummyIfEmpty(path) - if err != nil { - return false, err - } - if dummy != "" { - defer os.Remove(dummy) - } - - visited := 0 - supportsDType := true - fn := func(ent *unix.Dirent) bool { - visited++ - if ent.Type == unix.DT_UNKNOWN { - supportsDType = false - // stop iteration - return true - } - // continue iteration - return false - } - if err = iterateReadDir(path, fn); err != nil { - return false, err - } - if visited == 0 { - return false, fmt.Errorf("did not hit any dirent during iteration %s", path) - } - return supportsDType, nil -} - -func iterateReadDir(path string, fn func(*unix.Dirent) bool) error { - d, err := os.Open(path) - if err != nil { - return err - } - defer d.Close() - fd := int(d.Fd()) - buf := make([]byte, 4096) - for { - nbytes, err := unix.ReadDirent(fd, buf) - if err != nil { - return err - } - if nbytes == 0 { - break - } - for off := 0; off < nbytes; { - ent := (*unix.Dirent)(unsafe.Pointer(&buf[off])) - if stop := fn(ent); stop { - return nil - } - off += int(ent.Reclen) - } - } - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go deleted file mode 100644 index ee15ed52b..000000000 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go +++ /dev/null @@ -1,21 +0,0 @@ -package homedir // import "github.com/docker/docker/pkg/homedir" - -import ( - "os" - - "github.com/docker/docker/pkg/idtools" -) - -// GetStatic returns the home directory for the current user without calling -// os/user.Current(). This is useful for static-linked binary on glibc-based -// system, because a call to os/user.Current() in a static binary leads to -// segfault due to a glibc issue that won't be fixed in a short term. 
-// (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341) -func GetStatic() (string, error) { - uid := os.Getuid() - usr, err := idtools.LookupUID(uid) - if err != nil { - return "", err - } - return usr.Home, nil -} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go deleted file mode 100644 index 75ada2fe5..000000000 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !linux - -package homedir // import "github.com/docker/docker/pkg/homedir" - -import ( - "errors" -) - -// GetStatic is not needed for non-linux systems. -// (Precisely, it is needed only for glibc-based linux systems.) -func GetStatic() (string, error) { - return "", errors.New("homedir.GetStatic() is not supported on this system") -} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go deleted file mode 100644 index d85e12448..000000000 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build !windows - -package homedir // import "github.com/docker/docker/pkg/homedir" - -import ( - "os" - - "github.com/opencontainers/runc/libcontainer/user" -) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - return "HOME" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -func Get() string { - home := os.Getenv(Key()) - if home == "" { - if u, err := user.CurrentUser(); err == nil { - return u.Home - } - } - return home -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. -func GetShortcutString() string { - return "~" -} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go deleted file mode 100644 index 2f81813b2..000000000 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go +++ /dev/null @@ -1,24 +0,0 @@ -package homedir // import "github.com/docker/docker/pkg/homedir" - -import ( - "os" -) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - return "USERPROFILE" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -func Get() string { - return os.Getenv(Key()) -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. -func GetShortcutString() string { - return "%USERPROFILE%" // be careful while using in format functions -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go deleted file mode 100644 index d1f173a31..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools.go +++ /dev/null @@ -1,266 +0,0 @@ -package idtools // import "github.com/docker/docker/pkg/idtools" - -import ( - "bufio" - "fmt" - "os" - "sort" - "strconv" - "strings" -) - -// IDMap contains a single entry for user namespace range remapping. 
An array -// of IDMap entries represents the structure that will be provided to the Linux -// kernel for creating a user namespace. -type IDMap struct { - ContainerID int `json:"container_id"` - HostID int `json:"host_id"` - Size int `json:"size"` -} - -type subIDRange struct { - Start int - Length int -} - -type ranges []subIDRange - -func (e ranges) Len() int { return len(e) } -func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } -func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } - -const ( - subuidFileName = "/etc/subuid" - subgidFileName = "/etc/subgid" -) - -// MkdirAllAndChown creates a directory (include any along the path) and then modifies -// ownership to the requested uid/gid. If the directory already exists, this -// function will still change ownership to the requested uid/gid pair. -func MkdirAllAndChown(path string, mode os.FileMode, owner IDPair) error { - return mkdirAs(path, mode, owner.UID, owner.GID, true, true) -} - -// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. -// If the directory already exists, this function still changes ownership. -// Note that unlike os.Mkdir(), this function does not return IsExist error -// in case path already exists. -func MkdirAndChown(path string, mode os.FileMode, owner IDPair) error { - return mkdirAs(path, mode, owner.UID, owner.GID, false, true) -} - -// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies -// ownership ONLY of newly created directories to the requested uid/gid. If the -// directories along the path exist, no change of ownership will be performed -func MkdirAllAndChownNew(path string, mode os.FileMode, owner IDPair) error { - return mkdirAs(path, mode, owner.UID, owner.GID, true, false) -} - -// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. -// If the maps are empty, then the root uid/gid will default to "real" 0/0 -func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { - uid, err := toHost(0, uidMap) - if err != nil { - return -1, -1, err - } - gid, err := toHost(0, gidMap) - if err != nil { - return -1, -1, err - } - return uid, gid, nil -} - -// toContainer takes an id mapping, and uses it to translate a -// host ID to the remapped ID. If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id -func toContainer(hostID int, idMap []IDMap) (int, error) { - if idMap == nil { - return hostID, nil - } - for _, m := range idMap { - if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { - contID := m.ContainerID + (hostID - m.HostID) - return contID, nil - } - } - return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) -} - -// toHost takes an id mapping and a remapped ID, and translates the -// ID to the mapped host ID. 
If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id # -func toHost(contID int, idMap []IDMap) (int, error) { - if idMap == nil { - return contID, nil - } - for _, m := range idMap { - if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { - hostID := m.HostID + (contID - m.ContainerID) - return hostID, nil - } - } - return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) -} - -// IDPair is a UID and GID pair -type IDPair struct { - UID int - GID int -} - -// IDMappings contains a mappings of UIDs and GIDs -type IDMappings struct { - uids []IDMap - gids []IDMap -} - -// NewIDMappings takes a requested user and group name and -// using the data from /etc/sub{uid,gid} ranges, creates the -// proper uid and gid remapping ranges for that user/group pair -func NewIDMappings(username, groupname string) (*IDMappings, error) { - subuidRanges, err := parseSubuid(username) - if err != nil { - return nil, err - } - subgidRanges, err := parseSubgid(groupname) - if err != nil { - return nil, err - } - if len(subuidRanges) == 0 { - return nil, fmt.Errorf("No subuid ranges found for user %q", username) - } - if len(subgidRanges) == 0 { - return nil, fmt.Errorf("No subgid ranges found for group %q", groupname) - } - - return &IDMappings{ - uids: createIDMap(subuidRanges), - gids: createIDMap(subgidRanges), - }, nil -} - -// NewIDMappingsFromMaps creates a new mapping from two slices -// Deprecated: this is a temporary shim while transitioning to IDMapping -func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IDMappings { - return &IDMappings{uids: uids, gids: gids} -} - -// RootPair returns a uid and gid pair for the root user. The error is ignored -// because a root user always exists, and the defaults are correct when the uid -// and gid maps are empty. -func (i *IDMappings) RootPair() IDPair { - uid, gid, _ := GetRootUIDGID(i.uids, i.gids) - return IDPair{UID: uid, GID: gid} -} - -// ToHost returns the host UID and GID for the container uid, gid. 
-// Remapping is only performed if the ids aren't already the remapped root ids
-func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) {
- var err error
- target := i.RootPair()
-
- if pair.UID != target.UID {
- target.UID, err = toHost(pair.UID, i.uids)
- if err != nil {
- return target, err
- }
- }
-
- if pair.GID != target.GID {
- target.GID, err = toHost(pair.GID, i.gids)
- }
- return target, err
-}
-
-// ToContainer returns the container UID and GID for the host uid and gid
-func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) {
- uid, err := toContainer(pair.UID, i.uids)
- if err != nil {
- return -1, -1, err
- }
- gid, err := toContainer(pair.GID, i.gids)
- return uid, gid, err
-}
-
-// Empty returns true if there are no id mappings
-func (i *IDMappings) Empty() bool {
- return len(i.uids) == 0 && len(i.gids) == 0
-}
-
-// UIDs returns the UID mapping
-// TODO: remove this once everything has been refactored to use pairs
-func (i *IDMappings) UIDs() []IDMap {
- return i.uids
-}
-
-// GIDs returns the GID mapping
-// TODO: remove this once everything has been refactored to use pairs
-func (i *IDMappings) GIDs() []IDMap {
- return i.gids
-}
-
-func createIDMap(subidRanges ranges) []IDMap {
- idMap := []IDMap{}
-
- // sort the ranges by lowest ID first
- sort.Sort(subidRanges)
- containerID := 0
- for _, idrange := range subidRanges {
- idMap = append(idMap, IDMap{
- ContainerID: containerID,
- HostID: idrange.Start,
- Size: idrange.Length,
- })
- containerID = containerID + idrange.Length
- }
- return idMap
-}
-
-func parseSubuid(username string) (ranges, error) {
- return parseSubidFile(subuidFileName, username)
-}
-
-func parseSubgid(username string) (ranges, error) {
- return parseSubidFile(subgidFileName, username)
-}
-
-// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid)
-// and return all found ranges for a specified username.
If the special value -// "ALL" is supplied for username, then all ranges in the file will be returned -func parseSubidFile(path, username string) (ranges, error) { - var rangeList ranges - - subidFile, err := os.Open(path) - if err != nil { - return rangeList, err - } - defer subidFile.Close() - - s := bufio.NewScanner(subidFile) - for s.Scan() { - if err := s.Err(); err != nil { - return rangeList, err - } - - text := strings.TrimSpace(s.Text()) - if text == "" || strings.HasPrefix(text, "#") { - continue - } - parts := strings.Split(text, ":") - if len(parts) != 3 { - return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) - } - if parts[0] == username || username == "ALL" { - startid, err := strconv.Atoi(parts[1]) - if err != nil { - return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) - } - length, err := strconv.Atoi(parts[2]) - if err != nil { - return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) - } - rangeList = append(rangeList, subIDRange{startid, length}) - } - } - return rangeList, nil -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go deleted file mode 100644 index 1d87ea3bc..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go +++ /dev/null @@ -1,230 +0,0 @@ -// +build !windows - -package idtools // import "github.com/docker/docker/pkg/idtools" - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - "syscall" - - "github.com/docker/docker/pkg/system" - "github.com/opencontainers/runc/libcontainer/user" -) - -var ( - entOnce sync.Once - getentCmd string -) - -func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { - // make an array containing the original path asked for, plus (for mkAll == true) - // all path components leading up to the complete path that don't exist before we MkdirAll - // so that we can chown all of them properly at the end. 
If chownExisting is false, we won't - // chown the full directory path if it exists - var paths []string - - stat, err := system.Stat(path) - if err == nil { - if !stat.IsDir() { - return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} - } - if !chownExisting { - return nil - } - - // short-circuit--we were called with an existing directory and chown was requested - return lazyChown(path, ownerUID, ownerGID, stat) - } - - if os.IsNotExist(err) { - paths = []string{path} - } - - if mkAll { - // walk back to "/" looking for directories which do not exist - // and add them to the paths array for chown after creation - dirPath := path - for { - dirPath = filepath.Dir(dirPath) - if dirPath == "/" { - break - } - if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { - paths = append(paths, dirPath) - } - } - if err := system.MkdirAll(path, mode, ""); err != nil { - return err - } - } else { - if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { - return err - } - } - // even if it existed, we will chown the requested path + any subpaths that - // didn't exist when we called MkdirAll - for _, pathComponent := range paths { - if err := lazyChown(pathComponent, ownerUID, ownerGID, nil); err != nil { - return err - } - } - return nil -} - -// CanAccess takes a valid (existing) directory and a uid, gid pair and determines -// if that uid, gid pair has access (execute bit) to the directory -func CanAccess(path string, pair IDPair) bool { - statInfo, err := system.Stat(path) - if err != nil { - return false - } - fileMode := os.FileMode(statInfo.Mode()) - permBits := fileMode.Perm() - return accessible(statInfo.UID() == uint32(pair.UID), - statInfo.GID() == uint32(pair.GID), permBits) -} - -func accessible(isOwner, isGroup bool, perms os.FileMode) bool { - if isOwner && (perms&0100 == 0100) { - return true - } - if isGroup && (perms&0010 == 0010) { - return true - } - if perms&0001 == 0001 { - return true - } - return false -} - -// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupUser(username string) (user.User, error) { - // first try a local system files lookup using existing capabilities - usr, err := user.LookupUser(username) - if err == nil { - return usr, nil - } - // local files lookup failed; attempt to call `getent` to query configured passwd dbs - usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username)) - if err != nil { - return user.User{}, err - } - return usr, nil -} - -// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupUID(uid int) (user.User, error) { - // first try a local system files lookup using existing capabilities - usr, err := user.LookupUid(uid) - if err == nil { - return usr, nil - } - // local files lookup failed; attempt to call `getent` to query configured passwd dbs - return getentUser(fmt.Sprintf("%s %d", "passwd", uid)) -} - -func getentUser(args string) (user.User, error) { - reader, err := callGetent(args) - if err != nil { - return user.User{}, err - } - users, err := user.ParsePasswd(reader) - if err != nil { - return user.User{}, err - } - if len(users) == 0 { - return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1]) - } - return users[0], nil -} - -// LookupGroup uses 
traditional local system files lookup (from libcontainer/user) on a group name, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupGroup(groupname string) (user.Group, error) { - // first try a local system files lookup using existing capabilities - group, err := user.LookupGroup(groupname) - if err == nil { - return group, nil - } - // local files lookup failed; attempt to call `getent` to query configured group dbs - return getentGroup(fmt.Sprintf("%s %s", "group", groupname)) -} - -// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupGID(gid int) (user.Group, error) { - // first try a local system files lookup using existing capabilities - group, err := user.LookupGid(gid) - if err == nil { - return group, nil - } - // local files lookup failed; attempt to call `getent` to query configured group dbs - return getentGroup(fmt.Sprintf("%s %d", "group", gid)) -} - -func getentGroup(args string) (user.Group, error) { - reader, err := callGetent(args) - if err != nil { - return user.Group{}, err - } - groups, err := user.ParseGroup(reader) - if err != nil { - return user.Group{}, err - } - if len(groups) == 0 { - return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1]) - } - return groups[0], nil -} - -func callGetent(args string) (io.Reader, error) { - entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) - // if no `getent` command on host, can't do anything else - if getentCmd == "" { - return nil, fmt.Errorf("") - } - out, err := execCmd(getentCmd, args) - if err != nil { - exitCode, errC := system.GetExitCode(err) - if errC != nil { - return nil, err - } - switch exitCode { - case 1: - return nil, fmt.Errorf("getent reported invalid parameters/database unknown") - case 2: - terms := strings.Split(args, " ") - return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0]) - case 3: - return nil, fmt.Errorf("getent database doesn't support enumeration") - default: - return nil, err - } - - } - return bytes.NewReader(out), nil -} - -// lazyChown performs a chown only if the uid/gid don't match what's requested -// Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the -// dir is on an NFS share, so don't call chown unless we absolutely must. -func lazyChown(p string, uid, gid int, stat *system.StatT) error { - if stat == nil { - var err error - stat, err = system.Stat(p) - if err != nil { - return err - } - } - if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { - return nil - } - return os.Chown(p, uid, gid) -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go deleted file mode 100644 index d72cc2892..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go +++ /dev/null @@ -1,23 +0,0 @@ -package idtools // import "github.com/docker/docker/pkg/idtools" - -import ( - "os" - - "github.com/docker/docker/pkg/system" -) - -// Platforms such as Windows do not support the UID/GID concept. So make this -// just a wrapper around system.MkdirAll. 
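-// The owner and chown-related parameters are accepted only for parity with
-// the Unix implementation; they are ignored on Windows.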
-func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { - if err := system.MkdirAll(path, mode, ""); err != nil { - return err - } - return nil -} - -// CanAccess takes a valid (existing) directory and a uid, gid pair and determines -// if that uid, gid pair has access (execute bit) to the directory -// Windows does not require/support this function, so always return true -func CanAccess(path string, pair IDPair) bool { - return true -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go deleted file mode 100644 index 6272c5a40..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go +++ /dev/null @@ -1,164 +0,0 @@ -package idtools // import "github.com/docker/docker/pkg/idtools" - -import ( - "fmt" - "regexp" - "sort" - "strconv" - "strings" - "sync" -) - -// add a user and/or group to Linux /etc/passwd, /etc/group using standard -// Linux distribution commands: -// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group -// useradd -r -s /bin/false - -var ( - once sync.Once - userCommand string - - cmdTemplates = map[string]string{ - "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s", - "useradd": "-r -s /bin/false %s", - "usermod": "-%s %d-%d %s", - } - - idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`) - // default length for a UID/GID subordinate range - defaultRangeLen = 65536 - defaultRangeStart = 100000 - userMod = "usermod" -) - -// AddNamespaceRangesUser takes a username and uses the standard system -// utility to create a system user/group pair used to hold the -// /etc/sub{uid,gid} ranges which will be used for user namespace -// mapping ranges in containers. 
-func AddNamespaceRangesUser(name string) (int, int, error) { - if err := addUser(name); err != nil { - return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) - } - - // Query the system for the created uid and gid pair - out, err := execCmd("id", name) - if err != nil { - return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err) - } - matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) - if len(matches) != 3 { - return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out)) - } - uid, err := strconv.Atoi(matches[1]) - if err != nil { - return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err) - } - gid, err := strconv.Atoi(matches[2]) - if err != nil { - return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err) - } - - // Now we need to create the subuid/subgid ranges for our new user/group (system users - // do not get auto-created ranges in subuid/subgid) - - if err := createSubordinateRanges(name); err != nil { - return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err) - } - return uid, gid, nil -} - -func addUser(userName string) error { - once.Do(func() { - // set up which commands are used for adding users/groups dependent on distro - if _, err := resolveBinary("adduser"); err == nil { - userCommand = "adduser" - } else if _, err := resolveBinary("useradd"); err == nil { - userCommand = "useradd" - } - }) - if userCommand == "" { - return fmt.Errorf("Cannot add user; no useradd/adduser binary found") - } - args := fmt.Sprintf(cmdTemplates[userCommand], userName) - out, err := execCmd(userCommand, args) - if err != nil { - return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out)) - } - return nil -} - -func createSubordinateRanges(name string) error { - - // first, we should verify that ranges weren't automatically created - // by the distro tooling - ranges, err := parseSubuid(name) - if err != nil { - return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err) - } - if len(ranges) == 0 { - // no UID ranges; let's create one - startID, err := findNextUIDRange() - if err != nil { - return fmt.Errorf("Can't find available subuid range: %v", err) - } - out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name)) - if err != nil { - return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err) - } - } - - ranges, err = parseSubgid(name) - if err != nil { - return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err) - } - if len(ranges) == 0 { - // no GID ranges; let's create one - startID, err := findNextGIDRange() - if err != nil { - return fmt.Errorf("Can't find available subgid range: %v", err) - } - out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name)) - if err != nil { - return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err) - } - } - return nil -} - -func findNextUIDRange() (int, error) { - ranges, err := parseSubuid("ALL") - if err != nil { - return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err) - } - sort.Sort(ranges) - return findNextRangeStart(ranges) -} - -func findNextGIDRange() (int, error) { - ranges, err := parseSubgid("ALL") - if err != nil { - return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err) - } - 
sort.Sort(ranges) - return findNextRangeStart(ranges) -} - -func findNextRangeStart(rangeList ranges) (int, error) { - startID := defaultRangeStart - for _, arange := range rangeList { - if wouldOverlap(arange, startID) { - startID = arange.Start + arange.Length - } - } - return startID, nil -} - -func wouldOverlap(arange subIDRange, ID int) bool { - low := ID - high := ID + defaultRangeLen - if (low >= arange.Start && low <= arange.Start+arange.Length) || - (high <= arange.Start+arange.Length && high >= arange.Start) { - return true - } - return false -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go deleted file mode 100644 index e7c4d6311..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !linux - -package idtools // import "github.com/docker/docker/pkg/idtools" - -import "fmt" - -// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair -// and calls the appropriate helper function to add the group and then -// the user to the group in /etc/group and /etc/passwd respectively. -func AddNamespaceRangesUser(name string) (int, int, error) { - return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") -} diff --git a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go deleted file mode 100644 index 903ac4501..000000000 --- a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go +++ /dev/null @@ -1,32 +0,0 @@ -// +build !windows - -package idtools // import "github.com/docker/docker/pkg/idtools" - -import ( - "fmt" - "os/exec" - "path/filepath" - "strings" -) - -func resolveBinary(binname string) (string, error) { - binaryPath, err := exec.LookPath(binname) - if err != nil { - return "", err - } - resolvedPath, err := filepath.EvalSymlinks(binaryPath) - if err != nil { - return "", err - } - //only return no error if the final resolved binary basename - //matches what was searched for - if filepath.Base(resolvedPath) == binname { - return resolvedPath, nil - } - return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) -} - -func execCmd(cmd, args string) ([]byte, error) { - execCmd := exec.Command(cmd, strings.Split(args, " ")...) 
- return execCmd.CombinedOutput() -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/buffer.go b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go deleted file mode 100644 index 466f79294..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/buffer.go +++ /dev/null @@ -1,51 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "errors" - "io" -) - -var errBufferFull = errors.New("buffer is full") - -type fixedBuffer struct { - buf []byte - pos int - lastRead int -} - -func (b *fixedBuffer) Write(p []byte) (int, error) { - n := copy(b.buf[b.pos:cap(b.buf)], p) - b.pos += n - - if n < len(p) { - if b.pos == cap(b.buf) { - return n, errBufferFull - } - return n, io.ErrShortWrite - } - return n, nil -} - -func (b *fixedBuffer) Read(p []byte) (int, error) { - n := copy(p, b.buf[b.lastRead:b.pos]) - b.lastRead += n - return n, nil -} - -func (b *fixedBuffer) Len() int { - return b.pos - b.lastRead -} - -func (b *fixedBuffer) Cap() int { - return cap(b.buf) -} - -func (b *fixedBuffer) Reset() { - b.pos = 0 - b.lastRead = 0 - b.buf = b.buf[:0] -} - -func (b *fixedBuffer) String() string { - return string(b.buf[b.lastRead:b.pos]) -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go deleted file mode 100644 index d4bbf3c9d..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go +++ /dev/null @@ -1,186 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "errors" - "io" - "sync" -) - -// maxCap is the highest capacity to use in byte slices that buffer data. -const maxCap = 1e6 - -// minCap is the lowest capacity to use in byte slices that buffer data -const minCap = 64 - -// blockThreshold is the minimum number of bytes in the buffer which will cause -// a write to BytesPipe to block when allocating a new slice. -const blockThreshold = 1e6 - -var ( - // ErrClosed is returned when Write is called on a closed BytesPipe. - ErrClosed = errors.New("write to closed BytesPipe") - - bufPools = make(map[int]*sync.Pool) - bufPoolsLock sync.Mutex -) - -// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). -// All written data may be read at most once. Also, BytesPipe allocates -// and releases new byte slices to adjust to current needs, so the buffer -// won't be overgrown after peak loads. -type BytesPipe struct { - mu sync.Mutex - wait *sync.Cond - buf []*fixedBuffer - bufLen int - closeErr error // error to return from next Read. set to nil if not closed. -} - -// NewBytesPipe creates new BytesPipe, initialized by specified slice. -// If buf is nil, then it will be initialized with slice which cap is 64. -// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). -func NewBytesPipe() *BytesPipe { - bp := &BytesPipe{} - bp.buf = append(bp.buf, getBuffer(minCap)) - bp.wait = sync.NewCond(&bp.mu) - return bp -} - -// Write writes p to BytesPipe. -// It can allocate new []byte slices in a process of writing. 
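A standalone sketch of the per-capacity sync.Pool scheme BytesPipe relies on to recycle its fixed-size buffers (see getBuffer/returnBuffer further down): one pool per buffer capacity, created lazily under a lock.

```go
package main

import (
	"fmt"
	"sync"
)

var (
	bufPools     = make(map[int]*sync.Pool)
	bufPoolsLock sync.Mutex
)

func getBuf(size int) []byte {
	bufPoolsLock.Lock()
	pool, ok := bufPools[size]
	if !ok {
		pool = &sync.Pool{New: func() interface{} { return make([]byte, 0, size) }}
		bufPools[size] = pool
	}
	bufPoolsLock.Unlock()
	return pool.Get().([]byte)
}

func putBuf(b []byte) {
	bufPoolsLock.Lock()
	pool := bufPools[cap(b)]
	bufPoolsLock.Unlock()
	if pool != nil {
		pool.Put(b[:0]) // reset length before returning to the pool
	}
}

func main() {
	b := getBuf(64)
	b = append(b, "hello"...)
	fmt.Println(len(b), cap(b))
	putBuf(b)
}
```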
-func (bp *BytesPipe) Write(p []byte) (int, error) { - bp.mu.Lock() - - written := 0 -loop0: - for { - if bp.closeErr != nil { - bp.mu.Unlock() - return written, ErrClosed - } - - if len(bp.buf) == 0 { - bp.buf = append(bp.buf, getBuffer(64)) - } - // get the last buffer - b := bp.buf[len(bp.buf)-1] - - n, err := b.Write(p) - written += n - bp.bufLen += n - - // errBufferFull is an error we expect to get if the buffer is full - if err != nil && err != errBufferFull { - bp.wait.Broadcast() - bp.mu.Unlock() - return written, err - } - - // if there was enough room to write all then break - if len(p) == n { - break - } - - // more data: write to the next slice - p = p[n:] - - // make sure the buffer doesn't grow too big from this write - for bp.bufLen >= blockThreshold { - bp.wait.Wait() - if bp.closeErr != nil { - continue loop0 - } - } - - // add new byte slice to the buffers slice and continue writing - nextCap := b.Cap() * 2 - if nextCap > maxCap { - nextCap = maxCap - } - bp.buf = append(bp.buf, getBuffer(nextCap)) - } - bp.wait.Broadcast() - bp.mu.Unlock() - return written, nil -} - -// CloseWithError causes further reads from a BytesPipe to return immediately. -func (bp *BytesPipe) CloseWithError(err error) error { - bp.mu.Lock() - if err != nil { - bp.closeErr = err - } else { - bp.closeErr = io.EOF - } - bp.wait.Broadcast() - bp.mu.Unlock() - return nil -} - -// Close causes further reads from a BytesPipe to return immediately. -func (bp *BytesPipe) Close() error { - return bp.CloseWithError(nil) -} - -// Read reads bytes from BytesPipe. -// Data could be read only once. -func (bp *BytesPipe) Read(p []byte) (n int, err error) { - bp.mu.Lock() - if bp.bufLen == 0 { - if bp.closeErr != nil { - bp.mu.Unlock() - return 0, bp.closeErr - } - bp.wait.Wait() - if bp.bufLen == 0 && bp.closeErr != nil { - err := bp.closeErr - bp.mu.Unlock() - return 0, err - } - } - - for bp.bufLen > 0 { - b := bp.buf[0] - read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error - n += read - bp.bufLen -= read - - if b.Len() == 0 { - // it's empty so return it to the pool and move to the next one - returnBuffer(b) - bp.buf[0] = nil - bp.buf = bp.buf[1:] - } - - if len(p) == read { - break - } - - p = p[read:] - } - - bp.wait.Broadcast() - bp.mu.Unlock() - return -} - -func returnBuffer(b *fixedBuffer) { - b.Reset() - bufPoolsLock.Lock() - pool := bufPools[b.Cap()] - bufPoolsLock.Unlock() - if pool != nil { - pool.Put(b) - } -} - -func getBuffer(size int) *fixedBuffer { - bufPoolsLock.Lock() - pool, ok := bufPools[size] - if !ok { - pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} - bufPools[size] = pool - } - bufPoolsLock.Unlock() - return pool.Get().(*fixedBuffer) -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go deleted file mode 100644 index 534d66ac2..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go +++ /dev/null @@ -1,162 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "io" - "io/ioutil" - "os" - "path/filepath" -) - -// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a -// temporary file and closing it atomically changes the temporary file to -// destination path. Writing and closing concurrently is not allowed. 
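The essence of the atomic-write pattern implemented below: write to a temp file in the destination directory, fsync, close, then rename into place. A minimal sketch; the removed code additionally tracks write errors and removes the temp file on failure.

```go
package main

import (
	"io/ioutil"
	"os"
	"path/filepath"
)

func atomicWrite(filename string, data []byte, perm os.FileMode) error {
	f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
	if err != nil {
		return err
	}
	defer os.Remove(f.Name()) // harmless after a successful rename
	if _, err := f.Write(data); err != nil {
		f.Close()
		return err
	}
	if err := f.Sync(); err != nil { // make sure data hits disk before the rename
		f.Close()
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}
	if err := os.Chmod(f.Name(), perm); err != nil {
		return err
	}
	return os.Rename(f.Name(), filename) // atomic on the same filesystem
}

func main() {
	_ = atomicWrite("/tmp/example.txt", []byte("hello\n"), 0644)
}
```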
-func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { - f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) - if err != nil { - return nil, err - } - - abspath, err := filepath.Abs(filename) - if err != nil { - return nil, err - } - return &atomicFileWriter{ - f: f, - fn: abspath, - perm: perm, - }, nil -} - -// AtomicWriteFile atomically writes data to a file named by filename. -func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { - f, err := NewAtomicFileWriter(filename, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - f.(*atomicFileWriter).writeErr = err - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -type atomicFileWriter struct { - f *os.File - fn string - writeErr error - perm os.FileMode -} - -func (w *atomicFileWriter) Write(dt []byte) (int, error) { - n, err := w.f.Write(dt) - if err != nil { - w.writeErr = err - } - return n, err -} - -func (w *atomicFileWriter) Close() (retErr error) { - defer func() { - if retErr != nil || w.writeErr != nil { - os.Remove(w.f.Name()) - } - }() - if err := w.f.Sync(); err != nil { - w.f.Close() - return err - } - if err := w.f.Close(); err != nil { - return err - } - if err := os.Chmod(w.f.Name(), w.perm); err != nil { - return err - } - if w.writeErr == nil { - return os.Rename(w.f.Name(), w.fn) - } - return nil -} - -// AtomicWriteSet is used to atomically write a set -// of files and ensure they are visible at the same time. -// Must be committed to a new directory. -type AtomicWriteSet struct { - root string -} - -// NewAtomicWriteSet creates a new atomic write set to -// atomically create a set of files. The given directory -// is used as the base directory for storing files before -// commit. If no temporary directory is given the system -// default is used. -func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { - td, err := ioutil.TempDir(tmpDir, "write-set-") - if err != nil { - return nil, err - } - - return &AtomicWriteSet{ - root: td, - }, nil -} - -// WriteFile writes a file to the set, guaranteeing the file -// has been synced. -func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error { - f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -type syncFileCloser struct { - *os.File -} - -func (w syncFileCloser) Close() error { - err := w.File.Sync() - if err1 := w.File.Close(); err == nil { - err = err1 - } - return err -} - -// FileWriter opens a file writer inside the set. The file -// should be synced and closed before calling commit. -func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) { - f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm) - if err != nil { - return nil, err - } - return syncFileCloser{f}, nil -} - -// Cancel cancels the set and removes all temporary data -// created in the set. -func (ws *AtomicWriteSet) Cancel() error { - return os.RemoveAll(ws.root) -} - -// Commit moves all created files to the target directory. The -// target directory must not exist and the parent of the target -// directory must exist. 
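Hypothetical usage of the AtomicWriteSet API above, as it existed before this removal: stage several files in a temp directory, then publish them all at once with a single directory rename.

```go
package main

import "github.com/docker/docker/pkg/ioutils"

func main() {
	ws, err := ioutils.NewAtomicWriteSet("") // "" = system default temp dir
	if err != nil {
		panic(err)
	}
	if err := ws.WriteFile("config.json", []byte("{}"), 0600); err != nil {
		ws.Cancel() // drop the staged files
		panic(err)
	}
	// Commit renames the staging dir; the target must not exist yet.
	if err := ws.Commit("/tmp/bundle"); err != nil {
		ws.Cancel()
		panic(err)
	}
}
```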
-func (ws *AtomicWriteSet) Commit(target string) error { - return os.Rename(ws.root, target) -} - -// String returns the location the set is writing to. -func (ws *AtomicWriteSet) String() string { - return ws.root -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go deleted file mode 100644 index 1f657bd3d..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/readers.go +++ /dev/null @@ -1,157 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "context" - "crypto/sha256" - "encoding/hex" - "io" -) - -// ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser -// It calls the given callback function when closed. It should be constructed -// with NewReadCloserWrapper -type ReadCloserWrapper struct { - io.Reader - closer func() error -} - -// Close calls back the passed closer function -func (r *ReadCloserWrapper) Close() error { - return r.closer() -} - -// NewReadCloserWrapper returns a new io.ReadCloser. -func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { - return &ReadCloserWrapper{ - Reader: r, - closer: closer, - } -} - -type readerErrWrapper struct { - reader io.Reader - closer func() -} - -func (r *readerErrWrapper) Read(p []byte) (int, error) { - n, err := r.reader.Read(p) - if err != nil { - r.closer() - } - return n, err -} - -// NewReaderErrWrapper returns a new io.Reader. -func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { - return &readerErrWrapper{ - reader: r, - closer: closer, - } -} - -// HashData returns the sha256 sum of src. -func HashData(src io.Reader) (string, error) { - h := sha256.New() - if _, err := io.Copy(h, src); err != nil { - return "", err - } - return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil -} - -// OnEOFReader wraps an io.ReadCloser and a function -// the function will run at the end of file or close the file. -type OnEOFReader struct { - Rc io.ReadCloser - Fn func() -} - -func (r *OnEOFReader) Read(p []byte) (n int, err error) { - n, err = r.Rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -// Close closes the file and run the function. -func (r *OnEOFReader) Close() error { - err := r.Rc.Close() - r.runFunc() - return err -} - -func (r *OnEOFReader) runFunc() { - if fn := r.Fn; fn != nil { - fn() - r.Fn = nil - } -} - -// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read -// operations. -type cancelReadCloser struct { - cancel func() - pR *io.PipeReader // Stream to read from - pW *io.PipeWriter -} - -// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the -// context is cancelled. The returned io.ReadCloser must be closed when it is -// no longer needed. -func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { - pR, pW := io.Pipe() - - // Create a context used to signal when the pipe is closed - doneCtx, cancel := context.WithCancel(context.Background()) - - p := &cancelReadCloser{ - cancel: cancel, - pR: pR, - pW: pW, - } - - go func() { - _, err := io.Copy(pW, in) - select { - case <-ctx.Done(): - // If the context was closed, p.closeWithError - // was already called. Calling it again would - // change the error that Read returns. 
- default: - p.closeWithError(err) - } - in.Close() - }() - go func() { - for { - select { - case <-ctx.Done(): - p.closeWithError(ctx.Err()) - case <-doneCtx.Done(): - return - } - } - }() - - return p -} - -// Read wraps the Read method of the pipe that provides data from the wrapped -// ReadCloser. -func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { - return p.pR.Read(buf) -} - -// closeWithError closes the wrapper and its underlying reader. It will -// cause future calls to Read to return err. -func (p *cancelReadCloser) closeWithError(err error) { - p.pW.CloseWithError(err) - p.cancel() -} - -// Close closes the wrapper its underlying reader. It will cause -// future calls to Read to return io.EOF. -func (p *cancelReadCloser) Close() error { - p.closeWithError(io.EOF) - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go deleted file mode 100644 index dc894f913..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !windows - -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import "io/ioutil" - -// TempDir on Unix systems is equivalent to ioutil.TempDir. -func TempDir(dir, prefix string) (string, error) { - return ioutil.TempDir(dir, prefix) -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go deleted file mode 100644 index ecaba2e36..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go +++ /dev/null @@ -1,16 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "io/ioutil" - - "github.com/docker/docker/pkg/longpath" -) - -// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format. -func TempDir(dir, prefix string) (string, error) { - tempDir, err := ioutil.TempDir(dir, prefix) - if err != nil { - return "", err - } - return longpath.AddPrefix(tempDir), nil -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go deleted file mode 100644 index 91b8d1826..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go +++ /dev/null @@ -1,92 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import ( - "io" - "sync" -) - -// WriteFlusher wraps the Write and Flush operation ensuring that every write -// is a flush. In addition, the Close method can be called to intercept -// Read/Write calls if the targets lifecycle has already ended. -type WriteFlusher struct { - w io.Writer - flusher flusher - flushed chan struct{} - flushedOnce sync.Once - closed chan struct{} - closeLock sync.Mutex -} - -type flusher interface { - Flush() -} - -var errWriteFlusherClosed = io.EOF - -func (wf *WriteFlusher) Write(b []byte) (n int, err error) { - select { - case <-wf.closed: - return 0, errWriteFlusherClosed - default: - } - - n, err = wf.w.Write(b) - wf.Flush() // every write is a flush. - return n, err -} - -// Flush the stream immediately. -func (wf *WriteFlusher) Flush() { - select { - case <-wf.closed: - return - default: - } - - wf.flushedOnce.Do(func() { - close(wf.flushed) - }) - wf.flusher.Flush() -} - -// Flushed returns the state of flushed. -// If it's flushed, return true, or else it return false. -func (wf *WriteFlusher) Flushed() bool { - // BUG(stevvooe): Remove this method. Its use is inherently racy. 
Seems to - // be used to detect whether or a response code has been issued or not. - // Another hook should be used instead. - var flushed bool - select { - case <-wf.flushed: - flushed = true - default: - } - return flushed -} - -// Close closes the write flusher, disallowing any further writes to the -// target. After the flusher is closed, all calls to write or flush will -// result in an error. -func (wf *WriteFlusher) Close() error { - wf.closeLock.Lock() - defer wf.closeLock.Unlock() - - select { - case <-wf.closed: - return errWriteFlusherClosed - default: - close(wf.closed) - } - return nil -} - -// NewWriteFlusher returns a new WriteFlusher. -func NewWriteFlusher(w io.Writer) *WriteFlusher { - var fl flusher - if f, ok := w.(flusher); ok { - fl = f - } else { - fl = &NopFlusher{} - } - return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})} -} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/docker/docker/pkg/ioutils/writers.go deleted file mode 100644 index 61c679497..000000000 --- a/vendor/github.com/docker/docker/pkg/ioutils/writers.go +++ /dev/null @@ -1,66 +0,0 @@ -package ioutils // import "github.com/docker/docker/pkg/ioutils" - -import "io" - -// NopWriter represents a type which write operation is nop. -type NopWriter struct{} - -func (*NopWriter) Write(buf []byte) (int, error) { - return len(buf), nil -} - -type nopWriteCloser struct { - io.Writer -} - -func (w *nopWriteCloser) Close() error { return nil } - -// NopWriteCloser returns a nopWriteCloser. -func NopWriteCloser(w io.Writer) io.WriteCloser { - return &nopWriteCloser{w} -} - -// NopFlusher represents a type which flush operation is nop. -type NopFlusher struct{} - -// Flush is a nop operation. -func (f *NopFlusher) Flush() {} - -type writeCloserWrapper struct { - io.Writer - closer func() error -} - -func (r *writeCloserWrapper) Close() error { - return r.closer() -} - -// NewWriteCloserWrapper returns a new io.WriteCloser. -func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { - return &writeCloserWrapper{ - Writer: r, - closer: closer, - } -} - -// WriteCounter wraps a concrete io.Writer and hold a count of the number -// of bytes written to the writer during a "session". -// This can be convenient when write return is masked -// (e.g., json.Encoder.Encode()) -type WriteCounter struct { - Count int64 - Writer io.Writer -} - -// NewWriteCounter returns a new WriteCounter. -func NewWriteCounter(w io.Writer) *WriteCounter { - return &WriteCounter{ - Writer: w, - } -} - -func (wc *WriteCounter) Write(p []byte) (count int, err error) { - count, err = wc.Writer.Write(p) - wc.Count += int64(count) - return -} diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go deleted file mode 100644 index dd95f3670..000000000 --- a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go +++ /dev/null @@ -1,335 +0,0 @@ -package jsonmessage // import "github.com/docker/docker/pkg/jsonmessage" - -import ( - "encoding/json" - "fmt" - "io" - "os" - "strings" - "time" - - "github.com/Nvveen/Gotty" - "github.com/docker/docker/pkg/term" - "github.com/docker/go-units" -) - -// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to -// ensure the formatted time isalways the same number of characters. 
-const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" - -// JSONError wraps a concrete Code and Message, `Code` is -// is an integer error code, `Message` is the error message. -type JSONError struct { - Code int `json:"code,omitempty"` - Message string `json:"message,omitempty"` -} - -func (e *JSONError) Error() string { - return e.Message -} - -// JSONProgress describes a Progress. terminalFd is the fd of the current terminal, -// Start is the initial value for the operation. Current is the current status and -// value of the progress made towards Total. Total is the end value describing when -// we made 100% progress for an operation. -type JSONProgress struct { - terminalFd uintptr - Current int64 `json:"current,omitempty"` - Total int64 `json:"total,omitempty"` - Start int64 `json:"start,omitempty"` - // If true, don't show xB/yB - HideCounts bool `json:"hidecounts,omitempty"` - Units string `json:"units,omitempty"` - nowFunc func() time.Time - winSize int -} - -func (p *JSONProgress) String() string { - var ( - width = p.width() - pbBox string - numbersBox string - timeLeftBox string - ) - if p.Current <= 0 && p.Total <= 0 { - return "" - } - if p.Total <= 0 { - switch p.Units { - case "": - current := units.HumanSize(float64(p.Current)) - return fmt.Sprintf("%8v", current) - default: - return fmt.Sprintf("%d %s", p.Current, p.Units) - } - } - - percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 - if percentage > 50 { - percentage = 50 - } - if width > 110 { - // this number can't be negative gh#7136 - numSpaces := 0 - if 50-percentage > 0 { - numSpaces = 50 - percentage - } - pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) - } - - switch { - case p.HideCounts: - case p.Units == "": // no units, use bytes - current := units.HumanSize(float64(p.Current)) - total := units.HumanSize(float64(p.Total)) - - numbersBox = fmt.Sprintf("%8v/%v", current, total) - - if p.Current > p.Total { - // remove total display if the reported current is wonky. - numbersBox = fmt.Sprintf("%8v", current) - } - default: - numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units) - - if p.Current > p.Total { - // remove total display if the reported current is wonky. - numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units) - } - } - - if p.Current > 0 && p.Start > 0 && percentage < 50 { - fromStart := p.now().Sub(time.Unix(p.Start, 0)) - perEntry := fromStart / time.Duration(p.Current) - left := time.Duration(p.Total-p.Current) * perEntry - left = (left / time.Second) * time.Second - - if width > 50 { - timeLeftBox = " " + left.String() - } - } - return pbBox + numbersBox + timeLeftBox -} - -// shim for testing -func (p *JSONProgress) now() time.Time { - if p.nowFunc == nil { - p.nowFunc = func() time.Time { - return time.Now().UTC() - } - } - return p.nowFunc() -} - -// shim for testing -func (p *JSONProgress) width() int { - if p.winSize != 0 { - return p.winSize - } - ws, err := term.GetWinsize(p.terminalFd) - if err == nil { - return int(ws.Width) - } - return 200 -} - -// JSONMessage defines a message struct. It describes -// the created time, where it from, status, ID of the -// message. It's used for docker events. 
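A minimal sketch of consuming a JSONMessage stream the way DisplayJSONMessagesStream does below: a json.Decoder in a loop until io.EOF. The trimmed-down message struct here is an assumption for illustration.

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

type msg struct {
	Status string `json:"status"`
	ID     string `json:"id"`
}

func main() {
	in := strings.NewReader(`{"status":"Pulling","id":"abc"}{"status":"Done","id":"abc"}`)
	dec := json.NewDecoder(in)
	for {
		var m msg
		if err := dec.Decode(&m); err != nil {
			if err == io.EOF {
				break // clean end of stream
			}
			panic(err)
		}
		fmt.Printf("%s: %s\n", m.ID, m.Status)
	}
}
```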
-type JSONMessage struct { - Stream string `json:"stream,omitempty"` - Status string `json:"status,omitempty"` - Progress *JSONProgress `json:"progressDetail,omitempty"` - ProgressMessage string `json:"progress,omitempty"` //deprecated - ID string `json:"id,omitempty"` - From string `json:"from,omitempty"` - Time int64 `json:"time,omitempty"` - TimeNano int64 `json:"timeNano,omitempty"` - Error *JSONError `json:"errorDetail,omitempty"` - ErrorMessage string `json:"error,omitempty"` //deprecated - // Aux contains out-of-band data, such as digests for push signing and image id after building. - Aux *json.RawMessage `json:"aux,omitempty"` -} - -/* Satisfied by gotty.TermInfo as well as noTermInfo from below */ -type termInfo interface { - Parse(attr string, params ...interface{}) (string, error) -} - -type noTermInfo struct{} // canary used when no terminfo. - -func (ti *noTermInfo) Parse(attr string, params ...interface{}) (string, error) { - return "", fmt.Errorf("noTermInfo") -} - -func clearLine(out io.Writer, ti termInfo) { - // el2 (clear whole line) is not exposed by terminfo. - - // First clear line from beginning to cursor - if attr, err := ti.Parse("el1"); err == nil { - fmt.Fprintf(out, "%s", attr) - } else { - fmt.Fprintf(out, "\x1b[1K") - } - // Then clear line from cursor to end - if attr, err := ti.Parse("el"); err == nil { - fmt.Fprintf(out, "%s", attr) - } else { - fmt.Fprintf(out, "\x1b[K") - } -} - -func cursorUp(out io.Writer, ti termInfo, l int) { - if l == 0 { // Should never be the case, but be tolerant - return - } - if attr, err := ti.Parse("cuu", l); err == nil { - fmt.Fprintf(out, "%s", attr) - } else { - fmt.Fprintf(out, "\x1b[%dA", l) - } -} - -func cursorDown(out io.Writer, ti termInfo, l int) { - if l == 0 { // Should never be the case, but be tolerant - return - } - if attr, err := ti.Parse("cud", l); err == nil { - fmt.Fprintf(out, "%s", attr) - } else { - fmt.Fprintf(out, "\x1b[%dB", l) - } -} - -// Display displays the JSONMessage to `out`. `termInfo` is non-nil if `out` -// is a terminal. If this is the case, it will erase the entire current line -// when displaying the progressbar. -func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error { - if jm.Error != nil { - if jm.Error.Code == 401 { - return fmt.Errorf("authentication is required") - } - return jm.Error - } - var endl string - if termInfo != nil && jm.Stream == "" && jm.Progress != nil { - clearLine(out, termInfo) - endl = "\r" - fmt.Fprintf(out, endl) - } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal - return nil - } - if jm.TimeNano != 0 { - fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed)) - } else if jm.Time != 0 { - fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed)) - } - if jm.ID != "" { - fmt.Fprintf(out, "%s: ", jm.ID) - } - if jm.From != "" { - fmt.Fprintf(out, "(from %s) ", jm.From) - } - if jm.Progress != nil && termInfo != nil { - fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) - } else if jm.ProgressMessage != "" { //deprecated - fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) - } else if jm.Stream != "" { - fmt.Fprintf(out, "%s%s", jm.Stream, endl) - } else { - fmt.Fprintf(out, "%s%s\n", jm.Status, endl) - } - return nil -} - -// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal` -// describes if `out` is a terminal. 
If this is the case, it will print `\n` at the end of -// each line and move the cursor while displaying. -func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(JSONMessage)) error { - var ( - dec = json.NewDecoder(in) - ids = make(map[string]int) - ) - - var termInfo termInfo - - if isTerminal { - term := os.Getenv("TERM") - if term == "" { - term = "vt102" - } - - var err error - if termInfo, err = gotty.OpenTermInfo(term); err != nil { - termInfo = &noTermInfo{} - } - } - - for { - diff := 0 - var jm JSONMessage - if err := dec.Decode(&jm); err != nil { - if err == io.EOF { - break - } - return err - } - - if jm.Aux != nil { - if auxCallback != nil { - auxCallback(jm) - } - continue - } - - if jm.Progress != nil { - jm.Progress.terminalFd = terminalFd - } - if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { - line, ok := ids[jm.ID] - if !ok { - // NOTE: This approach of using len(id) to - // figure out the number of lines of history - // only works as long as we clear the history - // when we output something that's not - // accounted for in the map, such as a line - // with no ID. - line = len(ids) - ids[jm.ID] = line - if termInfo != nil { - fmt.Fprintf(out, "\n") - } - } - diff = len(ids) - line - if termInfo != nil { - cursorUp(out, termInfo, diff) - } - } else { - // When outputting something that isn't progress - // output, clear the history of previous lines. We - // don't want progress entries from some previous - // operation to be updated (for example, pull -a - // with multiple tags). - ids = make(map[string]int) - } - err := jm.Display(out, termInfo) - if jm.ID != "" && termInfo != nil { - cursorDown(out, termInfo, diff) - } - if err != nil { - return err - } - } - return nil -} - -type stream interface { - io.Writer - FD() uintptr - IsTerminal() bool -} - -// DisplayJSONMessagesToStream prints json messages to the output stream -func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(JSONMessage)) error { - return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback) -} diff --git a/vendor/github.com/docker/docker/pkg/locker/locker.go b/vendor/github.com/docker/docker/pkg/locker/locker.go deleted file mode 100644 index dbd47fc46..000000000 --- a/vendor/github.com/docker/docker/pkg/locker/locker.go +++ /dev/null @@ -1,112 +0,0 @@ -/* -Package locker provides a mechanism for creating finer-grained locking to help -free up more global locks to handle other tasks. - -The implementation looks close to a sync.Mutex, however the user must provide a -reference to use to refer to the underlying lock when locking and unlocking, -and unlock may generate an error. - -If a lock with a given name does not exist when `Lock` is called, one is -created. -Lock references are automatically cleaned up on `Unlock` if nothing else is -waiting for the lock. -*/ -package locker // import "github.com/docker/docker/pkg/locker" - -import ( - "errors" - "sync" - "sync/atomic" -) - -// ErrNoSuchLock is returned when the requested lock does not exist -var ErrNoSuchLock = errors.New("no such lock") - -// Locker provides a locking mechanism based on the passed in reference name -type Locker struct { - mu sync.Mutex - locks map[string]*lockCtr -} - -// lockCtr is used by Locker to represent a lock with a given name. 
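Hypothetical usage of the name-keyed Locker above, as it existed before this removal: work on the same key is serialized while unrelated keys proceed in parallel.

```go
package main

import (
	"fmt"
	"sync"

	"github.com/docker/docker/pkg/locker"
)

func main() {
	l := locker.New()
	var wg sync.WaitGroup
	for _, id := range []string{"a", "a", "b"} {
		wg.Add(1)
		go func(id string) {
			defer wg.Done()
			l.Lock(id) // blocks only if the same id is already held
			defer l.Unlock(id)
			fmt.Println("working on", id)
		}(id)
	}
	wg.Wait()
}
```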
-type lockCtr struct { - mu sync.Mutex - // waiters is the number of waiters waiting to acquire the lock - // this is int32 instead of uint32 so we can add `-1` in `dec()` - waiters int32 -} - -// inc increments the number of waiters waiting for the lock -func (l *lockCtr) inc() { - atomic.AddInt32(&l.waiters, 1) -} - -// dec decrements the number of waiters waiting on the lock -func (l *lockCtr) dec() { - atomic.AddInt32(&l.waiters, -1) -} - -// count gets the current number of waiters -func (l *lockCtr) count() int32 { - return atomic.LoadInt32(&l.waiters) -} - -// Lock locks the mutex -func (l *lockCtr) Lock() { - l.mu.Lock() -} - -// Unlock unlocks the mutex -func (l *lockCtr) Unlock() { - l.mu.Unlock() -} - -// New creates a new Locker -func New() *Locker { - return &Locker{ - locks: make(map[string]*lockCtr), - } -} - -// Lock locks a mutex with the given name. If it doesn't exist, one is created -func (l *Locker) Lock(name string) { - l.mu.Lock() - if l.locks == nil { - l.locks = make(map[string]*lockCtr) - } - - nameLock, exists := l.locks[name] - if !exists { - nameLock = &lockCtr{} - l.locks[name] = nameLock - } - - // increment the nameLock waiters while inside the main mutex - // this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently - nameLock.inc() - l.mu.Unlock() - - // Lock the nameLock outside the main mutex so we don't block other operations - // once locked then we can decrement the number of waiters for this lock - nameLock.Lock() - nameLock.dec() -} - -// Unlock unlocks the mutex with the given name -// If the given lock is not being waited on by any other callers, it is deleted -func (l *Locker) Unlock(name string) error { - l.mu.Lock() - nameLock, exists := l.locks[name] - if !exists { - l.mu.Unlock() - return ErrNoSuchLock - } - - if nameLock.count() == 0 { - delete(l.locks, name) - } - nameLock.Unlock() - - l.mu.Unlock() - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/longpath/longpath.go b/vendor/github.com/docker/docker/pkg/longpath/longpath.go deleted file mode 100644 index 4177affba..000000000 --- a/vendor/github.com/docker/docker/pkg/longpath/longpath.go +++ /dev/null @@ -1,26 +0,0 @@ -// longpath introduces some constants and helper functions for handling long paths -// in Windows, which are expected to be prepended with `\\?\` and followed by either -// a drive letter, a UNC server\share, or a volume identifier. - -package longpath // import "github.com/docker/docker/pkg/longpath" - -import ( - "strings" -) - -// Prefix is the longpath prefix for Windows file paths. -const Prefix = `\\?\` - -// AddPrefix will add the Windows long path prefix to the path provided if -// it does not already have it. -func AddPrefix(path string) string { - if !strings.HasPrefix(path, Prefix) { - if strings.HasPrefix(path, `\\`) { - // This is a UNC path, so we need to add 'UNC' to the path as well. 
- path = Prefix + `UNC` + path[1:] - } else { - path = Prefix + path - } - } - return path -} diff --git a/vendor/github.com/docker/docker/pkg/loopback/attach_loopback.go b/vendor/github.com/docker/docker/pkg/loopback/attach_loopback.go deleted file mode 100644 index 94feb8fc7..000000000 --- a/vendor/github.com/docker/docker/pkg/loopback/attach_loopback.go +++ /dev/null @@ -1,137 +0,0 @@ -// +build linux,cgo - -package loopback // import "github.com/docker/docker/pkg/loopback" - -import ( - "errors" - "fmt" - "os" - - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -// Loopback related errors -var ( - ErrAttachLoopbackDevice = errors.New("loopback attach failed") - ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") - ErrSetCapacity = errors.New("Unable set loopback capacity") -) - -func stringToLoopName(src string) [LoNameSize]uint8 { - var dst [LoNameSize]uint8 - copy(dst[:], src[:]) - return dst -} - -func getNextFreeLoopbackIndex() (int, error) { - f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644) - if err != nil { - return 0, err - } - defer f.Close() - - index, err := ioctlLoopCtlGetFree(f.Fd()) - if index < 0 { - index = 0 - } - return index, err -} - -func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.File, err error) { - // Start looking for a free /dev/loop - for { - target := fmt.Sprintf("/dev/loop%d", index) - index++ - - fi, err := os.Stat(target) - if err != nil { - if os.IsNotExist(err) { - logrus.Error("There are no more loopback devices available.") - } - return nil, ErrAttachLoopbackDevice - } - - if fi.Mode()&os.ModeDevice != os.ModeDevice { - logrus.Errorf("Loopback device %s is not a block device.", target) - continue - } - - // OpenFile adds O_CLOEXEC - loopFile, err = os.OpenFile(target, os.O_RDWR, 0644) - if err != nil { - logrus.Errorf("Error opening loopback device: %s", err) - return nil, ErrAttachLoopbackDevice - } - - // Try to attach to the loop file - if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil { - loopFile.Close() - - // If the error is EBUSY, then try the next loopback - if err != unix.EBUSY { - logrus.Errorf("Cannot set up loopback device %s: %s", target, err) - return nil, ErrAttachLoopbackDevice - } - - // Otherwise, we keep going with the loop - continue - } - // In case of success, we finished. Break the loop. - break - } - - // This can't happen, but let's be sure - if loopFile == nil { - logrus.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name()) - return nil, ErrAttachLoopbackDevice - } - - return loopFile, nil -} - -// AttachLoopDevice attaches the given sparse file to the next -// available loopback device. It returns an opened *os.File. -func AttachLoopDevice(sparseName string) (loop *os.File, err error) { - - // Try to retrieve the next available loopback device via syscall. - // If it fails, we discard error and start looping for a - // loopback from index 0. 
- startIndex, err := getNextFreeLoopbackIndex() - if err != nil { - logrus.Debugf("Error retrieving the next available loopback: %s", err) - } - - // OpenFile adds O_CLOEXEC - sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644) - if err != nil { - logrus.Errorf("Error opening sparse file %s: %s", sparseName, err) - return nil, ErrAttachLoopbackDevice - } - defer sparseFile.Close() - - loopFile, err := openNextAvailableLoopback(startIndex, sparseFile) - if err != nil { - return nil, err - } - - // Set the status of the loopback device - loopInfo := &loopInfo64{ - loFileName: stringToLoopName(loopFile.Name()), - loOffset: 0, - loFlags: LoFlagsAutoClear, - } - - if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil { - logrus.Errorf("Cannot set up loopback device info: %s", err) - - // If the call failed, then free the loopback device - if err := ioctlLoopClrFd(loopFile.Fd()); err != nil { - logrus.Error("Error while cleaning up the loopback device") - } - loopFile.Close() - return nil, ErrAttachLoopbackDevice - } - - return loopFile, nil -} diff --git a/vendor/github.com/docker/docker/pkg/loopback/ioctl.go b/vendor/github.com/docker/docker/pkg/loopback/ioctl.go deleted file mode 100644 index 612fd00ab..000000000 --- a/vendor/github.com/docker/docker/pkg/loopback/ioctl.go +++ /dev/null @@ -1,48 +0,0 @@ -// +build linux,cgo - -package loopback // import "github.com/docker/docker/pkg/loopback" - -import ( - "unsafe" - - "golang.org/x/sys/unix" -) - -func ioctlLoopCtlGetFree(fd uintptr) (int, error) { - index, err := unix.IoctlGetInt(int(fd), LoopCtlGetFree) - if err != nil { - return 0, err - } - return index, nil -} - -func ioctlLoopSetFd(loopFd, sparseFd uintptr) error { - return unix.IoctlSetInt(int(loopFd), LoopSetFd, int(sparseFd)) -} - -func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *loopInfo64) error { - if _, _, err := unix.Syscall(unix.SYS_IOCTL, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { - return err - } - return nil -} - -func ioctlLoopClrFd(loopFd uintptr) error { - if _, _, err := unix.Syscall(unix.SYS_IOCTL, loopFd, LoopClrFd, 0); err != 0 { - return err - } - return nil -} - -func ioctlLoopGetStatus64(loopFd uintptr) (*loopInfo64, error) { - loopInfo := &loopInfo64{} - - if _, _, err := unix.Syscall(unix.SYS_IOCTL, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { - return nil, err - } - return loopInfo, nil -} - -func ioctlLoopSetCapacity(loopFd uintptr, value int) error { - return unix.IoctlSetInt(int(loopFd), LoopSetCapacity, value) -} diff --git a/vendor/github.com/docker/docker/pkg/loopback/loop_wrapper.go b/vendor/github.com/docker/docker/pkg/loopback/loop_wrapper.go deleted file mode 100644 index 7206bfb95..000000000 --- a/vendor/github.com/docker/docker/pkg/loopback/loop_wrapper.go +++ /dev/null @@ -1,52 +0,0 @@ -// +build linux,cgo - -package loopback // import "github.com/docker/docker/pkg/loopback" - -/* -#include // FIXME: present only for defines, maybe we can remove it? 
- -#ifndef LOOP_CTL_GET_FREE - #define LOOP_CTL_GET_FREE 0x4C82 -#endif - -#ifndef LO_FLAGS_PARTSCAN - #define LO_FLAGS_PARTSCAN 8 -#endif - -*/ -import "C" - -type loopInfo64 struct { - loDevice uint64 /* ioctl r/o */ - loInode uint64 /* ioctl r/o */ - loRdevice uint64 /* ioctl r/o */ - loOffset uint64 - loSizelimit uint64 /* bytes, 0 == max available */ - loNumber uint32 /* ioctl r/o */ - loEncryptType uint32 - loEncryptKeySize uint32 /* ioctl w/o */ - loFlags uint32 /* ioctl r/o */ - loFileName [LoNameSize]uint8 - loCryptName [LoNameSize]uint8 - loEncryptKey [LoKeySize]uint8 /* ioctl w/o */ - loInit [2]uint64 -} - -// IOCTL consts -const ( - LoopSetFd = C.LOOP_SET_FD - LoopCtlGetFree = C.LOOP_CTL_GET_FREE - LoopGetStatus64 = C.LOOP_GET_STATUS64 - LoopSetStatus64 = C.LOOP_SET_STATUS64 - LoopClrFd = C.LOOP_CLR_FD - LoopSetCapacity = C.LOOP_SET_CAPACITY -) - -// LOOP consts. -const ( - LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR - LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY - LoFlagsPartScan = C.LO_FLAGS_PARTSCAN - LoKeySize = C.LO_KEY_SIZE - LoNameSize = C.LO_NAME_SIZE -) diff --git a/vendor/github.com/docker/docker/pkg/loopback/loopback.go b/vendor/github.com/docker/docker/pkg/loopback/loopback.go deleted file mode 100644 index 086655bc1..000000000 --- a/vendor/github.com/docker/docker/pkg/loopback/loopback.go +++ /dev/null @@ -1,64 +0,0 @@ -// +build linux,cgo - -package loopback // import "github.com/docker/docker/pkg/loopback" - -import ( - "fmt" - "os" - - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) { - loopInfo, err := ioctlLoopGetStatus64(file.Fd()) - if err != nil { - logrus.Errorf("Error get loopback backing file: %s", err) - return 0, 0, ErrGetLoopbackBackingFile - } - return loopInfo.loDevice, loopInfo.loInode, nil -} - -// SetCapacity reloads the size for the loopback device. -func SetCapacity(file *os.File) error { - if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil { - logrus.Errorf("Error loopbackSetCapacity: %s", err) - return ErrSetCapacity - } - return nil -} - -// FindLoopDeviceFor returns a loopback device file for the specified file which -// is backing file of a loop back device. 
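A small Linux-only sketch of the identity check FindLoopDeviceFor performs below: a file is "the same" backing file when its device and inode numbers both match.

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func fileIdentity(path string) (dev, ino uint64, err error) {
	var st unix.Stat_t
	if err := unix.Stat(path, &st); err != nil {
		return 0, 0, err
	}
	return uint64(st.Dev), st.Ino, nil
}

func main() {
	dev, ino, err := fileIdentity("/etc/hostname")
	fmt.Println(dev, ino, err)
}
```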
-func FindLoopDeviceFor(file *os.File) *os.File { - var stat unix.Stat_t - err := unix.Stat(file.Name(), &stat) - if err != nil { - return nil - } - targetInode := stat.Ino - targetDevice := stat.Dev - - for i := 0; true; i++ { - path := fmt.Sprintf("/dev/loop%d", i) - - file, err := os.OpenFile(path, os.O_RDWR, 0) - if err != nil { - if os.IsNotExist(err) { - return nil - } - - // Ignore all errors until the first not-exist - // we want to continue looking for the file - continue - } - - dev, inode, err := getLoopbackBackingFile(file) - if err == nil && dev == targetDevice && inode == targetInode { - return file - } - file.Close() - } - - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/mount/flags.go b/vendor/github.com/docker/docker/pkg/mount/flags.go deleted file mode 100644 index 272363b68..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/flags.go +++ /dev/null @@ -1,149 +0,0 @@ -package mount // import "github.com/docker/docker/pkg/mount" - -import ( - "fmt" - "strings" -) - -var flags = map[string]struct { - clear bool - flag int -}{ - "defaults": {false, 0}, - "ro": {false, RDONLY}, - "rw": {true, RDONLY}, - "suid": {true, NOSUID}, - "nosuid": {false, NOSUID}, - "dev": {true, NODEV}, - "nodev": {false, NODEV}, - "exec": {true, NOEXEC}, - "noexec": {false, NOEXEC}, - "sync": {false, SYNCHRONOUS}, - "async": {true, SYNCHRONOUS}, - "dirsync": {false, DIRSYNC}, - "remount": {false, REMOUNT}, - "mand": {false, MANDLOCK}, - "nomand": {true, MANDLOCK}, - "atime": {true, NOATIME}, - "noatime": {false, NOATIME}, - "diratime": {true, NODIRATIME}, - "nodiratime": {false, NODIRATIME}, - "bind": {false, BIND}, - "rbind": {false, RBIND}, - "unbindable": {false, UNBINDABLE}, - "runbindable": {false, RUNBINDABLE}, - "private": {false, PRIVATE}, - "rprivate": {false, RPRIVATE}, - "shared": {false, SHARED}, - "rshared": {false, RSHARED}, - "slave": {false, SLAVE}, - "rslave": {false, RSLAVE}, - "relatime": {false, RELATIME}, - "norelatime": {true, RELATIME}, - "strictatime": {false, STRICTATIME}, - "nostrictatime": {true, STRICTATIME}, -} - -var validFlags = map[string]bool{ - "": true, - "size": true, - "mode": true, - "uid": true, - "gid": true, - "nr_inodes": true, - "nr_blocks": true, - "mpol": true, -} - -var propagationFlags = map[string]bool{ - "bind": true, - "rbind": true, - "unbindable": true, - "runbindable": true, - "private": true, - "rprivate": true, - "shared": true, - "rshared": true, - "slave": true, - "rslave": true, -} - -// MergeTmpfsOptions merge mount options to make sure there is no duplicate. -func MergeTmpfsOptions(options []string) ([]string, error) { - // We use collisions maps to remove duplicates. - // For flag, the key is the flag value (the key for propagation flag is -1) - // For data=value, the key is the data - flagCollisions := map[int]bool{} - dataCollisions := map[string]bool{} - - var newOptions []string - // We process in reverse order - for i := len(options) - 1; i >= 0; i-- { - option := options[i] - if option == "defaults" { - continue - } - if f, ok := flags[option]; ok && f.flag != 0 { - // There is only one propagation mode - key := f.flag - if propagationFlags[option] { - key = -1 - } - // Check to see if there is collision for flag - if !flagCollisions[key] { - // We prepend the option and add to collision map - newOptions = append([]string{option}, newOptions...) 
- flagCollisions[key] = true - } - continue - } - opt := strings.SplitN(option, "=", 2) - if len(opt) != 2 || !validFlags[opt[0]] { - return nil, fmt.Errorf("Invalid tmpfs option %q", opt) - } - if !dataCollisions[opt[0]] { - // We prepend the option and add to collision map - newOptions = append([]string{option}, newOptions...) - dataCollisions[opt[0]] = true - } - } - - return newOptions, nil -} - -// Parse fstab type mount options into mount() flags -// and device specific data -func parseOptions(options string) (int, string) { - var ( - flag int - data []string - ) - - for _, o := range strings.Split(options, ",") { - // If the option does not exist in the flags table or the flag - // is not supported on the platform, - // then it is a data value for a specific fs type - if f, exists := flags[o]; exists && f.flag != 0 { - if f.clear { - flag &= ^f.flag - } else { - flag |= f.flag - } - } else { - data = append(data, o) - } - } - return flag, strings.Join(data, ",") -} - -// ParseTmpfsOptions parse fstab type mount options into flags and data -func ParseTmpfsOptions(options string) (int, string, error) { - flags, data := parseOptions(options) - for _, o := range strings.Split(data, ",") { - opt := strings.SplitN(o, "=", 2) - if !validFlags[opt[0]] { - return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt) - } - } - return flags, data, nil -} diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go deleted file mode 100644 index ef35ef905..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go +++ /dev/null @@ -1,49 +0,0 @@ -// +build freebsd,cgo - -package mount // import "github.com/docker/docker/pkg/mount" - -/* -#include -*/ -import "C" - -const ( - // RDONLY will mount the filesystem as read-only. - RDONLY = C.MNT_RDONLY - - // NOSUID will not allow set-user-identifier or set-group-identifier bits to - // take effect. - NOSUID = C.MNT_NOSUID - - // NOEXEC will not allow execution of any binaries on the mounted file system. - NOEXEC = C.MNT_NOEXEC - - // SYNCHRONOUS will allow any I/O to the file system to be done synchronously. - SYNCHRONOUS = C.MNT_SYNCHRONOUS - - // NOATIME will not update the file access time when reading from a file. - NOATIME = C.MNT_NOATIME -) - -// These flags are unsupported. -const ( - BIND = 0 - DIRSYNC = 0 - MANDLOCK = 0 - NODEV = 0 - NODIRATIME = 0 - UNBINDABLE = 0 - RUNBINDABLE = 0 - PRIVATE = 0 - RPRIVATE = 0 - SHARED = 0 - RSHARED = 0 - SLAVE = 0 - RSLAVE = 0 - RBIND = 0 - RELATIVE = 0 - RELATIME = 0 - REMOUNT = 0 - STRICTATIME = 0 - mntDetach = 0 -) diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_linux.go b/vendor/github.com/docker/docker/pkg/mount/flags_linux.go deleted file mode 100644 index a1b199a31..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/flags_linux.go +++ /dev/null @@ -1,87 +0,0 @@ -package mount // import "github.com/docker/docker/pkg/mount" - -import ( - "golang.org/x/sys/unix" -) - -const ( - // RDONLY will mount the file system read-only. - RDONLY = unix.MS_RDONLY - - // NOSUID will not allow set-user-identifier or set-group-identifier bits to - // take effect. - NOSUID = unix.MS_NOSUID - - // NODEV will not interpret character or block special devices on the file - // system. - NODEV = unix.MS_NODEV - - // NOEXEC will not allow execution of any binaries on the mounted file system. - NOEXEC = unix.MS_NOEXEC - - // SYNCHRONOUS will allow I/O to the file system to be done synchronously. 
- SYNCHRONOUS = unix.MS_SYNCHRONOUS - - // DIRSYNC will force all directory updates within the file system to be done - // synchronously. This affects the following system calls: create, link, - // unlink, symlink, mkdir, rmdir, mknod and rename. - DIRSYNC = unix.MS_DIRSYNC - - // REMOUNT will attempt to remount an already-mounted file system. This is - // commonly used to change the mount flags for a file system, especially to - // make a readonly file system writeable. It does not change device or mount - // point. - REMOUNT = unix.MS_REMOUNT - - // MANDLOCK will force mandatory locks on a filesystem. - MANDLOCK = unix.MS_MANDLOCK - - // NOATIME will not update the file access time when reading from a file. - NOATIME = unix.MS_NOATIME - - // NODIRATIME will not update the directory access time. - NODIRATIME = unix.MS_NODIRATIME - - // BIND remounts a subtree somewhere else. - BIND = unix.MS_BIND - - // RBIND remounts a subtree and all possible submounts somewhere else. - RBIND = unix.MS_BIND | unix.MS_REC - - // UNBINDABLE creates a mount which cannot be cloned through a bind operation. - UNBINDABLE = unix.MS_UNBINDABLE - - // RUNBINDABLE marks the entire mount tree as UNBINDABLE. - RUNBINDABLE = unix.MS_UNBINDABLE | unix.MS_REC - - // PRIVATE creates a mount which carries no propagation abilities. - PRIVATE = unix.MS_PRIVATE - - // RPRIVATE marks the entire mount tree as PRIVATE. - RPRIVATE = unix.MS_PRIVATE | unix.MS_REC - - // SLAVE creates a mount which receives propagation from its master, but not - // vice versa. - SLAVE = unix.MS_SLAVE - - // RSLAVE marks the entire mount tree as SLAVE. - RSLAVE = unix.MS_SLAVE | unix.MS_REC - - // SHARED creates a mount which provides the ability to create mirrors of - // that mount such that mounts and unmounts within any of the mirrors - // propagate to the other mirrors. - SHARED = unix.MS_SHARED - - // RSHARED marks the entire mount tree as SHARED. - RSHARED = unix.MS_SHARED | unix.MS_REC - - // RELATIME updates inode access times relative to modify or change time. - RELATIME = unix.MS_RELATIME - - // STRICTATIME allows to explicitly request full atime updates. This makes - // it possible for the kernel to default to relatime or noatime but still - // allow userspace to override it. - STRICTATIME = unix.MS_STRICTATIME - - mntDetach = unix.MNT_DETACH -) diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go deleted file mode 100644 index cc6c47590..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build !linux,!freebsd freebsd,!cgo - -package mount // import "github.com/docker/docker/pkg/mount" - -// These flags are unsupported. 
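A standalone, Linux-only sketch of the fstab-style option parsing in flags.go above: known option names toggle bits in the mount flag word, everything else is collected as filesystem-specific data. The flag table here is deliberately abbreviated.

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/sys/unix"
)

var flagTable = map[string]struct {
	clear bool
	flag  int
}{
	"ro":     {false, unix.MS_RDONLY},
	"rw":     {true, unix.MS_RDONLY},
	"nosuid": {false, unix.MS_NOSUID},
	"suid":   {true, unix.MS_NOSUID},
}

func parseOptions(options string) (int, string) {
	var flag int
	var data []string
	for _, o := range strings.Split(options, ",") {
		if f, ok := flagTable[o]; ok && f.flag != 0 {
			if f.clear {
				flag &= ^f.flag // option clears a previously set bit
			} else {
				flag |= f.flag
			}
		} else {
			data = append(data, o) // fs-specific data, e.g. size=64m
		}
	}
	return flag, strings.Join(data, ",")
}

func main() {
	flag, data := parseOptions("ro,nosuid,size=64m")
	fmt.Printf("flags=%#x data=%q\n", flag, data)
}
```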
diff --git a/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go
deleted file mode 100644
index cc6c47590..000000000
--- a/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// +build !linux,!freebsd freebsd,!cgo
-
-package mount // import "github.com/docker/docker/pkg/mount"
-
-// These flags are unsupported.
-const (
-	BIND        = 0
-	DIRSYNC     = 0
-	MANDLOCK    = 0
-	NOATIME     = 0
-	NODEV       = 0
-	NODIRATIME  = 0
-	NOEXEC      = 0
-	NOSUID      = 0
-	UNBINDABLE  = 0
-	RUNBINDABLE = 0
-	PRIVATE     = 0
-	RPRIVATE    = 0
-	SHARED      = 0
-	RSHARED     = 0
-	SLAVE       = 0
-	RSLAVE      = 0
-	RBIND       = 0
-	RELATIME    = 0
-	RELATIVE    = 0
-	REMOUNT     = 0
-	STRICTATIME = 0
-	SYNCHRONOUS = 0
-	RDONLY      = 0
-	mntDetach   = 0
-)
diff --git a/vendor/github.com/docker/docker/pkg/mount/mount.go b/vendor/github.com/docker/docker/pkg/mount/mount.go
deleted file mode 100644
index 874aff654..000000000
--- a/vendor/github.com/docker/docker/pkg/mount/mount.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package mount // import "github.com/docker/docker/pkg/mount"
-
-import (
-	"sort"
-	"strings"
-	"syscall"
-
-	"github.com/sirupsen/logrus"
-)
-
-// FilterFunc is a type defining a callback function
-// to filter out unwanted entries. It takes a pointer
-// to an Info struct (not fully populated, currently
-// only Mountpoint is filled in), and returns two booleans:
-//  - skip: true if the entry should be skipped
-//  - stop: true if parsing should be stopped after the entry
-type FilterFunc func(*Info) (skip, stop bool)
-
-// PrefixFilter discards all entries whose mount points
-// do not start with the specified prefix.
-func PrefixFilter(prefix string) FilterFunc {
-	return func(m *Info) (bool, bool) {
-		skip := !strings.HasPrefix(m.Mountpoint, prefix)
-		return skip, false
-	}
-}
-
-// SingleEntryFilter looks for a specific entry.
-func SingleEntryFilter(mp string) FilterFunc {
-	return func(m *Info) (bool, bool) {
-		if m.Mountpoint == mp {
-			return false, true // don't skip, stop now
-		}
-		return true, false // skip, keep going
-	}
-}
-
-// ParentsFilter returns all entries whose mount points
-// can be parents of the specified path, discarding others.
-// For example, given `/var/lib/docker/something`, entries
-// like `/var/lib/docker`, `/var` and `/` are returned.
-func ParentsFilter(path string) FilterFunc {
-	return func(m *Info) (bool, bool) {
-		skip := !strings.HasPrefix(path, m.Mountpoint)
-		return skip, false
-	}
-}
-
-// GetMounts retrieves a list of mounts for the current running process,
-// with an optional filter applied (use nil for no filter).
-func GetMounts(f FilterFunc) ([]*Info, error) {
-	return parseMountTable(f)
-}
-
-// Mounted determines if a specified mountpoint has been mounted.
-// On Linux it looks at /proc/self/mountinfo.
-func Mounted(mountpoint string) (bool, error) {
-	entries, err := GetMounts(SingleEntryFilter(mountpoint))
-	if err != nil {
-		return false, err
-	}
-
-	return len(entries) > 0, nil
-}
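A short usage sketch for the filter API above (the path is illustrative; not part of the patch):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	// List every mount at or below a directory tree.
	infos, err := mount.GetMounts(mount.PrefixFilter("/var/lib/docker"))
	if err != nil {
		panic(err)
	}
	for _, m := range infos {
		fmt.Println(m.Mountpoint, m.Fstype)
	}

	// Check a single mountpoint.
	mounted, err := mount.Mounted("/var/lib/docker")
	if err != nil {
		panic(err)
	}
	fmt.Println("mounted:", mounted)
}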
-// Mount will mount a filesystem according to the specified configuration, on
-// the condition that the target path is *not* already mounted. Options must be
-// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
-// flags.go for supported option flags.
-func Mount(device, target, mType, options string) error {
-	flag, _ := parseOptions(options)
-	if flag&REMOUNT != REMOUNT {
-		if mounted, err := Mounted(target); err != nil || mounted {
-			return err
-		}
-	}
-	return ForceMount(device, target, mType, options)
-}
-
-// ForceMount will mount a filesystem according to the specified configuration,
-// *regardless* of whether the target path is already mounted. Options must be
-// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
-// flags.go for supported option flags.
-func ForceMount(device, target, mType, options string) error {
-	flag, data := parseOptions(options)
-	return mount(device, target, mType, uintptr(flag), data)
-}
-
-// Unmount lazily unmounts a filesystem on supported platforms, otherwise
-// does a normal unmount.
-func Unmount(target string) error {
-	err := unmount(target, mntDetach)
-	if err == syscall.EINVAL {
-		// ignore "not mounted" error
-		err = nil
-	}
-	return err
-}
-
-// RecursiveUnmount unmounts the target and all mounts underneath, starting
-// with the deepest mount first.
-func RecursiveUnmount(target string) error {
-	mounts, err := parseMountTable(PrefixFilter(target))
-	if err != nil {
-		return err
-	}
-
-	// Make the deepest mount be first
-	sort.Slice(mounts, func(i, j int) bool {
-		return len(mounts[i].Mountpoint) > len(mounts[j].Mountpoint)
-	})
-
-	for i, m := range mounts {
-		logrus.Debugf("Trying to unmount %s", m.Mountpoint)
-		err = unmount(m.Mountpoint, mntDetach)
-		if err != nil {
-			// If the error is EINVAL either this whole package is wrong (invalid flags passed to unmount(2)) or this is
-			// not a mountpoint (which is ok in this case).
-			// Meanwhile calling `Mounted()` is very expensive.
-			//
-			// We've purposefully used `syscall.EINVAL` here instead of `unix.EINVAL` to avoid platform branching.
-			// Since `EINVAL` is defined for both Windows and Linux in the `syscall` package (and other platforms),
-			// this is nicer than defining a custom value that we can refer to in each platform file.
-			if err == syscall.EINVAL {
-				continue
-			}
-			if i == len(mounts)-1 {
-				if mounted, e := Mounted(m.Mountpoint); e != nil || mounted {
-					return err
-				}
-				continue
-			}
-			// This is some submount, we can ignore this error for now, the final unmount will fail if this is a real problem.
-			logrus.WithError(err).Warnf("Failed to unmount submount %s", m.Mountpoint)
-			continue
-		}
-
-		logrus.Debugf("Unmounted %s", m.Mountpoint)
-	}
-	return nil
-}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go
deleted file mode 100644
index b6ab83a23..000000000
--- a/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package mount // import "github.com/docker/docker/pkg/mount"
-
-/*
-#include <errno.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/_iovec.h>
-#include <sys/mount.h>
-#include <sys/param.h>
-*/
-import "C"
-
-import (
-	"fmt"
-	"strings"
-	"unsafe"
-
-	"golang.org/x/sys/unix"
-)
-
-func allocateIOVecs(options []string) []C.struct_iovec {
-	out := make([]C.struct_iovec, len(options))
-	for i, option := range options {
-		out[i].iov_base = unsafe.Pointer(C.CString(option))
-		out[i].iov_len = C.size_t(len(option) + 1)
-	}
-	return out
-}
-
-func mount(device, target, mType string, flag uintptr, data string) error {
-	isNullFS := false
-
-	xs := strings.Split(data, ",")
-	for _, x := range xs {
-		if x == "bind" {
-			isNullFS = true
-		}
-	}
-
-	options := []string{"fspath", target}
-	if isNullFS {
-		options = append(options, "fstype", "nullfs", "target", device)
-	} else {
-		options = append(options, "fstype", mType, "from", device)
-	}
-	rawOptions := allocateIOVecs(options)
-	for _, rawOption := range rawOptions {
-		defer C.free(rawOption.iov_base)
-	}
-
-	if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
-		reason := C.GoString(C.strerror(*C.__error()))
-		return fmt.Errorf("Failed to call nmount: %s", reason)
-	}
-	return nil
-}
-
-func unmount(target string, flag int) error {
-	return unix.Unmount(target, flag)
-}
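How the exported entry points above fit together, as a sketch (paths illustrative; needs root on a supported platform; not part of the patch):

package main

import "github.com/docker/docker/pkg/mount"

func main() {
	// "bind,ro" becomes MS_BIND|MS_RDONLY; Mount skips the syscall when
	// the target is already mounted, ForceMount would not.
	if err := mount.Mount("/src", "/dst", "none", "bind,ro"); err != nil {
		panic(err)
	}
	// Lazy (MNT_DETACH) unmount where the platform supports it.
	if err := mount.Unmount("/dst"); err != nil {
		panic(err)
	}
}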
diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go b/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go
deleted file mode 100644
index 631daf10a..000000000
--- a/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package mount // import "github.com/docker/docker/pkg/mount"
-
-import (
-	"golang.org/x/sys/unix"
-)
-
-const (
-	// ptypes is the set of propagation types.
-	ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE
-
-	// pflags is the full set of valid flags for a change-propagation call.
-	pflags = ptypes | unix.MS_REC | unix.MS_SILENT
-
-	// broflags is the combination of bind and read only.
-	broflags = unix.MS_BIND | unix.MS_RDONLY
-)
-
-// isremount returns true if either the device name or the flags identify a
-// remount request, false otherwise.
-func isremount(device string, flags uintptr) bool {
-	switch {
-	// We treat device "" and "none" as a remount request to provide compatibility with
-	// requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts.
-	case flags&unix.MS_REMOUNT != 0, device == "", device == "none":
-		return true
-	default:
-		return false
-	}
-}
-
-func mount(device, target, mType string, flags uintptr, data string) error {
-	oflags := flags &^ ptypes
-	if !isremount(device, flags) || data != "" {
-		// Initial call applying all non-propagation flags for mount
-		// or remount with changed data
-		if err := unix.Mount(device, target, mType, oflags, data); err != nil {
-			return err
-		}
-	}
-
-	if flags&ptypes != 0 {
-		// Change the propagation type.
-		if err := unix.Mount("", target, "", flags&pflags, ""); err != nil {
-			return err
-		}
-	}
-
-	if oflags&broflags == broflags {
-		// Remount the bind to apply read only.
-		return unix.Mount("", target, "", oflags|unix.MS_REMOUNT, "")
-	}
-
-	return nil
-}
-
-func unmount(target string, flag int) error {
-	return unix.Unmount(target, flag)
-}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go
deleted file mode 100644
index 1428dffa5..000000000
--- a/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build !linux,!freebsd freebsd,!cgo
-
-package mount // import "github.com/docker/docker/pkg/mount"
-
-func mount(device, target, mType string, flag uintptr, data string) error {
-	panic("Not implemented")
-}
-
-func unmount(target string, flag int) error {
-	panic("Not implemented")
-}
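The broflags branch above exists because a bind mount cannot be made read-only in a single mount(2) call. A sketch of the same two-step pattern against the raw syscall (paths illustrative; not part of the patch):

package main

import "golang.org/x/sys/unix"

// bindReadOnly mirrors the broflags logic in mounter_linux.go: create the
// bind mount first, then remount it with MS_RDONLY so the flag takes effect.
func bindReadOnly(src, dst string) error {
	if err := unix.Mount(src, dst, "", unix.MS_BIND, ""); err != nil {
		return err
	}
	return unix.Mount("", dst, "", unix.MS_BIND|unix.MS_REMOUNT|unix.MS_RDONLY, "")
}

func main() {
	if err := bindReadOnly("/src", "/dst"); err != nil {
		panic(err)
	}
}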
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo.go
deleted file mode 100644
index ecd03fc02..000000000
--- a/vendor/github.com/docker/docker/pkg/mount/mountinfo.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package mount // import "github.com/docker/docker/pkg/mount"
-
-// Info reveals information about a particular mounted filesystem. This
-// struct is populated from the content in the /proc/<pid>/mountinfo file.
-type Info struct {
-	// ID is a unique identifier of the mount (may be reused after umount).
-	ID int
-
-	// Parent indicates the ID of the mount parent (or of self for the top of the
-	// mount tree).
-	Parent int
-
-	// Major indicates one half of the device ID which identifies the device class.
-	Major int
-
-	// Minor indicates one half of the device ID which identifies a specific
-	// instance of device.
-	Minor int
-
-	// Root of the mount within the filesystem.
-	Root string
-
-	// Mountpoint indicates the mount point relative to the process's root.
-	Mountpoint string
-
-	// Opts represents mount-specific options.
-	Opts string
-
-	// Optional represents optional fields.
-	Optional string
-
-	// Fstype indicates the type of filesystem, such as EXT3.
-	Fstype string
-
-	// Source indicates filesystem specific information or "none".
-	Source string
-
-	// VfsOpts represents per super block options.
-	VfsOpts string
-}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go
deleted file mode 100644
index 36c89dc1a..000000000
--- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package mount // import "github.com/docker/docker/pkg/mount"
-
-/*
-#include <sys/param.h>
-#include <sys/ucred.h>
-#include <sys/mount.h>
-*/
-import "C"
-
-import (
-	"fmt"
-	"reflect"
-	"unsafe"
-)
-
-// parseMountTable returns the mount table via getmntinfo(3), because comparing
-// Dev and ino does not work from bind mounts.
-func parseMountTable(filter FilterFunc) ([]*Info, error) {
-	var rawEntries *C.struct_statfs
-
-	count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
-	if count == 0 {
-		return nil, fmt.Errorf("Failed to call getmntinfo")
-	}
-
-	var entries []C.struct_statfs
-	header := (*reflect.SliceHeader)(unsafe.Pointer(&entries))
-	header.Cap = count
-	header.Len = count
-	header.Data = uintptr(unsafe.Pointer(rawEntries))
-
-	var out []*Info
-	for _, entry := range entries {
-		var mountinfo Info
-		var skip, stop bool
-		mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
-
-		if filter != nil {
-			// filter out entries we're not interested in
-			skip, stop = filter(&mountinfo)
-			if skip {
-				continue
-			}
-		}
-
-		mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
-		mountinfo.Fstype = C.GoString(&entry.f_fstypename[0])
-
-		out = append(out, &mountinfo)
-		if stop {
-			break
-		}
-	}
-	return out, nil
-}
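Before the Linux parser below, a standalone sketch of the /proc/self/mountinfo field layout it handles: fields 0-5 are fixed, optional fields run from index 6 up to the "-" separator, and fstype/source/options follow it (not part of the patch):

package main

import (
	"fmt"
	"strings"
)

func main() {
	line := "36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue"
	fields := strings.Split(line, " ")

	// Skip optional fields until the separator.
	sep := 6
	for sep < len(fields) && fields[sep] != "-" {
		sep++
	}

	fmt.Println("mountpoint:", fields[4]) // /mnt2
	fmt.Println("fstype:", fields[sep+1]) // ext3
	fmt.Println("source:", fields[sep+2]) // /dev/root
}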
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go
deleted file mode 100644
index c1dba01fc..000000000
--- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go
+++ /dev/null
@@ -1,132 +0,0 @@
-package mount // import "github.com/docker/docker/pkg/mount"
-
-import (
-	"bufio"
-	"fmt"
-	"io"
-	"os"
-	"strconv"
-	"strings"
-)
-
-func parseInfoFile(r io.Reader, filter FilterFunc) ([]*Info, error) {
-	s := bufio.NewScanner(r)
-	out := []*Info{}
-	for s.Scan() {
-		if err := s.Err(); err != nil {
-			return nil, err
-		}
-		/*
-		   36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
-		   (1)(2)(3)   (4)   (5)      (6)      (7)   (8) (9)   (10)         (11)
-
-		   (1) mount ID: unique identifier of the mount (may be reused after umount)
-		   (2) parent ID: ID of parent (or of self for the top of the mount tree)
-		   (3) major:minor: value of st_dev for files on filesystem
-		   (4) root: root of the mount within the filesystem
-		   (5) mount point: mount point relative to the process's root
-		   (6) mount options: per mount options
-		   (7) optional fields: zero or more fields of the form "tag[:value]"
-		   (8) separator: marks the end of the optional fields
-		   (9) filesystem type: name of filesystem of the form "type[.subtype]"
-		   (10) mount source: filesystem specific information or "none"
-		   (11) super options: per super block options
-		*/
-
-		text := s.Text()
-		fields := strings.Split(text, " ")
-		numFields := len(fields)
-		if numFields < 10 {
-			// should be at least 10 fields
-			return nil, fmt.Errorf("Parsing '%s' failed: not enough fields (%d)", text, numFields)
-		}
-
-		p := &Info{}
-		// ignore any numbers parsing errors, as there should not be any
-		p.ID, _ = strconv.Atoi(fields[0])
-		p.Parent, _ = strconv.Atoi(fields[1])
-		mm := strings.Split(fields[2], ":")
-		if len(mm) != 2 {
-			return nil, fmt.Errorf("Parsing '%s' failed: unexpected major:minor pair %s", text, mm)
-		}
-		p.Major, _ = strconv.Atoi(mm[0])
-		p.Minor, _ = strconv.Atoi(mm[1])
-
-		p.Root = fields[3]
-		p.Mountpoint = fields[4]
-		p.Opts = fields[5]
-
-		var skip, stop bool
-		if filter != nil {
-			// filter out entries we're not interested in
-			skip, stop = filter(p)
-			if skip {
-				continue
-			}
-		}
-
-		// zero or more optional fields, until a separator ("-")
-		i := 6
-		for ; i < numFields && fields[i] != "-"; i++ {
-			switch i {
-			case 6:
-				p.Optional = fields[6]
-			default:
-				/* NOTE: there might be more optional fields before the separator,
-				   such as fields[7]...fields[N] (where N < sepIndex), although
-				   as of Linux kernel 4.15 the only known ones are mount
-				   propagation flags in fields[6]. The correct behavior is to
-				   ignore any unknown optional fields.
-				*/
-				break
-			}
-		}
-		if i == numFields {
-			return nil, fmt.Errorf("Parsing '%s' failed: missing separator ('-')", text)
-		}
-
-		// There should be 3 fields after the separator...
-		if i+4 > numFields {
-			return nil, fmt.Errorf("Parsing '%s' failed: not enough fields after a separator", text)
-		}
-		// ... but in Linux <= 3.9 mounting a cifs with spaces in a share name
-		// (like "//serv/My Documents") _may_ end up having a space in the last field
-		// of mountinfo (like "unc=//serv/My Documents"). Since kernel 3.10-rc1, cifs
-		// option unc= is ignored, so a space should not appear. In here we ignore
-		// those "extra" fields caused by extra spaces.
-		p.Fstype = fields[i+1]
-		p.Source = fields[i+2]
-		p.VfsOpts = fields[i+3]
-
-		out = append(out, p)
-		if stop {
-			break
-		}
-	}
-	return out, nil
-}
-
-// parseMountTable parses /proc/self/mountinfo, because comparing Dev and ino
-// does not work from bind mounts.
-func parseMountTable(filter FilterFunc) ([]*Info, error) {
-	f, err := os.Open("/proc/self/mountinfo")
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-
-	return parseInfoFile(f, filter)
-}
-
-// PidMountInfo collects the mounts for a specific process ID. If the process
-// ID is unknown, it is better to use `GetMounts` which will inspect
-// "/proc/self/mountinfo" instead.
-func PidMountInfo(pid int) ([]*Info, error) { - f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) - if err != nil { - return nil, err - } - defer f.Close() - - return parseInfoFile(f, nil) -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go deleted file mode 100644 index fd16d3ed6..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !windows,!linux,!freebsd freebsd,!cgo - -package mount // import "github.com/docker/docker/pkg/mount" - -import ( - "fmt" - "runtime" -) - -func parseMountTable(f FilterFunc) ([]*Info, error) { - return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) -} diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go deleted file mode 100644 index 27e0f6976..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -package mount // import "github.com/docker/docker/pkg/mount" - -func parseMountTable(f FilterFunc) ([]*Info, error) { - // Do NOT return an error! - return nil, nil -} diff --git a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go deleted file mode 100644 index 538f6637a..000000000 --- a/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go +++ /dev/null @@ -1,67 +0,0 @@ -package mount // import "github.com/docker/docker/pkg/mount" - -// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. -// See the supported options in flags.go for further reference. -func MakeShared(mountPoint string) error { - return ensureMountedAs(mountPoint, "shared") -} - -// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. -// See the supported options in flags.go for further reference. -func MakeRShared(mountPoint string) error { - return ensureMountedAs(mountPoint, "rshared") -} - -// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. -// See the supported options in flags.go for further reference. -func MakePrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, "private") -} - -// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option -// enabled. See the supported options in flags.go for further reference. -func MakeRPrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, "rprivate") -} - -// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. -// See the supported options in flags.go for further reference. -func MakeSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, "slave") -} - -// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. -// See the supported options in flags.go for further reference. -func MakeRSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, "rslave") -} - -// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option -// enabled. See the supported options in flags.go for further reference. -func MakeUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, "unbindable") -} - -// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount -// option enabled. See the supported options in flags.go for further reference. 
-func MakeRUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, "runbindable") -} - -func ensureMountedAs(mountPoint, options string) error { - mounted, err := Mounted(mountPoint) - if err != nil { - return err - } - - if !mounted { - if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { - return err - } - } - if _, err = Mounted(mountPoint); err != nil { - return err - } - - return ForceMount("", mountPoint, "none", options) -} diff --git a/vendor/github.com/docker/docker/pkg/namesgenerator/cmd/names-generator/main.go b/vendor/github.com/docker/docker/pkg/namesgenerator/cmd/names-generator/main.go deleted file mode 100644 index 7fd5955be..000000000 --- a/vendor/github.com/docker/docker/pkg/namesgenerator/cmd/names-generator/main.go +++ /dev/null @@ -1,14 +0,0 @@ -package main - -import ( - "fmt" - "math/rand" - "time" - - "github.com/docker/docker/pkg/namesgenerator" -) - -func main() { - rand.Seed(time.Now().UnixNano()) - fmt.Println(namesgenerator.GetRandomName(0)) -} diff --git a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go b/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go deleted file mode 100644 index 5c3395aaa..000000000 --- a/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go +++ /dev/null @@ -1,645 +0,0 @@ -package namesgenerator // import "github.com/docker/docker/pkg/namesgenerator" - -import ( - "fmt" - "math/rand" -) - -var ( - left = [...]string{ - "admiring", - "adoring", - "affectionate", - "agitated", - "amazing", - "angry", - "awesome", - "blissful", - "boring", - "brave", - "clever", - "cocky", - "compassionate", - "competent", - "condescending", - "confident", - "cranky", - "dazzling", - "determined", - "distracted", - "dreamy", - "eager", - "ecstatic", - "elastic", - "elated", - "elegant", - "eloquent", - "epic", - "fervent", - "festive", - "flamboyant", - "focused", - "friendly", - "frosty", - "gallant", - "gifted", - "goofy", - "gracious", - "happy", - "hardcore", - "heuristic", - "hopeful", - "hungry", - "infallible", - "inspiring", - "jolly", - "jovial", - "keen", - "kind", - "laughing", - "loving", - "lucid", - "mystifying", - "modest", - "musing", - "naughty", - "nervous", - "nifty", - "nostalgic", - "objective", - "optimistic", - "peaceful", - "pedantic", - "pensive", - "practical", - "priceless", - "quirky", - "quizzical", - "relaxed", - "reverent", - "romantic", - "sad", - "serene", - "sharp", - "silly", - "sleepy", - "stoic", - "stupefied", - "suspicious", - "tender", - "thirsty", - "trusting", - "unruffled", - "upbeat", - "vibrant", - "vigilant", - "vigorous", - "wizardly", - "wonderful", - "xenodochial", - "youthful", - "zealous", - "zen", - } - - // Docker, starting from 0.7.x, generates names from notable scientists and hackers. - // Please, for any amazing man that you add to the list, consider adding an equally amazing woman to it, and vice versa. - right = [...]string{ - // Muhammad ibn Jābir al-Ḥarrānī al-Battānī was a founding father of astronomy. https://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB - "albattani", - - // Frances E. Allen, became the first female IBM Fellow in 1989. In 2006, she became the first female recipient of the ACM's Turing Award. 
https://en.wikipedia.org/wiki/Frances_E._Allen - "allen", - - // June Almeida - Scottish virologist who took the first pictures of the rubella virus - https://en.wikipedia.org/wiki/June_Almeida - "almeida", - - // Maria Gaetana Agnesi - Italian mathematician, philosopher, theologian and humanitarian. She was the first woman to write a mathematics handbook and the first woman appointed as a Mathematics Professor at a University. https://en.wikipedia.org/wiki/Maria_Gaetana_Agnesi - "agnesi", - - // Archimedes was a physicist, engineer and mathematician who invented too many things to list them here. https://en.wikipedia.org/wiki/Archimedes - "archimedes", - - // Maria Ardinghelli - Italian translator, mathematician and physicist - https://en.wikipedia.org/wiki/Maria_Ardinghelli - "ardinghelli", - - // Aryabhata - Ancient Indian mathematician-astronomer during 476-550 CE https://en.wikipedia.org/wiki/Aryabhata - "aryabhata", - - // Wanda Austin - Wanda Austin is the President and CEO of The Aerospace Corporation, a leading architect for the US security space programs. https://en.wikipedia.org/wiki/Wanda_Austin - "austin", - - // Charles Babbage invented the concept of a programmable computer. https://en.wikipedia.org/wiki/Charles_Babbage. - "babbage", - - // Stefan Banach - Polish mathematician, was one of the founders of modern functional analysis. https://en.wikipedia.org/wiki/Stefan_Banach - "banach", - - // John Bardeen co-invented the transistor - https://en.wikipedia.org/wiki/John_Bardeen - "bardeen", - - // Jean Bartik, born Betty Jean Jennings, was one of the original programmers for the ENIAC computer. https://en.wikipedia.org/wiki/Jean_Bartik - "bartik", - - // Laura Bassi, the world's first female professor https://en.wikipedia.org/wiki/Laura_Bassi - "bassi", - - // Hugh Beaver, British engineer, founder of the Guinness Book of World Records https://en.wikipedia.org/wiki/Hugh_Beaver - "beaver", - - // Alexander Graham Bell - an eminent Scottish-born scientist, inventor, engineer and innovator who is credited with inventing the first practical telephone - https://en.wikipedia.org/wiki/Alexander_Graham_Bell - "bell", - - // Karl Friedrich Benz - a German automobile engineer. Inventor of the first practical motorcar. https://en.wikipedia.org/wiki/Karl_Benz - "benz", - - // Homi J Bhabha - was an Indian nuclear physicist, founding director, and professor of physics at the Tata Institute of Fundamental Research. Colloquially known as "father of Indian nuclear programme"- https://en.wikipedia.org/wiki/Homi_J._Bhabha - "bhabha", - - // Bhaskara II - Ancient Indian mathematician-astronomer whose work on calculus predates Newton and Leibniz by over half a millennium - https://en.wikipedia.org/wiki/Bh%C4%81skara_II#Calculus - "bhaskara", - - // Elizabeth Blackwell - American doctor and first American woman to receive a medical degree - https://en.wikipedia.org/wiki/Elizabeth_Blackwell - "blackwell", - - // Niels Bohr is the father of quantum theory. https://en.wikipedia.org/wiki/Niels_Bohr. - "bohr", - - // Kathleen Booth, she's credited with writing the first assembly language. https://en.wikipedia.org/wiki/Kathleen_Booth - "booth", - - // Anita Borg - Anita Borg was the founding director of the Institute for Women and Technology (IWT). https://en.wikipedia.org/wiki/Anita_Borg - "borg", - - // Satyendra Nath Bose - He provided the foundation for Bose–Einstein statistics and the theory of the Bose–Einstein condensate. 
- https://en.wikipedia.org/wiki/Satyendra_Nath_Bose - "bose", - - // Evelyn Boyd Granville - She was one of the first African-American woman to receive a Ph.D. in mathematics; she earned it in 1949 from Yale University. https://en.wikipedia.org/wiki/Evelyn_Boyd_Granville - "boyd", - - // Brahmagupta - Ancient Indian mathematician during 598-670 CE who gave rules to compute with zero - https://en.wikipedia.org/wiki/Brahmagupta#Zero - "brahmagupta", - - // Walter Houser Brattain co-invented the transistor - https://en.wikipedia.org/wiki/Walter_Houser_Brattain - "brattain", - - // Emmett Brown invented time travel. https://en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff) - "brown", - - // Rachel Carson - American marine biologist and conservationist, her book Silent Spring and other writings are credited with advancing the global environmental movement. https://en.wikipedia.org/wiki/Rachel_Carson - "carson", - - // Subrahmanyan Chandrasekhar - Astrophysicist known for his mathematical theory on different stages and evolution in structures of the stars. He has won nobel prize for physics - https://en.wikipedia.org/wiki/Subrahmanyan_Chandrasekhar - "chandrasekhar", - - //Sergey Alexeyevich Chaplygin (Russian: Серге́й Алексе́евич Чаплы́гин; April 5, 1869 – October 8, 1942) was a Russian and Soviet physicist, mathematician, and mechanical engineer. He is known for mathematical formulas such as Chaplygin's equation and for a hypothetical substance in cosmology called Chaplygin gas, named after him. https://en.wikipedia.org/wiki/Sergey_Chaplygin - "chaplygin", - - // Asima Chatterjee was an indian organic chemist noted for her research on vinca alkaloids, development of drugs for treatment of epilepsy and malaria - https://en.wikipedia.org/wiki/Asima_Chatterjee - "chatterjee", - - // Pafnuty Chebyshev - Russian mathematician. He is known fo his works on probability, statistics, mechanics, analytical geometry and number theory https://en.wikipedia.org/wiki/Pafnuty_Chebyshev - "chebyshev", - - //Claude Shannon - The father of information theory and founder of digital circuit design theory. (https://en.wikipedia.org/wiki/Claude_Shannon) - "shannon", - - // Joan Clarke - Bletchley Park code breaker during the Second World War who pioneered techniques that remained top secret for decades. Also an accomplished numismatist https://en.wikipedia.org/wiki/Joan_Clarke - "clarke", - - // Jane Colden - American botanist widely considered the first female American botanist - https://en.wikipedia.org/wiki/Jane_Colden - "colden", - - // Gerty Theresa Cori - American biochemist who became the third woman—and first American woman—to win a Nobel Prize in science, and the first woman to be awarded the Nobel Prize in Physiology or Medicine. Cori was born in Prague. https://en.wikipedia.org/wiki/Gerty_Cori - "cori", - - // Seymour Roger Cray was an American electrical engineer and supercomputer architect who designed a series of computers that were the fastest in the world for decades. https://en.wikipedia.org/wiki/Seymour_Cray - "cray", - - // This entry reflects a husband and wife team who worked together: - // Joan Curran was a Welsh scientist who developed radar and invented chaff, a radar countermeasure. https://en.wikipedia.org/wiki/Joan_Curran - // Samuel Curran was an Irish physicist who worked alongside his wife during WWII and invented the proximity fuse. https://en.wikipedia.org/wiki/Samuel_Curran - "curran", - - // Marie Curie discovered radioactivity. https://en.wikipedia.org/wiki/Marie_Curie. 
- "curie", - - // Charles Darwin established the principles of natural evolution. https://en.wikipedia.org/wiki/Charles_Darwin. - "darwin", - - // Leonardo Da Vinci invented too many things to list here. https://en.wikipedia.org/wiki/Leonardo_da_Vinci. - "davinci", - - // Edsger Wybe Dijkstra was a Dutch computer scientist and mathematical scientist. https://en.wikipedia.org/wiki/Edsger_W._Dijkstra. - "dijkstra", - - // Donna Dubinsky - played an integral role in the development of personal digital assistants (PDAs) serving as CEO of Palm, Inc. and co-founding Handspring. https://en.wikipedia.org/wiki/Donna_Dubinsky - "dubinsky", - - // Annie Easley - She was a leading member of the team which developed software for the Centaur rocket stage and one of the first African-Americans in her field. https://en.wikipedia.org/wiki/Annie_Easley - "easley", - - // Thomas Alva Edison, prolific inventor https://en.wikipedia.org/wiki/Thomas_Edison - "edison", - - // Albert Einstein invented the general theory of relativity. https://en.wikipedia.org/wiki/Albert_Einstein - "einstein", - - // Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine - https://en.wikipedia.org/wiki/Gertrude_Elion - "elion", - - // Alexandra Asanovna Elbakyan (Russian: Алекса́ндра Аса́новна Элбакя́н) is a Kazakhstani graduate student, computer programmer, internet pirate in hiding, and the creator of the site Sci-Hub. Nature has listed her in 2016 in the top ten people that mattered in science, and Ars Technica has compared her to Aaron Swartz. - https://en.wikipedia.org/wiki/Alexandra_Elbakyan - "elbakyan", - - // Douglas Engelbart gave the mother of all demos: https://en.wikipedia.org/wiki/Douglas_Engelbart - "engelbart", - - // Euclid invented geometry. https://en.wikipedia.org/wiki/Euclid - "euclid", - - // Leonhard Euler invented large parts of modern mathematics. https://de.wikipedia.org/wiki/Leonhard_Euler - "euler", - - // Pierre de Fermat pioneered several aspects of modern mathematics. https://en.wikipedia.org/wiki/Pierre_de_Fermat - "fermat", - - // Enrico Fermi invented the first nuclear reactor. https://en.wikipedia.org/wiki/Enrico_Fermi. - "fermi", - - // Richard Feynman was a key contributor to quantum mechanics and particle physics. https://en.wikipedia.org/wiki/Richard_Feynman - "feynman", - - // Benjamin Franklin is famous for his experiments in electricity and the invention of the lightning rod. - "franklin", - - // Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific truth. https://en.wikipedia.org/wiki/Galileo_Galilei - "galileo", - - // William Henry "Bill" Gates III is an American business magnate, philanthropist, investor, computer programmer, and inventor. https://en.wikipedia.org/wiki/Bill_Gates - "gates", - - // Adele Goldberg, was one of the designers and developers of the Smalltalk language. https://en.wikipedia.org/wiki/Adele_Goldberg_(computer_scientist) - "goldberg", - - // Adele Goldstine, born Adele Katz, wrote the complete technical description for the first electronic digital computer, ENIAC. https://en.wikipedia.org/wiki/Adele_Goldstine - "goldstine", - - // Shafi Goldwasser is a computer scientist known for creating theoretical foundations of modern cryptography. Winner of 2012 ACM Turing Award. https://en.wikipedia.org/wiki/Shafi_Goldwasser - "goldwasser", - - // James Golick, all around gangster. 
- "golick", - - // Jane Goodall - British primatologist, ethologist, and anthropologist who is considered to be the world's foremost expert on chimpanzees - https://en.wikipedia.org/wiki/Jane_Goodall - "goodall", - - // Lois Haibt - American computer scientist, part of the team at IBM that developed FORTRAN - https://en.wikipedia.org/wiki/Lois_Haibt - "haibt", - - // Margaret Hamilton - Director of the Software Engineering Division of the MIT Instrumentation Laboratory, which developed on-board flight software for the Apollo space program. https://en.wikipedia.org/wiki/Margaret_Hamilton_(scientist) - "hamilton", - - // Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. https://en.wikipedia.org/wiki/Stephen_Hawking - "hawking", - - // Werner Heisenberg was a founding father of quantum mechanics. https://en.wikipedia.org/wiki/Werner_Heisenberg - "heisenberg", - - // Grete Hermann was a German philosopher noted for her philosophical work on the foundations of quantum mechanics. https://en.wikipedia.org/wiki/Grete_Hermann - "hermann", - - // Jaroslav Heyrovský was the inventor of the polarographic method, father of the electroanalytical method, and recipient of the Nobel Prize in 1959. His main field of work was polarography. https://en.wikipedia.org/wiki/Jaroslav_Heyrovsk%C3%BD - "heyrovsky", - - // Dorothy Hodgkin was a British biochemist, credited with the development of protein crystallography. She was awarded the Nobel Prize in Chemistry in 1964. https://en.wikipedia.org/wiki/Dorothy_Hodgkin - "hodgkin", - - // Erna Schneider Hoover revolutionized modern communication by inventing a computerized telephone switching method. https://en.wikipedia.org/wiki/Erna_Schneider_Hoover - "hoover", - - // Grace Hopper developed the first compiler for a computer programming language and is credited with popularizing the term "debugging" for fixing computer glitches. https://en.wikipedia.org/wiki/Grace_Hopper - "hopper", - - // Frances Hugle, she was an American scientist, engineer, and inventor who contributed to the understanding of semiconductors, integrated circuitry, and the unique electrical principles of microscopic materials. https://en.wikipedia.org/wiki/Frances_Hugle - "hugle", - - // Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics - https://en.wikipedia.org/wiki/Hypatia - "hypatia", - - // Mary Jackson, American mathematician and aerospace engineer who earned the highest title within NASA's engineering department - https://en.wikipedia.org/wiki/Mary_Jackson_(engineer) - "jackson", - - // Yeong-Sil Jang was a Korean scientist and astronomer during the Joseon Dynasty; he invented the first metal printing press and water gauge. https://en.wikipedia.org/wiki/Jang_Yeong-sil - "jang", - - // Betty Jennings - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Jean_Bartik - "jennings", - - // Mary Lou Jepsen, was the founder and chief technology officer of One Laptop Per Child (OLPC), and the founder of Pixel Qi. https://en.wikipedia.org/wiki/Mary_Lou_Jepsen - "jepsen", - - // Katherine Coleman Goble Johnson - American physicist and mathematician contributed to the NASA. https://en.wikipedia.org/wiki/Katherine_Johnson - "johnson", - - // Irène Joliot-Curie - French scientist who was awarded the Nobel Prize for Chemistry in 1935. Daughter of Marie and Pierre Curie. 
https://en.wikipedia.org/wiki/Ir%C3%A8ne_Joliot-Curie - "joliot", - - // Karen Spärck Jones came up with the concept of inverse document frequency, which is used in most search engines today. https://en.wikipedia.org/wiki/Karen_Sp%C3%A4rck_Jones - "jones", - - // A. P. J. Abdul Kalam - is an Indian scientist aka Missile Man of India for his work on the development of ballistic missile and launch vehicle technology - https://en.wikipedia.org/wiki/A._P._J._Abdul_Kalam - "kalam", - - // Sergey Petrovich Kapitsa (Russian: Серге́й Петро́вич Капи́ца; 14 February 1928 – 14 August 2012) was a Russian physicist and demographer. He was best known as host of the popular and long-running Russian scientific TV show, Evident, but Incredible. His father was the Nobel laureate Soviet-era physicist Pyotr Kapitsa, and his brother was the geographer and Antarctic explorer Andrey Kapitsa. - https://en.wikipedia.org/wiki/Sergey_Kapitsa - "kapitsa", - - // Susan Kare, created the icons and many of the interface elements for the original Apple Macintosh in the 1980s, and was an original employee of NeXT, working as the Creative Director. https://en.wikipedia.org/wiki/Susan_Kare - "kare", - - // Mstislav Keldysh - a Soviet scientist in the field of mathematics and mechanics, academician of the USSR Academy of Sciences (1946), President of the USSR Academy of Sciences (1961–1975), three times Hero of Socialist Labor (1956, 1961, 1971), fellow of the Royal Society of Edinburgh (1968). https://en.wikipedia.org/wiki/Mstislav_Keldysh - "keldysh", - - // Mary Kenneth Keller, Sister Mary Kenneth Keller became the first American woman to earn a PhD in Computer Science in 1965. https://en.wikipedia.org/wiki/Mary_Kenneth_Keller - "keller", - - // Johannes Kepler, German astronomer known for his three laws of planetary motion - https://en.wikipedia.org/wiki/Johannes_Kepler - "kepler", - - // Har Gobind Khorana - Indian-American biochemist who shared the 1968 Nobel Prize for Physiology - https://en.wikipedia.org/wiki/Har_Gobind_Khorana - "khorana", - - // Jack Kilby invented silicone integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Jack_Kilby - "kilby", - - // Maria Kirch - German astronomer and first woman to discover a comet - https://en.wikipedia.org/wiki/Maria_Margarethe_Kirch - "kirch", - - // Donald Knuth - American computer scientist, author of "The Art of Computer Programming" and creator of the TeX typesetting system. https://en.wikipedia.org/wiki/Donald_Knuth - "knuth", - - // Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis, differential equations and mechanics - https://en.wikipedia.org/wiki/Sofia_Kovalevskaya - "kowalevski", - - // Marie-Jeanne de Lalande - French astronomer, mathematician and cataloguer of stars - https://en.wikipedia.org/wiki/Marie-Jeanne_de_Lalande - "lalande", - - // Hedy Lamarr - Actress and inventor. The principles of her work are now incorporated into modern Wi-Fi, CDMA and Bluetooth technology. https://en.wikipedia.org/wiki/Hedy_Lamarr - "lamarr", - - // Leslie B. Lamport - American computer scientist. Lamport is best known for his seminal work in distributed systems and was the winner of the 2013 Turing Award. 
https://en.wikipedia.org/wiki/Leslie_Lamport - "lamport", - - // Mary Leakey - British paleoanthropologist who discovered the first fossilized Proconsul skull - https://en.wikipedia.org/wiki/Mary_Leakey - "leakey", - - // Henrietta Swan Leavitt - she was an American astronomer who discovered the relation between the luminosity and the period of Cepheid variable stars. https://en.wikipedia.org/wiki/Henrietta_Swan_Leavitt - "leavitt", - - //Daniel Lewin - Mathematician, Akamai co-founder, soldier, 9/11 victim-- Developed optimization techniques for routing traffic on the internet. Died attempting to stop the 9-11 hijackers. https://en.wikipedia.org/wiki/Daniel_Lewin - "lewin", - - // Ruth Lichterman - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Ruth_Teitelbaum - "lichterman", - - // Barbara Liskov - co-developed the Liskov substitution principle. Liskov was also the winner of the Turing Prize in 2008. - https://en.wikipedia.org/wiki/Barbara_Liskov - "liskov", - - // Ada Lovelace invented the first algorithm. https://en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull) - "lovelace", - - // Auguste and Louis Lumière - the first filmmakers in history - https://en.wikipedia.org/wiki/Auguste_and_Louis_Lumi%C3%A8re - "lumiere", - - // Mahavira - Ancient Indian mathematician during 9th century AD who discovered basic algebraic identities - https://en.wikipedia.org/wiki/Mah%C4%81v%C4%ABra_(mathematician) - "mahavira", - - // Maria Mayer - American theoretical physicist and Nobel laureate in Physics for proposing the nuclear shell model of the atomic nucleus - https://en.wikipedia.org/wiki/Maria_Mayer - "mayer", - - // John McCarthy invented LISP: https://en.wikipedia.org/wiki/John_McCarthy_(computer_scientist) - "mccarthy", - - // Barbara McClintock - a distinguished American cytogeneticist, 1983 Nobel Laureate in Physiology or Medicine for discovering transposons. https://en.wikipedia.org/wiki/Barbara_McClintock - "mcclintock", - - // Malcolm McLean invented the modern shipping container: https://en.wikipedia.org/wiki/Malcom_McLean - "mclean", - - // Kay McNulty - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Kathleen_Antonelli - "mcnulty", - - // Dmitri Mendeleev - a chemist and inventor. He formulated the Periodic Law, created a farsighted version of the periodic table of elements, and used it to correct the properties of some already discovered elements and also to predict the properties of eight elements yet to be discovered. https://en.wikipedia.org/wiki/Dmitri_Mendeleev - "mendeleev", - - // Lise Meitner - Austrian/Swedish physicist who was involved in the discovery of nuclear fission. The element meitnerium is named after her - https://en.wikipedia.org/wiki/Lise_Meitner - "meitner", - - // Carla Meninsky, was the game designer and programmer for Atari 2600 games Dodge 'Em and Warlords. https://en.wikipedia.org/wiki/Carla_Meninsky - "meninsky", - - // Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - https://en.wikipedia.org/wiki/Johanna_Mestorf - "mestorf", - - // Marvin Minsky - Pioneer in Artificial Intelligence, co-founder of the MIT's AI Lab, won the Turing Award in 1969. https://en.wikipedia.org/wiki/Marvin_Minsky - "minsky", - - // Maryam Mirzakhani - an Iranian mathematician and the first woman to win the Fields Medal. 
https://en.wikipedia.org/wiki/Maryam_Mirzakhani - "mirzakhani", - - // Samuel Morse - contributed to the invention of a single-wire telegraph system based on European telegraphs and was a co-developer of the Morse code - https://en.wikipedia.org/wiki/Samuel_Morse - "morse", - - // Ian Murdock - founder of the Debian project - https://en.wikipedia.org/wiki/Ian_Murdock - "murdock", - - // John von Neumann - todays computer architectures are based on the von Neumann architecture. https://en.wikipedia.org/wiki/Von_Neumann_architecture - "neumann", - - // Isaac Newton invented classic mechanics and modern optics. https://en.wikipedia.org/wiki/Isaac_Newton - "newton", - - // Florence Nightingale, more prominently known as a nurse, was also the first female member of the Royal Statistical Society and a pioneer in statistical graphics https://en.wikipedia.org/wiki/Florence_Nightingale#Statistics_and_sanitary_reform - "nightingale", - - // Alfred Nobel - a Swedish chemist, engineer, innovator, and armaments manufacturer (inventor of dynamite) - https://en.wikipedia.org/wiki/Alfred_Nobel - "nobel", - - // Emmy Noether, German mathematician. Noether's Theorem is named after her. https://en.wikipedia.org/wiki/Emmy_Noether - "noether", - - // Poppy Northcutt. Poppy Northcutt was the first woman to work as part of NASA’s Mission Control. http://www.businessinsider.com/poppy-northcutt-helped-apollo-astronauts-2014-12?op=1 - "northcutt", - - // Robert Noyce invented silicone integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Robert_Noyce - "noyce", - - // Panini - Ancient Indian linguist and grammarian from 4th century CE who worked on the world's first formal system - https://en.wikipedia.org/wiki/P%C4%81%E1%B9%87ini#Comparison_with_modern_formal_systems - "panini", - - // Ambroise Pare invented modern surgery. https://en.wikipedia.org/wiki/Ambroise_Par%C3%A9 - "pare", - - // Louis Pasteur discovered vaccination, fermentation and pasteurization. https://en.wikipedia.org/wiki/Louis_Pasteur. - "pasteur", - - // Cecilia Payne-Gaposchkin was an astronomer and astrophysicist who, in 1925, proposed in her Ph.D. thesis an explanation for the composition of stars in terms of the relative abundances of hydrogen and helium. https://en.wikipedia.org/wiki/Cecilia_Payne-Gaposchkin - "payne", - - // Radia Perlman is a software designer and network engineer and most famous for her invention of the spanning-tree protocol (STP). https://en.wikipedia.org/wiki/Radia_Perlman - "perlman", - - // Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language. https://en.wikipedia.org/wiki/Rob_Pike - "pike", - - // Henri Poincaré made fundamental contributions in several fields of mathematics. https://en.wikipedia.org/wiki/Henri_Poincar%C3%A9 - "poincare", - - // Laura Poitras is a director and producer whose work, made possible by open source crypto tools, advances the causes of truth and freedom of information by reporting disclosures by whistleblowers such as Edward Snowden. https://en.wikipedia.org/wiki/Laura_Poitras - "poitras", - - // Tat’yana Avenirovna Proskuriakova (Russian: Татья́на Авени́ровна Проскуряко́ва) (January 23 [O.S. January 10] 1909 – August 30, 1985) was a Russian-American Mayanist scholar and archaeologist who contributed significantly to the deciphering of Maya hieroglyphs, the writing system of the pre-Columbian Maya civilization of Mesoamerica. 
https://en.wikipedia.org/wiki/Tatiana_Proskouriakoff - "proskuriakova", - - // Claudius Ptolemy - a Greco-Egyptian writer of Alexandria, known as a mathematician, astronomer, geographer, astrologer, and poet of a single epigram in the Greek Anthology - https://en.wikipedia.org/wiki/Ptolemy - "ptolemy", - - // C. V. Raman - Indian physicist who won the Nobel Prize in 1930 for proposing the Raman effect. - https://en.wikipedia.org/wiki/C._V._Raman - "raman", - - // Srinivasa Ramanujan - Indian mathematician and autodidact who made extraordinary contributions to mathematical analysis, number theory, infinite series, and continued fractions. - https://en.wikipedia.org/wiki/Srinivasa_Ramanujan - "ramanujan", - - // Sally Kristen Ride was an American physicist and astronaut. She was the first American woman in space, and the youngest American astronaut. https://en.wikipedia.org/wiki/Sally_Ride - "ride", - - // Rita Levi-Montalcini - Won Nobel Prize in Physiology or Medicine jointly with colleague Stanley Cohen for the discovery of nerve growth factor (https://en.wikipedia.org/wiki/Rita_Levi-Montalcini) - "montalcini", - - // Dennis Ritchie - co-creator of UNIX and the C programming language. - https://en.wikipedia.org/wiki/Dennis_Ritchie - "ritchie", - - // Wilhelm Conrad Röntgen - German physicist who was awarded the first Nobel Prize in Physics in 1901 for the discovery of X-rays (Röntgen rays). https://en.wikipedia.org/wiki/Wilhelm_R%C3%B6ntgen - "roentgen", - - // Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the understanding of DNA - https://en.wikipedia.org/wiki/Rosalind_Franklin - "rosalind", - - // Meghnad Saha - Indian astrophysicist best known for his development of the Saha equation, used to describe chemical and physical conditions in stars - https://en.wikipedia.org/wiki/Meghnad_Saha - "saha", - - // Jean E. Sammet developed FORMAC, the first widely used computer language for symbolic manipulation of mathematical formulas. https://en.wikipedia.org/wiki/Jean_E._Sammet - "sammet", - - // Carol Shaw - Originally an Atari employee, Carol Shaw is said to be the first female video game designer. https://en.wikipedia.org/wiki/Carol_Shaw_(video_game_designer) - "shaw", - - // Dame Stephanie "Steve" Shirley - Founded a software company in 1962 employing women working from home. https://en.wikipedia.org/wiki/Steve_Shirley - "shirley", - - // William Shockley co-invented the transistor - https://en.wikipedia.org/wiki/William_Shockley - "shockley", - - // Françoise Barré-Sinoussi - French virologist and Nobel Prize Laureate in Physiology or Medicine; her work was fundamental in identifying HIV as the cause of AIDS. https://en.wikipedia.org/wiki/Fran%C3%A7oise_Barr%C3%A9-Sinoussi - "sinoussi", - - // Betty Snyder - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Betty_Holberton - "snyder", - - // Frances Spence - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Frances_Spence - "spence", - - // Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. 
https://en.wikiquote.org/wiki/Richard_Stallman - "stallman", - - // Lina Solomonovna Stern (or Shtern; Russian: Лина Соломоновна Штерн; 26 August 1878 – 7 March 1968) was a Soviet biochemist, physiologist and humanist whose medical discoveries saved thousands of lives at the fronts of World War II. She is best known for her pioneering work on blood–brain barrier, which she described as hemato-encephalic barrier in 1921. https://en.wikipedia.org/wiki/Lina_Stern - "shtern", - - // Michael Stonebraker is a database research pioneer and architect of Ingres, Postgres, VoltDB and SciDB. Winner of 2014 ACM Turing Award. https://en.wikipedia.org/wiki/Michael_Stonebraker - "stonebraker", - - // Janese Swanson (with others) developed the first of the Carmen Sandiego games. She went on to found Girl Tech. https://en.wikipedia.org/wiki/Janese_Swanson - "swanson", - - // Aaron Swartz was influential in creating RSS, Markdown, Creative Commons, Reddit, and much of the internet as we know it today. He was devoted to freedom of information on the web. https://en.wikiquote.org/wiki/Aaron_Swartz - "swartz", - - // Bertha Swirles was a theoretical physicist who made a number of contributions to early quantum theory. https://en.wikipedia.org/wiki/Bertha_Swirles - "swirles", - - // Valentina Tereshkova is a russian engineer, cosmonaut and politician. She was the first woman flying to space in 1963. In 2013, at the age of 76, she offered to go on a one-way mission to mars. https://en.wikipedia.org/wiki/Valentina_Tereshkova - "tereshkova", - - // Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. https://en.wikipedia.org/wiki/Nikola_Tesla - "tesla", - - // Ken Thompson - co-creator of UNIX and the C programming language - https://en.wikipedia.org/wiki/Ken_Thompson - "thompson", - - // Linus Torvalds invented Linux and Git. https://en.wikipedia.org/wiki/Linus_Torvalds - "torvalds", - - // Alan Turing was a founding father of computer science. https://en.wikipedia.org/wiki/Alan_Turing. - "turing", - - // Varahamihira - Ancient Indian mathematician who discovered trigonometric formulae during 505-587 CE - https://en.wikipedia.org/wiki/Var%C4%81hamihira#Contributions - "varahamihira", - - // Dorothy Vaughan was a NASA mathematician and computer programmer on the SCOUT launch vehicle program that put America's first satellites into space - https://en.wikipedia.org/wiki/Dorothy_Vaughan - "vaughan", - - // Sir Mokshagundam Visvesvaraya - is a notable Indian engineer. He is a recipient of the Indian Republic's highest honour, the Bharat Ratna, in 1955. On his birthday, 15 September is celebrated as Engineer's Day in India in his memory - https://en.wikipedia.org/wiki/Visvesvaraya - "visvesvaraya", - - // Christiane Nüsslein-Volhard - German biologist, won Nobel Prize in Physiology or Medicine in 1995 for research on the genetic control of embryonic development. https://en.wikipedia.org/wiki/Christiane_N%C3%BCsslein-Volhard - "volhard", - - // Cédric Villani - French mathematician, won Fields Medal, Fermat Prize and Poincaré Price for his work in differential geometry and statistical mechanics. https://en.wikipedia.org/wiki/C%C3%A9dric_Villani - "villani", - - // Marlyn Wescoff - one of the original programmers of the ENIAC. 
https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Marlyn_Meltzer - "wescoff", - - // Andrew Wiles - Notable British mathematician who proved the enigmatic Fermat's Last Theorem - https://en.wikipedia.org/wiki/Andrew_Wiles - "wiles", - - // Roberta Williams, did pioneering work in graphical adventure games for personal computers, particularly the King's Quest series. https://en.wikipedia.org/wiki/Roberta_Williams - "williams", - - // Sophie Wilson designed the first Acorn Micro-Computer and the instruction set for ARM processors. https://en.wikipedia.org/wiki/Sophie_Wilson - "wilson", - - // Jeannette Wing - co-developed the Liskov substitution principle. - https://en.wikipedia.org/wiki/Jeannette_Wing - "wing", - - // Steve Wozniak invented the Apple I and Apple II. https://en.wikipedia.org/wiki/Steve_Wozniak - "wozniak", - - // The Wright brothers, Orville and Wilbur - credited with inventing and building the world's first successful airplane and making the first controlled, powered and sustained heavier-than-air human flight - https://en.wikipedia.org/wiki/Wright_brothers - "wright", - - // Rosalyn Sussman Yalow - Rosalyn Sussman Yalow was an American medical physicist, and a co-winner of the 1977 Nobel Prize in Physiology or Medicine for development of the radioimmunoassay technique. https://en.wikipedia.org/wiki/Rosalyn_Sussman_Yalow - "yalow", - - // Ada Yonath - an Israeli crystallographer, the first woman from the Middle East to win a Nobel prize in the sciences. https://en.wikipedia.org/wiki/Ada_Yonath - "yonath", - - // Nikolay Yegorovich Zhukovsky (Russian: Никола́й Его́рович Жуко́вский, January 17 1847 – March 17, 1921) was a Russian scientist, mathematician and engineer, and a founding father of modern aero- and hydrodynamics. Whereas contemporary scientists scoffed at the idea of human flight, Zhukovsky was the first to undertake the study of airflow. He is often called the Father of Russian Aviation. https://en.wikipedia.org/wiki/Nikolay_Yegorovich_Zhukovsky - "zhukovsky", - } -) - -// GetRandomName generates a random name from the list of adjectives and surnames in this package -// formatted as "adjective_surname". For example 'focused_turing'. If retry is non-zero, a random -// integer between 0 and 10 will be added to the end of the name, e.g `focused_turing3` -func GetRandomName(retry int) string { -begin: - name := fmt.Sprintf("%s_%s", left[rand.Intn(len(left))], right[rand.Intn(len(right))]) - if name == "boring_wozniak" /* Steve Wozniak is not boring */ { - goto begin - } - - if retry > 0 { - name = fmt.Sprintf("%s%d", name, rand.Intn(10)) - } - return name -} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go deleted file mode 100644 index 94780ef61..000000000 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go +++ /dev/null @@ -1,74 +0,0 @@ -// +build !windows - -// Package kernel provides helper function to get, parse and compare kernel -// versions for different platforms. -package kernel // import "github.com/docker/docker/pkg/parsers/kernel" - -import ( - "errors" - "fmt" -) - -// VersionInfo holds information about the kernel. -type VersionInfo struct { - Kernel int // Version of the kernel (e.g. 4.1.2-generic -> 4) - Major int // Major part of the kernel version (e.g. 4.1.2-generic -> 1) - Minor int // Minor part of the kernel version (e.g. 4.1.2-generic -> 2) - Flavor string // Flavor of the kernel version (e.g. 
4.1.2-generic -> generic) -} - -func (k *VersionInfo) String() string { - return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor) -} - -// CompareKernelVersion compares two kernel.VersionInfo structs. -// Returns -1 if a < b, 0 if a == b, 1 it a > b -func CompareKernelVersion(a, b VersionInfo) int { - if a.Kernel < b.Kernel { - return -1 - } else if a.Kernel > b.Kernel { - return 1 - } - - if a.Major < b.Major { - return -1 - } else if a.Major > b.Major { - return 1 - } - - if a.Minor < b.Minor { - return -1 - } else if a.Minor > b.Minor { - return 1 - } - - return 0 -} - -// ParseRelease parses a string and creates a VersionInfo based on it. -func ParseRelease(release string) (*VersionInfo, error) { - var ( - kernel, major, minor, parsed int - flavor, partial string - ) - - // Ignore error from Sscanf to allow an empty flavor. Instead, just - // make sure we got all the version numbers. - parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) - if parsed < 2 { - return nil, errors.New("Can't parse kernel version " + release) - } - - // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 - parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) - if parsed < 1 { - flavor = partial - } - - return &VersionInfo{ - Kernel: kernel, - Major: major, - Minor: minor, - Flavor: flavor, - }, nil -} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go deleted file mode 100644 index 6e599eebc..000000000 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go +++ /dev/null @@ -1,56 +0,0 @@ -// +build darwin - -// Package kernel provides helper function to get, parse and compare kernel -// versions for different platforms. -package kernel // import "github.com/docker/docker/pkg/parsers/kernel" - -import ( - "fmt" - "os/exec" - "strings" - - "github.com/mattn/go-shellwords" -) - -// GetKernelVersion gets the current kernel version. -func GetKernelVersion() (*VersionInfo, error) { - release, err := getRelease() - if err != nil { - return nil, err - } - - return ParseRelease(release) -} - -// getRelease uses `system_profiler SPSoftwareDataType` to get OSX kernel version -func getRelease() (string, error) { - cmd := exec.Command("system_profiler", "SPSoftwareDataType") - osName, err := cmd.Output() - if err != nil { - return "", err - } - - var release string - data := strings.Split(string(osName), "\n") - for _, line := range data { - if strings.Contains(line, "Kernel Version") { - // It has the format like ' Kernel Version: Darwin 14.5.0' - content := strings.SplitN(line, ":", 2) - if len(content) != 2 { - return "", fmt.Errorf("Kernel Version is invalid") - } - - prettyNames, err := shellwords.Parse(content[1]) - if err != nil { - return "", fmt.Errorf("Kernel Version is invalid: %s", err.Error()) - } - - if len(prettyNames) != 2 { - return "", fmt.Errorf("Kernel Version needs to be 'Darwin x.x.x' ") - } - release = prettyNames[1] - } - } - - return release, nil -} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go deleted file mode 100644 index 8a9aa3122..000000000 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build linux freebsd openbsd - -// Package kernel provides helper function to get, parse and compare kernel -// versions for different platforms. 
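As an aside on the name generator whose surname list ends further above: a minimal sketch of how GetRandomName is used. The import path is assumed from the upstream docker/docker tree, since the file header for that hunk is not visible here, and the printed names are of course random.

    package main

    import (
        "fmt"

        // Assumed import path for the name generator deleted above.
        "github.com/docker/docker/pkg/namesgenerator"
    )

    func main() {
        fmt.Println(namesgenerator.GetRandomName(0)) // e.g. "focused_turing"
        // A non-zero retry appends a random digit, e.g. "focused_turing3".
        fmt.Println(namesgenerator.GetRandomName(1))
    }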
-package kernel // import "github.com/docker/docker/pkg/parsers/kernel" - -import ( - "bytes" - - "github.com/sirupsen/logrus" -) - -// GetKernelVersion gets the current kernel version. -func GetKernelVersion() (*VersionInfo, error) { - uts, err := uname() - if err != nil { - return nil, err - } - - // Remove the \x00 from the release for Atoi to parse correctly - return ParseRelease(string(uts.Release[:bytes.IndexByte(uts.Release[:], 0)])) -} - -// CheckKernelVersion checks if current kernel is newer than (or equal to) -// the given version. -func CheckKernelVersion(k, major, minor int) bool { - if v, err := GetKernelVersion(); err != nil { - logrus.Warnf("error getting kernel version: %s", err) - } else { - if CompareKernelVersion(*v, VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 { - return false - } - } - return true -} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go deleted file mode 100644 index b7b15a1fd..000000000 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go +++ /dev/null @@ -1,51 +0,0 @@ -package kernel // import "github.com/docker/docker/pkg/parsers/kernel" - -import ( - "fmt" - - "golang.org/x/sys/windows" - "golang.org/x/sys/windows/registry" -) - -// VersionInfo holds information about the kernel. -type VersionInfo struct { - kvi string // Version of the kernel (e.g. 6.1.7601.17592 -> 6) - major int // Major part of the kernel version (e.g. 6.1.7601.17592 -> 1) - minor int // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 7601) - build int // Build number of the kernel version (e.g. 6.1.7601.17592 -> 17592) -} - -func (k *VersionInfo) String() string { - return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi) -} - -// GetKernelVersion gets the current kernel version. -func GetKernelVersion() (*VersionInfo, error) { - - KVI := &VersionInfo{"Unknown", 0, 0, 0} - - k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) - if err != nil { - return KVI, err - } - defer k.Close() - - blex, _, err := k.GetStringValue("BuildLabEx") - if err != nil { - return KVI, err - } - KVI.kvi = blex - - // Important - docker.exe MUST be manifested for this API to return - // the correct information. - dwVersion, err := windows.GetVersion() - if err != nil { - return KVI, err - } - - KVI.major = int(dwVersion & 0xFF) - KVI.minor = int((dwVersion & 0XFF00) >> 8) - KVI.build = int((dwVersion & 0xFFFF0000) >> 16) - - return KVI, nil -} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go deleted file mode 100644 index 212ff4502..000000000 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go +++ /dev/null @@ -1,17 +0,0 @@ -package kernel // import "github.com/docker/docker/pkg/parsers/kernel" - -import "golang.org/x/sys/unix" - -// Utsname represents the system name structure. -// It is passthrough for unix.Utsname in order to make it portable with -// other platforms where it is not available. 
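A minimal sketch of the kernel-version helpers deleted above, using the same vendored import path. ParseRelease tolerates a missing flavor suffix, and the unix CheckKernelVersion deliberately answers true (after logging a warning) when the running kernel version cannot be read, so callers fail open rather than closed.

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/parsers/kernel"
    )

    func main() {
        // "4.9.123-generic" parses to Kernel=4, Major=9, Minor=123, Flavor="-generic".
        v, err := kernel.ParseRelease("4.9.123-generic")
        if err != nil {
            panic(err)
        }
        // CompareKernelVersion is strcmp-style: -1, 0 or 1.
        if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 3, Major: 10, Minor: 0}) >= 0 {
            fmt.Printf("kernel %s is at least 3.10.0\n", v.String())
        }
    }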
-type Utsname unix.Utsname - -func uname() (*unix.Utsname, error) { - uts := &unix.Utsname{} - - if err := unix.Uname(uts); err != nil { - return nil, err - } - return uts, nil -} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go deleted file mode 100644 index b2139b60e..000000000 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go +++ /dev/null @@ -1,14 +0,0 @@ -package kernel // import "github.com/docker/docker/pkg/parsers/kernel" - -import ( - "golang.org/x/sys/unix" -) - -func uname() (*unix.Utsname, error) { - uts := &unix.Utsname{} - - if err := unix.Uname(uts); err != nil { - return nil, err - } - return uts, nil -} diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go deleted file mode 100644 index 97906e4cd..000000000 --- a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build !linux - -package kernel // import "github.com/docker/docker/pkg/parsers/kernel" - -import ( - "errors" -) - -// Utsname represents the system name structure. -// It is defined here to make it portable as it is available on linux but not -// on windows. -type Utsname struct { - Release [65]byte -} - -func uname() (*Utsname, error) { - return nil, errors.New("Kernel version detection is available only on linux") -} diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go deleted file mode 100644 index b251d6aed..000000000 --- a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_linux.go +++ /dev/null @@ -1,77 +0,0 @@ -// Package operatingsystem provides helper function to get the operating system -// name for different platforms. -package operatingsystem // import "github.com/docker/docker/pkg/parsers/operatingsystem" - -import ( - "bufio" - "bytes" - "fmt" - "io/ioutil" - "os" - "strings" - - "github.com/mattn/go-shellwords" -) - -var ( - // file to use to detect if the daemon is running in a container - proc1Cgroup = "/proc/1/cgroup" - - // file to check to determine Operating System - etcOsRelease = "/etc/os-release" - - // used by stateless systems like Clear Linux - altOsRelease = "/usr/lib/os-release" -) - -// GetOperatingSystem gets the name of the current operating system. -func GetOperatingSystem() (string, error) { - osReleaseFile, err := os.Open(etcOsRelease) - if err != nil { - if !os.IsNotExist(err) { - return "", fmt.Errorf("Error opening %s: %v", etcOsRelease, err) - } - osReleaseFile, err = os.Open(altOsRelease) - if err != nil { - return "", fmt.Errorf("Error opening %s: %v", altOsRelease, err) - } - } - defer osReleaseFile.Close() - - var prettyName string - scanner := bufio.NewScanner(osReleaseFile) - for scanner.Scan() { - line := scanner.Text() - if strings.HasPrefix(line, "PRETTY_NAME=") { - data := strings.SplitN(line, "=", 2) - prettyNames, err := shellwords.Parse(data[1]) - if err != nil { - return "", fmt.Errorf("PRETTY_NAME is invalid: %s", err.Error()) - } - if len(prettyNames) != 1 { - return "", fmt.Errorf("PRETTY_NAME needs to be enclosed by quotes if they have spaces: %s", data[1]) - } - prettyName = prettyNames[0] - } - } - if prettyName != "" { - return prettyName, nil - } - // If not set, defaults to PRETTY_NAME="Linux" - // c.f. 
http://www.freedesktop.org/software/systemd/man/os-release.html - return "Linux", nil -} - -// IsContainerized returns true if we are running inside a container. -func IsContainerized() (bool, error) { - b, err := ioutil.ReadFile(proc1Cgroup) - if err != nil { - return false, err - } - for _, line := range bytes.Split(b, []byte{'\n'}) { - if len(line) > 0 && !bytes.HasSuffix(line, []byte{'/'}) && !bytes.HasSuffix(line, []byte("init.scope")) { - return true, nil - } - } - return false, nil -} diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix.go deleted file mode 100644 index f4792d37d..000000000 --- a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_unix.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build freebsd darwin - -package operatingsystem // import "github.com/docker/docker/pkg/parsers/operatingsystem" - -import ( - "errors" - "os/exec" -) - -// GetOperatingSystem gets the name of the current operating system. -func GetOperatingSystem() (string, error) { - cmd := exec.Command("uname", "-s") - osName, err := cmd.Output() - if err != nil { - return "", err - } - return string(osName), nil -} - -// IsContainerized returns true if we are running inside a container. -// No-op on FreeBSD and Darwin, always returns false. -func IsContainerized() (bool, error) { - // TODO: Implement jail detection for freeBSD - return false, errors.New("Cannot detect if we are in container") -} diff --git a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go b/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go deleted file mode 100644 index 372de5146..000000000 --- a/vendor/github.com/docker/docker/pkg/parsers/operatingsystem/operatingsystem_windows.go +++ /dev/null @@ -1,51 +0,0 @@ -package operatingsystem // import "github.com/docker/docker/pkg/parsers/operatingsystem" - -import ( - "fmt" - - "golang.org/x/sys/windows/registry" -) - -// GetOperatingSystem gets the name of the current operating system. -func GetOperatingSystem() (string, error) { - - // Default return value - ret := "Unknown Operating System" - - k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) - if err != nil { - return ret, err - } - defer k.Close() - - pn, _, err := k.GetStringValue("ProductName") - if err != nil { - return ret, err - } - ret = pn - - ri, _, err := k.GetStringValue("ReleaseId") - if err != nil { - return ret, err - } - ret = fmt.Sprintf("%s Version %s", ret, ri) - - cbn, _, err := k.GetStringValue("CurrentBuildNumber") - if err != nil { - return ret, err - } - - ubr, _, err := k.GetIntegerValue("UBR") - if err != nil { - return ret, err - } - ret = fmt.Sprintf("%s (OS Build %s.%d)", ret, cbn, ubr) - - return ret, nil -} - -// IsContainerized returns true if we are running inside a container. -// No-op on Windows, always returns false. -func IsContainerized() (bool, error) { - return false, nil -} diff --git a/vendor/github.com/docker/docker/pkg/parsers/parsers.go b/vendor/github.com/docker/docker/pkg/parsers/parsers.go deleted file mode 100644 index c4186a4c0..000000000 --- a/vendor/github.com/docker/docker/pkg/parsers/parsers.go +++ /dev/null @@ -1,69 +0,0 @@ -// Package parsers provides helper functions to parse and validate different type -// of string. 
It can be hosts, unix addresses, tcp addresses, filters, kernel -// operating system versions. -package parsers // import "github.com/docker/docker/pkg/parsers" - -import ( - "fmt" - "strconv" - "strings" -) - -// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value) -func ParseKeyValueOpt(opt string) (string, string, error) { - parts := strings.SplitN(opt, "=", 2) - if len(parts) != 2 { - return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) - } - return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil -} - -// ParseUintList parses and validates the specified string as the value -// found in some cgroup file (e.g. `cpuset.cpus`, `cpuset.mems`), which could be -// one of the formats below. Note that duplicates are actually allowed in the -// input string. It returns a `map[int]bool` with available elements from `val` -// set to `true`. -// Supported formats: -// 7 -// 1-6 -// 0,3-4,7,8-10 -// 0-0,0,1-7 -// 03,1-3 <- this is gonna get parsed as [1,2,3] -// 3,2,1 -// 0-2,3,1 -func ParseUintList(val string) (map[int]bool, error) { - if val == "" { - return map[int]bool{}, nil - } - - availableInts := make(map[int]bool) - split := strings.Split(val, ",") - errInvalidFormat := fmt.Errorf("invalid format: %s", val) - - for _, r := range split { - if !strings.Contains(r, "-") { - v, err := strconv.Atoi(r) - if err != nil { - return nil, errInvalidFormat - } - availableInts[v] = true - } else { - split := strings.SplitN(r, "-", 2) - min, err := strconv.Atoi(split[0]) - if err != nil { - return nil, errInvalidFormat - } - max, err := strconv.Atoi(split[1]) - if err != nil { - return nil, errInvalidFormat - } - if max < min { - return nil, errInvalidFormat - } - for i := min; i <= max; i++ { - availableInts[i] = true - } - } - } - return availableInts, nil -} diff --git a/vendor/github.com/docker/docker/pkg/pidfile/pidfile.go b/vendor/github.com/docker/docker/pkg/pidfile/pidfile.go deleted file mode 100644 index 0617a89e5..000000000 --- a/vendor/github.com/docker/docker/pkg/pidfile/pidfile.go +++ /dev/null @@ -1,53 +0,0 @@ -// Package pidfile provides structure and helper functions to create and remove -// PID file. A PID file is usually a file used to store the process ID of a -// running process. -package pidfile // import "github.com/docker/docker/pkg/pidfile" - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - - "github.com/docker/docker/pkg/system" -) - -// PIDFile is a file used to store the process ID of a running process. -type PIDFile struct { - path string -} - -func checkPIDFileAlreadyExists(path string) error { - if pidByte, err := ioutil.ReadFile(path); err == nil { - pidString := strings.TrimSpace(string(pidByte)) - if pid, err := strconv.Atoi(pidString); err == nil { - if processExists(pid) { - return fmt.Errorf("pid file found, ensure docker is not running or delete %s", path) - } - } - } - return nil -} - -// New creates a PIDfile using the specified path. -func New(path string) (*PIDFile, error) { - if err := checkPIDFileAlreadyExists(path); err != nil { - return nil, err - } - // Note MkdirAll returns nil if a directory already exists - if err := system.MkdirAll(filepath.Dir(path), os.FileMode(0755), ""); err != nil { - return nil, err - } - if err := ioutil.WriteFile(path, []byte(fmt.Sprintf("%d", os.Getpid())), 0644); err != nil { - return nil, err - } - - return &PIDFile{path: path}, nil -} - -// Remove removes the PIDFile. 
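A short sketch of the two parsers removed above, under the same vendored import path. Note that ParseUintList returns a set-like map[int]bool and, as its comment says, tolerates duplicates and overlapping ranges.

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/parsers"
    )

    func main() {
        key, val, err := parsers.ParseKeyValueOpt("storage-driver=overlay2")
        if err != nil {
            panic(err)
        }
        fmt.Println(key, val) // storage-driver overlay2

        // "0,3-4,7" expands to the set {0, 3, 4, 7}.
        cpus, err := parsers.ParseUintList("0,3-4,7")
        if err != nil {
            panic(err)
        }
        fmt.Println(cpus[3], cpus[5]) // true false
    }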
-func (file PIDFile) Remove() error { - return os.Remove(file.path) -} diff --git a/vendor/github.com/docker/docker/pkg/pidfile/pidfile_darwin.go b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_darwin.go deleted file mode 100644 index 92746aa7b..000000000 --- a/vendor/github.com/docker/docker/pkg/pidfile/pidfile_darwin.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build darwin - -package pidfile // import "github.com/docker/docker/pkg/pidfile" - -import ( - "golang.org/x/sys/unix" -) - -func processExists(pid int) bool { - // OS X does not have a proc filesystem. - // Use kill -0 pid to judge if the process exists. - err := unix.Kill(pid, 0) - return err == nil -} diff --git a/vendor/github.com/docker/docker/pkg/pidfile/pidfile_unix.go b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_unix.go deleted file mode 100644 index cc6696d21..000000000 --- a/vendor/github.com/docker/docker/pkg/pidfile/pidfile_unix.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !windows,!darwin - -package pidfile // import "github.com/docker/docker/pkg/pidfile" - -import ( - "os" - "path/filepath" - "strconv" -) - -func processExists(pid int) bool { - if _, err := os.Stat(filepath.Join("/proc", strconv.Itoa(pid))); err == nil { - return true - } - return false -} diff --git a/vendor/github.com/docker/docker/pkg/pidfile/pidfile_windows.go b/vendor/github.com/docker/docker/pkg/pidfile/pidfile_windows.go deleted file mode 100644 index 1c5e6cb65..000000000 --- a/vendor/github.com/docker/docker/pkg/pidfile/pidfile_windows.go +++ /dev/null @@ -1,25 +0,0 @@ -package pidfile // import "github.com/docker/docker/pkg/pidfile" - -import ( - "golang.org/x/sys/windows" -) - -const ( - processQueryLimitedInformation = 0x1000 - - stillActive = 259 -) - -func processExists(pid int) bool { - h, err := windows.OpenProcess(processQueryLimitedInformation, false, uint32(pid)) - if err != nil { - return false - } - var c uint32 - err = windows.GetExitCodeProcess(h, &c) - windows.Close(h) - if err != nil { - return c == stillActive - } - return true -} diff --git a/vendor/github.com/docker/docker/pkg/platform/architecture_linux.go b/vendor/github.com/docker/docker/pkg/platform/architecture_linux.go deleted file mode 100644 index a260a23f4..000000000 --- a/vendor/github.com/docker/docker/pkg/platform/architecture_linux.go +++ /dev/null @@ -1,18 +0,0 @@ -// Package platform provides helper function to get the runtime architecture -// for different platforms. -package platform // import "github.com/docker/docker/pkg/platform" - -import ( - "bytes" - - "golang.org/x/sys/unix" -) - -// runtimeArchitecture gets the name of the current architecture (x86, x86_64, …) -func runtimeArchitecture() (string, error) { - utsname := &unix.Utsname{} - if err := unix.Uname(utsname); err != nil { - return "", err - } - return string(utsname.Machine[:bytes.IndexByte(utsname.Machine[:], 0)]), nil -} diff --git a/vendor/github.com/docker/docker/pkg/platform/architecture_unix.go b/vendor/github.com/docker/docker/pkg/platform/architecture_unix.go deleted file mode 100644 index d51f68698..000000000 --- a/vendor/github.com/docker/docker/pkg/platform/architecture_unix.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build freebsd darwin - -// Package platform provides helper function to get the runtime architecture -// for different platforms. -package platform // import "github.com/docker/docker/pkg/platform" - -import ( - "os/exec" - "strings" -) - -// runtimeArchitecture gets the name of the current architecture (x86, x86_64, i86pc, sun4v, ...) 
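A sketch of the intended pidfile lifecycle; the path is hypothetical. New refuses to proceed when the recorded PID still maps to a live process, and the error from the deferred Remove is deliberately ignored in this sketch.

    package main

    import (
        "log"

        "github.com/docker/docker/pkg/pidfile"
    )

    func main() {
        // Hypothetical path; New fails if a previous instance still runs.
        pf, err := pidfile.New("/var/run/exampled.pid")
        if err != nil {
            log.Fatal(err)
        }
        defer pf.Remove() // best-effort cleanup on exit

        // ... daemon main loop would run here ...
    }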
-func runtimeArchitecture() (string, error) { - cmd := exec.Command("/usr/bin/uname", "-m") - machine, err := cmd.Output() - if err != nil { - return "", err - } - return strings.TrimSpace(string(machine)), nil -} diff --git a/vendor/github.com/docker/docker/pkg/platform/architecture_windows.go b/vendor/github.com/docker/docker/pkg/platform/architecture_windows.go deleted file mode 100644 index a25f1bc51..000000000 --- a/vendor/github.com/docker/docker/pkg/platform/architecture_windows.go +++ /dev/null @@ -1,60 +0,0 @@ -package platform // import "github.com/docker/docker/pkg/platform" - -import ( - "fmt" - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var ( - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - procGetSystemInfo = modkernel32.NewProc("GetSystemInfo") -) - -// see http://msdn.microsoft.com/en-us/library/windows/desktop/ms724958(v=vs.85).aspx -type systeminfo struct { - wProcessorArchitecture uint16 - wReserved uint16 - dwPageSize uint32 - lpMinimumApplicationAddress uintptr - lpMaximumApplicationAddress uintptr - dwActiveProcessorMask uintptr - dwNumberOfProcessors uint32 - dwProcessorType uint32 - dwAllocationGranularity uint32 - wProcessorLevel uint16 - wProcessorRevision uint16 -} - -// Constants -const ( - ProcessorArchitecture64 = 9 // PROCESSOR_ARCHITECTURE_AMD64 - ProcessorArchitectureIA64 = 6 // PROCESSOR_ARCHITECTURE_IA64 - ProcessorArchitecture32 = 0 // PROCESSOR_ARCHITECTURE_INTEL - ProcessorArchitectureArm = 5 // PROCESSOR_ARCHITECTURE_ARM -) - -// runtimeArchitecture gets the name of the current architecture (x86, x86_64, …) -func runtimeArchitecture() (string, error) { - var sysinfo systeminfo - syscall.Syscall(procGetSystemInfo.Addr(), 1, uintptr(unsafe.Pointer(&sysinfo)), 0, 0) - switch sysinfo.wProcessorArchitecture { - case ProcessorArchitecture64, ProcessorArchitectureIA64: - return "x86_64", nil - case ProcessorArchitecture32: - return "i686", nil - case ProcessorArchitectureArm: - return "arm", nil - default: - return "", fmt.Errorf("Unknown processor architecture") - } -} - -// NumProcs returns the number of processors on the system -func NumProcs() uint32 { - var sysinfo systeminfo - syscall.Syscall(procGetSystemInfo.Addr(), 1, uintptr(unsafe.Pointer(&sysinfo)), 0, 0) - return sysinfo.dwNumberOfProcessors -} diff --git a/vendor/github.com/docker/docker/pkg/platform/platform.go b/vendor/github.com/docker/docker/pkg/platform/platform.go deleted file mode 100644 index f6b02b734..000000000 --- a/vendor/github.com/docker/docker/pkg/platform/platform.go +++ /dev/null @@ -1,23 +0,0 @@ -package platform // import "github.com/docker/docker/pkg/platform" - -import ( - "runtime" - - "github.com/sirupsen/logrus" -) - -var ( - // Architecture holds the runtime architecture of the process. - Architecture string - // OSType holds the runtime operating system type (Linux, …) of the process. 
- OSType string -) - -func init() { - var err error - Architecture, err = runtimeArchitecture() - if err != nil { - logrus.Errorf("Could not read system architecture info: %v", err) - } - OSType = runtime.GOOS -} diff --git a/vendor/github.com/docker/docker/pkg/plugingetter/getter.go b/vendor/github.com/docker/docker/pkg/plugingetter/getter.go deleted file mode 100644 index 370e0d5b9..000000000 --- a/vendor/github.com/docker/docker/pkg/plugingetter/getter.go +++ /dev/null @@ -1,52 +0,0 @@ -package plugingetter // import "github.com/docker/docker/pkg/plugingetter" - -import ( - "net" - "time" - - "github.com/docker/docker/pkg/plugins" -) - -const ( - // Lookup doesn't update RefCount - Lookup = 0 - // Acquire increments RefCount - Acquire = 1 - // Release decrements RefCount - Release = -1 -) - -// CompatPlugin is an abstraction to handle both v2(new) and v1(legacy) plugins. -type CompatPlugin interface { - Name() string - ScopedPath(string) string - IsV1() bool - PluginWithV1Client -} - -// PluginWithV1Client is a plugin that directly utilizes the v1/http plugin client -type PluginWithV1Client interface { - Client() *plugins.Client -} - -// PluginAddr is a plugin that exposes the socket address for creating custom clients rather than the built-in `*plugins.Client` -type PluginAddr interface { - Addr() net.Addr - Timeout() time.Duration - Protocol() string -} - -// CountedPlugin is a plugin which is reference counted. -type CountedPlugin interface { - Acquire() - Release() - CompatPlugin -} - -// PluginGetter is the interface implemented by Store -type PluginGetter interface { - Get(name, capability string, mode int) (CompatPlugin, error) - GetAllByCap(capability string) ([]CompatPlugin, error) - GetAllManagedPluginsByCap(capability string) []CompatPlugin - Handle(capability string, callback func(string, *plugins.Client)) -} diff --git a/vendor/github.com/docker/docker/pkg/plugins/client.go b/vendor/github.com/docker/docker/pkg/plugins/client.go deleted file mode 100644 index 035330535..000000000 --- a/vendor/github.com/docker/docker/pkg/plugins/client.go +++ /dev/null @@ -1,242 +0,0 @@ -package plugins // import "github.com/docker/docker/pkg/plugins" - -import ( - "bytes" - "context" - "encoding/json" - "io" - "io/ioutil" - "net/http" - "net/url" - "time" - - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/plugins/transport" - "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" - "github.com/sirupsen/logrus" -) - -const ( - defaultTimeOut = 30 -) - -func newTransport(addr string, tlsConfig *tlsconfig.Options) (transport.Transport, error) { - tr := &http.Transport{} - - if tlsConfig != nil { - c, err := tlsconfig.Client(*tlsConfig) - if err != nil { - return nil, err - } - tr.TLSClientConfig = c - } - - u, err := url.Parse(addr) - if err != nil { - return nil, err - } - socket := u.Host - if socket == "" { - // valid local socket addresses have the host empty. - socket = u.Path - } - if err := sockets.ConfigureTransport(tr, u.Scheme, socket); err != nil { - return nil, err - } - scheme := httpScheme(u) - - return transport.NewHTTPTransport(tr, scheme, socket), nil -} - -// NewClient creates a new plugin client (http). -func NewClient(addr string, tlsConfig *tlsconfig.Options) (*Client, error) { - clientTransport, err := newTransport(addr, tlsConfig) - if err != nil { - return nil, err - } - return newClientWithTransport(clientTransport, 0), nil -} - -// NewClientWithTimeout creates a new plugin client (http). 
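The platform package above publishes its results as package-level variables filled in by init(), so consumers simply read them; a trivial sketch:

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/platform"
    )

    func main() {
        // Both values are populated during package initialization.
        fmt.Printf("running on %s/%s\n", platform.OSType, platform.Architecture)
    }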
-func NewClientWithTimeout(addr string, tlsConfig *tlsconfig.Options, timeout time.Duration) (*Client, error) { - clientTransport, err := newTransport(addr, tlsConfig) - if err != nil { - return nil, err - } - return newClientWithTransport(clientTransport, timeout), nil -} - -// newClientWithTransport creates a new plugin client with a given transport. -func newClientWithTransport(tr transport.Transport, timeout time.Duration) *Client { - return &Client{ - http: &http.Client{ - Transport: tr, - Timeout: timeout, - }, - requestFactory: tr, - } -} - -// Client represents a plugin client. -type Client struct { - http *http.Client // http client to use - requestFactory transport.RequestFactory -} - -// RequestOpts is the set of options that can be passed into a request -type RequestOpts struct { - Timeout time.Duration -} - -// WithRequestTimeout sets a timeout duration for plugin requests -func WithRequestTimeout(t time.Duration) func(*RequestOpts) { - return func(o *RequestOpts) { - o.Timeout = t - } -} - -// Call calls the specified method with the specified arguments for the plugin. -// It will retry for 30 seconds if a failure occurs when calling. -func (c *Client) Call(serviceMethod string, args, ret interface{}) error { - return c.CallWithOptions(serviceMethod, args, ret) -} - -// CallWithOptions is just like call except it takes options -func (c *Client) CallWithOptions(serviceMethod string, args interface{}, ret interface{}, opts ...func(*RequestOpts)) error { - var buf bytes.Buffer - if args != nil { - if err := json.NewEncoder(&buf).Encode(args); err != nil { - return err - } - } - body, err := c.callWithRetry(serviceMethod, &buf, true, opts...) - if err != nil { - return err - } - defer body.Close() - if ret != nil { - if err := json.NewDecoder(body).Decode(&ret); err != nil { - logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) - return err - } - } - return nil -} - -// Stream calls the specified method with the specified arguments for the plugin and returns the response body -func (c *Client) Stream(serviceMethod string, args interface{}) (io.ReadCloser, error) { - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(args); err != nil { - return nil, err - } - return c.callWithRetry(serviceMethod, &buf, true) -} - -// SendFile calls the specified method, and passes through the IO stream -func (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{}) error { - body, err := c.callWithRetry(serviceMethod, data, true) - if err != nil { - return err - } - defer body.Close() - if err := json.NewDecoder(body).Decode(&ret); err != nil { - logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) - return err - } - return nil -} - -func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool, reqOpts ...func(*RequestOpts)) (io.ReadCloser, error) { - var retries int - start := time.Now() - - var opts RequestOpts - for _, o := range reqOpts { - o(&opts) - } - - for { - req, err := c.requestFactory.NewRequest(serviceMethod, data) - if err != nil { - return nil, err - } - - cancelRequest := func() {} - if opts.Timeout > 0 { - var ctx context.Context - ctx, cancelRequest = context.WithTimeout(req.Context(), opts.Timeout) - req = req.WithContext(ctx) - } - - resp, err := c.http.Do(req) - if err != nil { - cancelRequest() - if !retry { - return nil, err - } - - timeOff := backoff(retries) - if abort(start, timeOff) { - return nil, err - } - retries++ - logrus.Warnf("Unable to connect to plugin: %s%s: %v, retrying in 
%v", req.URL.Host, req.URL.Path, err, timeOff) - time.Sleep(timeOff) - continue - } - - if resp.StatusCode != http.StatusOK { - b, err := ioutil.ReadAll(resp.Body) - resp.Body.Close() - cancelRequest() - if err != nil { - return nil, &statusError{resp.StatusCode, serviceMethod, err.Error()} - } - - // Plugins' Response(s) should have an Err field indicating what went - // wrong. Try to unmarshal into ResponseErr. Otherwise fallback to just - // return the string(body) - type responseErr struct { - Err string - } - remoteErr := responseErr{} - if err := json.Unmarshal(b, &remoteErr); err == nil { - if remoteErr.Err != "" { - return nil, &statusError{resp.StatusCode, serviceMethod, remoteErr.Err} - } - } - // old way... - return nil, &statusError{resp.StatusCode, serviceMethod, string(b)} - } - return ioutils.NewReadCloserWrapper(resp.Body, func() error { - err := resp.Body.Close() - cancelRequest() - return err - }), nil - } -} - -func backoff(retries int) time.Duration { - b, max := 1, defaultTimeOut - for b < max && retries > 0 { - b *= 2 - retries-- - } - if b > max { - b = max - } - return time.Duration(b) * time.Second -} - -func abort(start time.Time, timeOff time.Duration) bool { - return timeOff+time.Since(start) >= time.Duration(defaultTimeOut)*time.Second -} - -func httpScheme(u *url.URL) string { - scheme := u.Scheme - if scheme != "https" { - scheme = "http" - } - return scheme -} diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery.go b/vendor/github.com/docker/docker/pkg/plugins/discovery.go deleted file mode 100644 index 4b79bd29a..000000000 --- a/vendor/github.com/docker/docker/pkg/plugins/discovery.go +++ /dev/null @@ -1,154 +0,0 @@ -package plugins // import "github.com/docker/docker/pkg/plugins" - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/url" - "os" - "path/filepath" - "strings" - "sync" - - "github.com/pkg/errors" -) - -var ( - // ErrNotFound plugin not found - ErrNotFound = errors.New("plugin not found") - socketsPath = "/run/docker/plugins" -) - -// localRegistry defines a registry that is local (using unix socket). -type localRegistry struct{} - -func newLocalRegistry() localRegistry { - return localRegistry{} -} - -// Scan scans all the plugin paths and returns all the names it found -func Scan() ([]string, error) { - var names []string - dirEntries, err := ioutil.ReadDir(socketsPath) - if err != nil && !os.IsNotExist(err) { - return nil, errors.Wrap(err, "error reading dir entries") - } - - for _, fi := range dirEntries { - if fi.IsDir() { - fi, err = os.Stat(filepath.Join(socketsPath, fi.Name(), fi.Name()+".sock")) - if err != nil { - continue - } - } - - if fi.Mode()&os.ModeSocket != 0 { - names = append(names, strings.TrimSuffix(filepath.Base(fi.Name()), filepath.Ext(fi.Name()))) - } - } - - for _, p := range specsPaths { - dirEntries, err := ioutil.ReadDir(p) - if err != nil && !os.IsNotExist(err) { - return nil, errors.Wrap(err, "error reading dir entries") - } - - for _, fi := range dirEntries { - if fi.IsDir() { - infos, err := ioutil.ReadDir(filepath.Join(p, fi.Name())) - if err != nil { - continue - } - - for _, info := range infos { - if strings.TrimSuffix(info.Name(), filepath.Ext(info.Name())) == fi.Name() { - fi = info - break - } - } - } - - ext := filepath.Ext(fi.Name()) - switch ext { - case ".spec", ".json": - plugin := strings.TrimSuffix(fi.Name(), ext) - names = append(names, plugin) - default: - } - } - } - return names, nil -} - -// Plugin returns the plugin registered with the given name (or returns an error). 
-func (l *localRegistry) Plugin(name string) (*Plugin, error) { - socketpaths := pluginPaths(socketsPath, name, ".sock") - - for _, p := range socketpaths { - if fi, err := os.Stat(p); err == nil && fi.Mode()&os.ModeSocket != 0 { - return NewLocalPlugin(name, "unix://"+p), nil - } - } - - var txtspecpaths []string - for _, p := range specsPaths { - txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".spec")...) - txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".json")...) - } - - for _, p := range txtspecpaths { - if _, err := os.Stat(p); err == nil { - if strings.HasSuffix(p, ".json") { - return readPluginJSONInfo(name, p) - } - return readPluginInfo(name, p) - } - } - return nil, errors.Wrapf(ErrNotFound, "could not find plugin %s in v1 plugin registry", name) -} - -func readPluginInfo(name, path string) (*Plugin, error) { - content, err := ioutil.ReadFile(path) - if err != nil { - return nil, err - } - addr := strings.TrimSpace(string(content)) - - u, err := url.Parse(addr) - if err != nil { - return nil, err - } - - if len(u.Scheme) == 0 { - return nil, fmt.Errorf("Unknown protocol") - } - - return NewLocalPlugin(name, addr), nil -} - -func readPluginJSONInfo(name, path string) (*Plugin, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - - var p Plugin - if err := json.NewDecoder(f).Decode(&p); err != nil { - return nil, err - } - p.name = name - if p.TLSConfig != nil && len(p.TLSConfig.CAFile) == 0 { - p.TLSConfig.InsecureSkipVerify = true - } - p.activateWait = sync.NewCond(&sync.Mutex{}) - - return &p, nil -} - -func pluginPaths(base, name, ext string) []string { - return []string{ - filepath.Join(base, name+ext), - filepath.Join(base, name, name+ext), - } -} diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go b/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go deleted file mode 100644 index 58058f282..000000000 --- a/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build !windows - -package plugins // import "github.com/docker/docker/pkg/plugins" - -var specsPaths = []string{"/etc/docker/plugins", "/usr/lib/docker/plugins"} diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go b/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go deleted file mode 100644 index f0af3477f..000000000 --- a/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go +++ /dev/null @@ -1,8 +0,0 @@ -package plugins // import "github.com/docker/docker/pkg/plugins" - -import ( - "os" - "path/filepath" -) - -var specsPaths = []string{filepath.Join(os.Getenv("programdata"), "docker", "plugins")} diff --git a/vendor/github.com/docker/docker/pkg/plugins/errors.go b/vendor/github.com/docker/docker/pkg/plugins/errors.go deleted file mode 100644 index 6735c304b..000000000 --- a/vendor/github.com/docker/docker/pkg/plugins/errors.go +++ /dev/null @@ -1,33 +0,0 @@ -package plugins // import "github.com/docker/docker/pkg/plugins" - -import ( - "fmt" - "net/http" -) - -type statusError struct { - status int - method string - err string -} - -// Error returns a formatted string for this error type -func (e *statusError) Error() string { - return fmt.Sprintf("%s: %v", e.method, e.err) -} - -// IsNotFound indicates if the passed in error is from an http.StatusNotFound from the plugin -func IsNotFound(err error) bool { - return isStatusError(err, http.StatusNotFound) -} - -func isStatusError(err error, status int) bool { - if err == nil { - 
return false - } - e, ok := err.(*statusError) - if !ok { - return false - } - return e.status == status -} diff --git a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/foo.go b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/foo.go deleted file mode 100644 index d27e28ebe..000000000 --- a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/foo.go +++ /dev/null @@ -1,83 +0,0 @@ -package foo // import "github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures" - -import ( - aliasedio "io" - - "github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/otherfixture" -) - -type wobble struct { - Some string - Val string - Inception *wobble -} - -// Fooer is an empty interface used for tests. -type Fooer interface{} - -// Fooer2 is an interface used for tests. -type Fooer2 interface { - Foo() -} - -// Fooer3 is an interface used for tests. -type Fooer3 interface { - Foo() - Bar(a string) - Baz(a string) (err error) - Qux(a, b string) (val string, err error) - Wobble() (w *wobble) - Wiggle() (w wobble) - WiggleWobble(a []*wobble, b []wobble, c map[string]*wobble, d map[*wobble]wobble, e map[string][]wobble, f []*otherfixture.Spaceship) (g map[*wobble]wobble, h [][]*wobble, i otherfixture.Spaceship, j *otherfixture.Spaceship, k map[*otherfixture.Spaceship]otherfixture.Spaceship, l []otherfixture.Spaceship) -} - -// Fooer4 is an interface used for tests. -type Fooer4 interface { - Foo() error -} - -// Bar is an interface used for tests. -type Bar interface { - Boo(a string, b string) (s string, err error) -} - -// Fooer5 is an interface used for tests. -type Fooer5 interface { - Foo() - Bar -} - -// Fooer6 is an interface used for tests. -type Fooer6 interface { - Foo(a otherfixture.Spaceship) -} - -// Fooer7 is an interface used for tests. -type Fooer7 interface { - Foo(a *otherfixture.Spaceship) -} - -// Fooer8 is an interface used for tests. -type Fooer8 interface { - Foo(a map[string]otherfixture.Spaceship) -} - -// Fooer9 is an interface used for tests. -type Fooer9 interface { - Foo(a map[string]*otherfixture.Spaceship) -} - -// Fooer10 is an interface used for tests. -type Fooer10 interface { - Foo(a []otherfixture.Spaceship) -} - -// Fooer11 is an interface used for tests. -type Fooer11 interface { - Foo(a []*otherfixture.Spaceship) -} - -// Fooer12 is an interface used for tests. 
-type Fooer12 interface { - Foo(a aliasedio.Reader) -} diff --git a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go deleted file mode 100644 index c603f6778..000000000 --- a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go +++ /dev/null @@ -1,4 +0,0 @@ -package otherfixture // import "github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/otherfixture" - -// Spaceship is a fixture for tests -type Spaceship struct{} diff --git a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/main.go b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/main.go deleted file mode 100644 index e77a7d45f..000000000 --- a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/main.go +++ /dev/null @@ -1,91 +0,0 @@ -package main - -import ( - "bytes" - "flag" - "fmt" - "go/format" - "io/ioutil" - "os" - "unicode" - "unicode/utf8" -) - -type stringSet struct { - values map[string]struct{} -} - -func (s stringSet) String() string { - return "" -} - -func (s stringSet) Set(value string) error { - s.values[value] = struct{}{} - return nil -} -func (s stringSet) GetValues() map[string]struct{} { - return s.values -} - -var ( - typeName = flag.String("type", "", "interface type to generate plugin rpc proxy for") - rpcName = flag.String("name", *typeName, "RPC name, set if different from type") - inputFile = flag.String("i", "", "input file path") - outputFile = flag.String("o", *inputFile+"_proxy.go", "output file path") - - skipFuncs map[string]struct{} - flSkipFuncs = stringSet{make(map[string]struct{})} - - flBuildTags = stringSet{make(map[string]struct{})} -) - -func errorOut(msg string, err error) { - if err == nil { - return - } - fmt.Fprintf(os.Stderr, "%s: %v\n", msg, err) - os.Exit(1) -} - -func checkFlags() error { - if *outputFile == "" { - return fmt.Errorf("missing required flag `-o`") - } - if *inputFile == "" { - return fmt.Errorf("missing required flag `-i`") - } - return nil -} - -func main() { - flag.Var(flSkipFuncs, "skip", "skip parsing for function") - flag.Var(flBuildTags, "tag", "build tags to add to generated files") - flag.Parse() - skipFuncs = flSkipFuncs.GetValues() - - errorOut("error", checkFlags()) - - pkg, err := Parse(*inputFile, *typeName) - errorOut(fmt.Sprintf("error parsing requested type %s", *typeName), err) - - var analysis = struct { - InterfaceType string - RPCName string - BuildTags map[string]struct{} - *ParsedPkg - }{toLower(*typeName), *rpcName, flBuildTags.GetValues(), pkg} - var buf bytes.Buffer - - errorOut("parser error", generatedTempl.Execute(&buf, analysis)) - src, err := format.Source(buf.Bytes()) - errorOut("error formatting generated source:\n"+buf.String(), err) - errorOut("error writing file", ioutil.WriteFile(*outputFile, src, 0644)) -} - -func toLower(s string) string { - if s == "" { - return "" - } - r, n := utf8.DecodeRuneInString(s) - return string(unicode.ToLower(r)) + s[n:] -} diff --git a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser.go b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser.go deleted file mode 100644 index 6c547e18c..000000000 --- a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/parser.go +++ /dev/null @@ -1,263 +0,0 @@ -package main - -import ( - "errors" - "fmt" - "go/ast" - "go/parser" - "go/token" - "path" - "reflect" - "strings" -) - -var errBadReturn = errors.New("found return arg 
with no name: all args must be named") - -type errUnexpectedType struct { - expected string - actual interface{} -} - -func (e errUnexpectedType) Error() string { - return fmt.Sprintf("got wrong type expecting %s, got: %v", e.expected, reflect.TypeOf(e.actual)) -} - -// ParsedPkg holds information about a package that has been parsed, -// its name and the list of functions. -type ParsedPkg struct { - Name string - Functions []function - Imports []importSpec -} - -type function struct { - Name string - Args []arg - Returns []arg - Doc string -} - -type arg struct { - Name string - ArgType string - PackageSelector string -} - -func (a *arg) String() string { - return a.Name + " " + a.ArgType -} - -type importSpec struct { - Name string - Path string -} - -func (s *importSpec) String() string { - var ss string - if len(s.Name) != 0 { - ss += s.Name - } - ss += s.Path - return ss -} - -// Parse parses the given file for an interface definition with the given name. -func Parse(filePath string, objName string) (*ParsedPkg, error) { - fs := token.NewFileSet() - pkg, err := parser.ParseFile(fs, filePath, nil, parser.AllErrors) - if err != nil { - return nil, err - } - p := &ParsedPkg{} - p.Name = pkg.Name.Name - obj, exists := pkg.Scope.Objects[objName] - if !exists { - return nil, fmt.Errorf("could not find object %s in %s", objName, filePath) - } - if obj.Kind != ast.Typ { - return nil, fmt.Errorf("exected type, got %s", obj.Kind) - } - spec, ok := obj.Decl.(*ast.TypeSpec) - if !ok { - return nil, errUnexpectedType{"*ast.TypeSpec", obj.Decl} - } - iface, ok := spec.Type.(*ast.InterfaceType) - if !ok { - return nil, errUnexpectedType{"*ast.InterfaceType", spec.Type} - } - - p.Functions, err = parseInterface(iface) - if err != nil { - return nil, err - } - - // figure out what imports will be needed - imports := make(map[string]importSpec) - for _, f := range p.Functions { - args := append(f.Args, f.Returns...) - for _, arg := range args { - if len(arg.PackageSelector) == 0 { - continue - } - - for _, i := range pkg.Imports { - if i.Name != nil { - if i.Name.Name != arg.PackageSelector { - continue - } - imports[i.Path.Value] = importSpec{Name: arg.PackageSelector, Path: i.Path.Value} - break - } - - _, name := path.Split(i.Path.Value) - splitName := strings.Split(name, "-") - if len(splitName) > 1 { - name = splitName[len(splitName)-1] - } - // import paths have quotes already added in, so need to remove them for name comparison - name = strings.TrimPrefix(name, `"`) - name = strings.TrimSuffix(name, `"`) - if name == arg.PackageSelector { - imports[i.Path.Value] = importSpec{Path: i.Path.Value} - break - } - } - } - } - - for _, spec := range imports { - p.Imports = append(p.Imports, spec) - } - - return p, nil -} - -func parseInterface(iface *ast.InterfaceType) ([]function, error) { - var functions []function - for _, field := range iface.Methods.List { - switch f := field.Type.(type) { - case *ast.FuncType: - method, err := parseFunc(field) - if err != nil { - return nil, err - } - if method == nil { - continue - } - functions = append(functions, *method) - case *ast.Ident: - spec, ok := f.Obj.Decl.(*ast.TypeSpec) - if !ok { - return nil, errUnexpectedType{"*ast.TypeSpec", f.Obj.Decl} - } - iface, ok := spec.Type.(*ast.InterfaceType) - if !ok { - return nil, errUnexpectedType{"*ast.TypeSpec", spec.Type} - } - funcs, err := parseInterface(iface) - if err != nil { - fmt.Println(err) - continue - } - functions = append(functions, funcs...) 
- default: - return nil, errUnexpectedType{"*astFuncType or *ast.Ident", f} - } - } - return functions, nil -} - -func parseFunc(field *ast.Field) (*function, error) { - f := field.Type.(*ast.FuncType) - method := &function{Name: field.Names[0].Name} - if _, exists := skipFuncs[method.Name]; exists { - fmt.Println("skipping:", method.Name) - return nil, nil - } - if f.Params != nil { - args, err := parseArgs(f.Params.List) - if err != nil { - return nil, err - } - method.Args = args - } - if f.Results != nil { - returns, err := parseArgs(f.Results.List) - if err != nil { - return nil, fmt.Errorf("error parsing function returns for %q: %v", method.Name, err) - } - method.Returns = returns - } - return method, nil -} - -func parseArgs(fields []*ast.Field) ([]arg, error) { - var args []arg - for _, f := range fields { - if len(f.Names) == 0 { - return nil, errBadReturn - } - for _, name := range f.Names { - p, err := parseExpr(f.Type) - if err != nil { - return nil, err - } - args = append(args, arg{name.Name, p.value, p.pkg}) - } - } - return args, nil -} - -type parsedExpr struct { - value string - pkg string -} - -func parseExpr(e ast.Expr) (parsedExpr, error) { - var parsed parsedExpr - switch i := e.(type) { - case *ast.Ident: - parsed.value += i.Name - case *ast.StarExpr: - p, err := parseExpr(i.X) - if err != nil { - return parsed, err - } - parsed.value += "*" - parsed.value += p.value - parsed.pkg = p.pkg - case *ast.SelectorExpr: - p, err := parseExpr(i.X) - if err != nil { - return parsed, err - } - parsed.pkg = p.value - parsed.value += p.value + "." - parsed.value += i.Sel.Name - case *ast.MapType: - parsed.value += "map[" - p, err := parseExpr(i.Key) - if err != nil { - return parsed, err - } - parsed.value += p.value - parsed.value += "]" - p, err = parseExpr(i.Value) - if err != nil { - return parsed, err - } - parsed.value += p.value - parsed.pkg = p.pkg - case *ast.ArrayType: - parsed.value += "[]" - p, err := parseExpr(i.Elt) - if err != nil { - return parsed, err - } - parsed.value += p.value - parsed.pkg = p.pkg - default: - return parsed, errUnexpectedType{"*ast.Ident or *ast.StarExpr", i} - } - return parsed, nil -} diff --git a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/template.go b/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/template.go deleted file mode 100644 index 50ed9293c..000000000 --- a/vendor/github.com/docker/docker/pkg/plugins/pluginrpc-gen/template.go +++ /dev/null @@ -1,118 +0,0 @@ -package main - -import ( - "strings" - "text/template" -) - -func printArgs(args []arg) string { - var argStr []string - for _, arg := range args { - argStr = append(argStr, arg.String()) - } - return strings.Join(argStr, ", ") -} - -func buildImports(specs []importSpec) string { - if len(specs) == 0 { - return `import "errors"` - } - imports := "import(\n" - imports += "\t\"errors\"\n" - for _, i := range specs { - imports += "\t" + i.String() + "\n" - } - imports += ")" - return imports -} - -func marshalType(t string) string { - switch t { - case "error": - // convert error types to plain strings to ensure the values are encoded/decoded properly - return "string" - default: - return t - } -} - -func isErr(t string) bool { - switch t { - case "error": - return true - default: - return false - } -} - -// Need to use this helper due to issues with go-vet -func buildTag(s string) string { - return "+build " + s -} - -var templFuncs = template.FuncMap{ - "printArgs": printArgs, - "marshalType": marshalType, - "isErr": isErr, - "lower": 
strings.ToLower, - "title": title, - "tag": buildTag, - "imports": buildImports, -} - -func title(s string) string { - if strings.ToLower(s) == "id" { - return "ID" - } - return strings.Title(s) -} - -var generatedTempl = template.Must(template.New("rpc_cient").Funcs(templFuncs).Parse(` -// generated code - DO NOT EDIT -{{ range $k, $v := .BuildTags }} - // {{ tag $k }} {{ end }} - -package {{ .Name }} - -{{ imports .Imports }} - -type client interface{ - Call(string, interface{}, interface{}) error -} - -type {{ .InterfaceType }}Proxy struct { - client -} - -{{ range .Functions }} - type {{ $.InterfaceType }}Proxy{{ .Name }}Request struct{ - {{ range .Args }} - {{ title .Name }} {{ .ArgType }} {{ end }} - } - - type {{ $.InterfaceType }}Proxy{{ .Name }}Response struct{ - {{ range .Returns }} - {{ title .Name }} {{ marshalType .ArgType }} {{ end }} - } - - func (pp *{{ $.InterfaceType }}Proxy) {{ .Name }}({{ printArgs .Args }}) ({{ printArgs .Returns }}) { - var( - req {{ $.InterfaceType }}Proxy{{ .Name }}Request - ret {{ $.InterfaceType }}Proxy{{ .Name }}Response - ) - {{ range .Args }} - req.{{ title .Name }} = {{ lower .Name }} {{ end }} - if err = pp.Call("{{ $.RPCName }}.{{ .Name }}", req, &ret); err != nil { - return - } - {{ range $r := .Returns }} - {{ if isErr .ArgType }} - if ret.{{ title .Name }} != "" { - {{ lower .Name }} = errors.New(ret.{{ title .Name }}) - } {{ end }} - {{ if isErr .ArgType | not }} {{ lower .Name }} = ret.{{ title .Name }} {{ end }} {{ end }} - - return - } -{{ end }} -`)) diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins.go b/vendor/github.com/docker/docker/pkg/plugins/plugins.go deleted file mode 100644 index 6962079df..000000000 --- a/vendor/github.com/docker/docker/pkg/plugins/plugins.go +++ /dev/null @@ -1,337 +0,0 @@ -// Package plugins provides structures and helper functions to manage Docker -// plugins. -// -// Docker discovers plugins by looking for them in the plugin directory whenever -// a user or container tries to use one by name. UNIX domain socket files must -// be located under /run/docker/plugins, whereas spec files can be located -// either under /etc/docker/plugins or /usr/lib/docker/plugins. This is handled -// by the Registry interface, which lets you list all plugins or get a plugin by -// its name if it exists. -// -// The plugins need to implement an HTTP server and bind this to the UNIX socket -// or the address specified in the spec files. -// A handshake is send at /Plugin.Activate, and plugins are expected to return -// a Manifest with a list of of Docker subsystems which this plugin implements. -// -// In order to use a plugins, you can use the ``Get`` with the name of the -// plugin and the subsystem it implements. -// -// plugin, err := plugins.Get("example", "VolumeDriver") -// if err != nil { -// return fmt.Errorf("Error looking up volume plugin example: %v", err) -// } -package plugins // import "github.com/docker/docker/pkg/plugins" - -import ( - "errors" - "sync" - "time" - - "github.com/docker/go-connections/tlsconfig" - "github.com/sirupsen/logrus" -) - -// ProtocolSchemeHTTPV1 is the name of the protocol used for interacting with plugins using this package. -const ProtocolSchemeHTTPV1 = "moby.plugins.http/v1" - -var ( - // ErrNotImplements is returned if the plugin does not implement the requested driver. 
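The pluginrpc-gen tool above is typically driven through go generate; a hypothetical invocation using only the flags its main.go defines (-i, -o, -type, -name), with the interface and output file names invented for illustration:

    //go:generate pluginrpc-gen -i $GOFILE -type volumeDriver -name VolumeDriver -o volume_proxy.go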
- ErrNotImplements = errors.New("Plugin does not implement the requested driver") -) - -type plugins struct { - sync.Mutex - plugins map[string]*Plugin -} - -type extpointHandlers struct { - sync.RWMutex - extpointHandlers map[string][]func(string, *Client) -} - -var ( - storage = plugins{plugins: make(map[string]*Plugin)} - handlers = extpointHandlers{extpointHandlers: make(map[string][]func(string, *Client))} -) - -// Manifest lists what a plugin implements. -type Manifest struct { - // List of subsystem the plugin implements. - Implements []string -} - -// Plugin is the definition of a docker plugin. -type Plugin struct { - // Name of the plugin - name string - // Address of the plugin - Addr string - // TLS configuration of the plugin - TLSConfig *tlsconfig.Options - // Client attached to the plugin - client *Client - // Manifest of the plugin (see above) - Manifest *Manifest `json:"-"` - - // wait for activation to finish - activateWait *sync.Cond - // error produced by activation - activateErr error - // keeps track of callback handlers run against this plugin - handlersRun bool -} - -// Name returns the name of the plugin. -func (p *Plugin) Name() string { - return p.name -} - -// Client returns a ready-to-use plugin client that can be used to communicate with the plugin. -func (p *Plugin) Client() *Client { - return p.client -} - -// Protocol returns the protocol name/version used for plugins in this package. -func (p *Plugin) Protocol() string { - return ProtocolSchemeHTTPV1 -} - -// IsV1 returns true for V1 plugins and false otherwise. -func (p *Plugin) IsV1() bool { - return true -} - -// NewLocalPlugin creates a new local plugin. -func NewLocalPlugin(name, addr string) *Plugin { - return &Plugin{ - name: name, - Addr: addr, - // TODO: change to nil - TLSConfig: &tlsconfig.Options{InsecureSkipVerify: true}, - activateWait: sync.NewCond(&sync.Mutex{}), - } -} - -func (p *Plugin) activate() error { - p.activateWait.L.Lock() - - if p.activated() { - p.runHandlers() - p.activateWait.L.Unlock() - return p.activateErr - } - - p.activateErr = p.activateWithLock() - - p.runHandlers() - p.activateWait.L.Unlock() - p.activateWait.Broadcast() - return p.activateErr -} - -// runHandlers runs the registered handlers for the implemented plugin types -// This should only be run after activation, and while the activation lock is held. -func (p *Plugin) runHandlers() { - if !p.activated() { - return - } - - handlers.RLock() - if !p.handlersRun { - for _, iface := range p.Manifest.Implements { - hdlrs, handled := handlers.extpointHandlers[iface] - if !handled { - continue - } - for _, handler := range hdlrs { - handler(p.name, p.client) - } - } - p.handlersRun = true - } - handlers.RUnlock() - -} - -// activated returns if the plugin has already been activated. 
-// This should only be called with the activation lock held -func (p *Plugin) activated() bool { - return p.Manifest != nil -} - -func (p *Plugin) activateWithLock() error { - c, err := NewClient(p.Addr, p.TLSConfig) - if err != nil { - return err - } - p.client = c - - m := new(Manifest) - if err = p.client.Call("Plugin.Activate", nil, m); err != nil { - return err - } - - p.Manifest = m - return nil -} - -func (p *Plugin) waitActive() error { - p.activateWait.L.Lock() - for !p.activated() && p.activateErr == nil { - p.activateWait.Wait() - } - p.activateWait.L.Unlock() - return p.activateErr -} - -func (p *Plugin) implements(kind string) bool { - if p.Manifest == nil { - return false - } - for _, driver := range p.Manifest.Implements { - if driver == kind { - return true - } - } - return false -} - -func load(name string) (*Plugin, error) { - return loadWithRetry(name, true) -} - -func loadWithRetry(name string, retry bool) (*Plugin, error) { - registry := newLocalRegistry() - start := time.Now() - - var retries int - for { - pl, err := registry.Plugin(name) - if err != nil { - if !retry { - return nil, err - } - - timeOff := backoff(retries) - if abort(start, timeOff) { - return nil, err - } - retries++ - logrus.Warnf("Unable to locate plugin: %s, retrying in %v", name, timeOff) - time.Sleep(timeOff) - continue - } - - storage.Lock() - if pl, exists := storage.plugins[name]; exists { - storage.Unlock() - return pl, pl.activate() - } - storage.plugins[name] = pl - storage.Unlock() - - err = pl.activate() - - if err != nil { - storage.Lock() - delete(storage.plugins, name) - storage.Unlock() - } - - return pl, err - } -} - -func get(name string) (*Plugin, error) { - storage.Lock() - pl, ok := storage.plugins[name] - storage.Unlock() - if ok { - return pl, pl.activate() - } - return load(name) -} - -// Get returns the plugin given the specified name and requested implementation. -func Get(name, imp string) (*Plugin, error) { - pl, err := get(name) - if err != nil { - return nil, err - } - if err := pl.waitActive(); err == nil && pl.implements(imp) { - logrus.Debugf("%s implements: %s", name, imp) - return pl, nil - } - return nil, ErrNotImplements -} - -// Handle adds the specified function to the extpointHandlers. 
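[Reviewer context, not part of the removed file: a minimal sketch of the lookup path deleted above. The plugin name "example" is illustrative; the call blocks until activation finishes and fails with ErrNotImplements when the manifest lacks the subsystem.]

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/plugins"
    )

    func main() {
        // Get waits for the /Plugin.Activate handshake to complete and returns
        // ErrNotImplements if "VolumeDriver" is absent from the plugin's manifest.
        pl, err := plugins.Get("example", "VolumeDriver")
        if err != nil {
            fmt.Println("volume plugin lookup failed:", err)
            return
        }
        fmt.Println("activated plugin:", pl.Name())
    }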
-func Handle(iface string, fn func(string, *Client)) { - handlers.Lock() - hdlrs, ok := handlers.extpointHandlers[iface] - if !ok { - hdlrs = []func(string, *Client){} - } - - hdlrs = append(hdlrs, fn) - handlers.extpointHandlers[iface] = hdlrs - - storage.Lock() - for _, p := range storage.plugins { - p.activateWait.L.Lock() - if p.activated() && p.implements(iface) { - p.handlersRun = false - } - p.activateWait.L.Unlock() - } - storage.Unlock() - - handlers.Unlock() -} - -// GetAll returns all the plugins for the specified implementation -func GetAll(imp string) ([]*Plugin, error) { - pluginNames, err := Scan() - if err != nil { - return nil, err - } - - type plLoad struct { - pl *Plugin - err error - } - - chPl := make(chan *plLoad, len(pluginNames)) - var wg sync.WaitGroup - for _, name := range pluginNames { - storage.Lock() - pl, ok := storage.plugins[name] - storage.Unlock() - if ok { - chPl <- &plLoad{pl, nil} - continue - } - - wg.Add(1) - go func(name string) { - defer wg.Done() - pl, err := loadWithRetry(name, false) - chPl <- &plLoad{pl, err} - }(name) - } - - wg.Wait() - close(chPl) - - var out []*Plugin - for pl := range chPl { - if pl.err != nil { - logrus.Error(pl.err) - continue - } - if err := pl.pl.waitActive(); err == nil && pl.pl.implements(imp) { - out = append(out, pl.pl) - } - } - return out, nil -} diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go b/vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go deleted file mode 100644 index cdfbe9345..000000000 --- a/vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !windows - -package plugins // import "github.com/docker/docker/pkg/plugins" - -// ScopedPath returns the path scoped to the plugin's rootfs. -// For v1 plugins, this always returns the path unchanged as v1 plugins run directly on the host. -func (p *Plugin) ScopedPath(s string) string { - return s -} diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go b/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go deleted file mode 100644 index ddf1d786c..000000000 --- a/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package plugins // import "github.com/docker/docker/pkg/plugins" - -// ScopedPath returns the path scoped to the plugin's rootfs. -// For v1 plugins, this always returns the path unchanged as v1 plugins run directly on the host. -func (p *Plugin) ScopedPath(s string) string { - return s -} diff --git a/vendor/github.com/docker/docker/pkg/plugins/transport/http.go b/vendor/github.com/docker/docker/pkg/plugins/transport/http.go deleted file mode 100644 index 76d3bdb71..000000000 --- a/vendor/github.com/docker/docker/pkg/plugins/transport/http.go +++ /dev/null @@ -1,36 +0,0 @@ -package transport // import "github.com/docker/docker/pkg/plugins/transport" - -import ( - "io" - "net/http" -) - -// httpTransport holds an http.RoundTripper -// and information about the scheme and address the transport -// sends request to. -type httpTransport struct { - http.RoundTripper - scheme string - addr string -} - -// NewHTTPTransport creates a new httpTransport. -func NewHTTPTransport(r http.RoundTripper, scheme, addr string) Transport { - return httpTransport{ - RoundTripper: r, - scheme: scheme, - addr: addr, - } -} - -// NewRequest creates a new http.Request and sets the URL -// scheme and address with the transport's fields. 
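[Reviewer context: Handle and GetAll, deleted just above, pair up roughly as follows. "NetworkDriver" is only an example subsystem name, and the surrounding program is an assumption, not code from the vendored package.]

    package main

    import (
        "log"

        "github.com/docker/docker/pkg/plugins"
    )

    func main() {
        // Runs for every plugin whose manifest lists "NetworkDriver"; plugins
        // that already activated are flagged so the handler fires on their
        // next activation.
        plugins.Handle("NetworkDriver", func(name string, c *plugins.Client) {
            log.Printf("network driver available: %s", name)
        })

        // Scan the discovery directories and activate matching plugins.
        drivers, err := plugins.GetAll("NetworkDriver")
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("%d network drivers found", len(drivers))
    }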
-func (t httpTransport) NewRequest(path string, data io.Reader) (*http.Request, error) { - req, err := newHTTPRequest(path, data) - if err != nil { - return nil, err - } - req.URL.Scheme = t.scheme - req.URL.Host = t.addr - return req, nil -} diff --git a/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go b/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go deleted file mode 100644 index 9cb13335a..000000000 --- a/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go +++ /dev/null @@ -1,36 +0,0 @@ -package transport // import "github.com/docker/docker/pkg/plugins/transport" - -import ( - "io" - "net/http" - "strings" -) - -// VersionMimetype is the Content-Type the engine sends to plugins. -const VersionMimetype = "application/vnd.docker.plugins.v1.2+json" - -// RequestFactory defines an interface that -// transports can implement to create new requests. -type RequestFactory interface { - NewRequest(path string, data io.Reader) (*http.Request, error) -} - -// Transport defines an interface that plugin transports -// must implement. -type Transport interface { - http.RoundTripper - RequestFactory -} - -// newHTTPRequest creates a new request with a path and a body. -func newHTTPRequest(path string, data io.Reader) (*http.Request, error) { - if !strings.HasPrefix(path, "/") { - path = "/" + path - } - req, err := http.NewRequest("POST", path, data) - if err != nil { - return nil, err - } - req.Header.Add("Accept", VersionMimetype) - return req, nil -} diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go deleted file mode 100644 index 46339c282..000000000 --- a/vendor/github.com/docker/docker/pkg/pools/pools.go +++ /dev/null @@ -1,137 +0,0 @@ -// Package pools provides a collection of pools which provide various -// data types with buffers. These can be used to lower the number of -// memory allocations and reuse buffers. -// -// New pools should be added to this package to allow them to be -// shared across packages. -// -// Utility functions which operate on pools should be added to this -// package to allow them to be reused. -package pools // import "github.com/docker/docker/pkg/pools" - -import ( - "bufio" - "io" - "sync" - - "github.com/docker/docker/pkg/ioutils" -) - -const buffer32K = 32 * 1024 - -var ( - // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. - BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) - // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. - BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) - buffer32KPool = newBufferPoolWithSize(buffer32K) -) - -// BufioReaderPool is a bufio reader that uses sync.Pool. -type BufioReaderPool struct { - pool sync.Pool -} - -// newBufioReaderPoolWithSize is unexported because new pools should be -// added here to be shared where required. -func newBufioReaderPoolWithSize(size int) *BufioReaderPool { - return &BufioReaderPool{ - pool: sync.Pool{ - New: func() interface{} { return bufio.NewReaderSize(nil, size) }, - }, - } -} - -// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. -func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { - buf := bufPool.pool.Get().(*bufio.Reader) - buf.Reset(r) - return buf -} - -// Put puts the bufio.Reader back into the pool. 
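[Reviewer context: a sketch of the request factory removed above; the address is a placeholder. Requests built this way are always POSTs carrying the v1.2 Accept header, with a missing leading "/" on the path filled in by newHTTPRequest.]

    package main

    import (
        "fmt"
        "net/http"
        "strings"

        "github.com/docker/docker/pkg/plugins/transport"
    )

    func main() {
        t := transport.NewHTTPTransport(http.DefaultTransport, "http", "127.0.0.1:8080")

        // The transport fills in the URL scheme and host on the new request.
        req, err := t.NewRequest("Plugin.Activate", strings.NewReader("{}"))
        if err != nil {
            panic(err)
        }
        fmt.Println(req.Method, req.URL.String(), req.Header.Get("Accept"))
    }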
-func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { - b.Reset(nil) - bufPool.pool.Put(b) -} - -type bufferPool struct { - pool sync.Pool -} - -func newBufferPoolWithSize(size int) *bufferPool { - return &bufferPool{ - pool: sync.Pool{ - New: func() interface{} { return make([]byte, size) }, - }, - } -} - -func (bp *bufferPool) Get() []byte { - return bp.pool.Get().([]byte) -} - -func (bp *bufferPool) Put(b []byte) { - bp.pool.Put(b) -} - -// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. -func Copy(dst io.Writer, src io.Reader) (written int64, err error) { - buf := buffer32KPool.Get() - written, err = io.CopyBuffer(dst, src, buf) - buffer32KPool.Put(buf) - return -} - -// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back -// into the pool and closes the reader if it's an io.ReadCloser. -func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { - return ioutils.NewReadCloserWrapper(r, func() error { - if readCloser, ok := r.(io.ReadCloser); ok { - readCloser.Close() - } - bufPool.Put(buf) - return nil - }) -} - -// BufioWriterPool is a bufio writer that uses sync.Pool. -type BufioWriterPool struct { - pool sync.Pool -} - -// newBufioWriterPoolWithSize is unexported because new pools should be -// added here to be shared where required. -func newBufioWriterPoolWithSize(size int) *BufioWriterPool { - return &BufioWriterPool{ - pool: sync.Pool{ - New: func() interface{} { return bufio.NewWriterSize(nil, size) }, - }, - } -} - -// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. -func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { - buf := bufPool.pool.Get().(*bufio.Writer) - buf.Reset(w) - return buf -} - -// Put puts the bufio.Writer back into the pool. -func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { - b.Reset(nil) - bufPool.pool.Put(b) -} - -// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back -// into the pool and closes the writer if it's an io.Writecloser. -func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { - return ioutils.NewWriteCloserWrapper(w, func() error { - buf.Flush() - if writeCloser, ok := w.(io.WriteCloser); ok { - writeCloser.Close() - } - bufPool.Put(buf) - return nil - }) -} diff --git a/vendor/github.com/docker/docker/pkg/progress/progress.go b/vendor/github.com/docker/docker/pkg/progress/progress.go deleted file mode 100644 index 9aea59195..000000000 --- a/vendor/github.com/docker/docker/pkg/progress/progress.go +++ /dev/null @@ -1,89 +0,0 @@ -package progress // import "github.com/docker/docker/pkg/progress" - -import ( - "fmt" -) - -// Progress represents the progress of a transfer. -type Progress struct { - ID string - - // Progress contains a Message or... - Message string - - // ...progress of an action - Action string - Current int64 - Total int64 - - // If true, don't show xB/yB - HideCounts bool - // If not empty, use units instead of bytes for counts - Units string - - // Aux contains extra information not presented to the user, such as - // digests for push signing. - Aux interface{} - - LastUpdate bool -} - -// Output is an interface for writing progress information. It's -// like a writer for progress, but we don't call it Writer because -// that would be confusing next to ProgressReader (also, because it -// doesn't implement the io.Writer interface). 
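[Reviewer context: the pooled readers and the Copy helper removed above compose as in this sketch (io.Discard assumes Go 1.16+).]

    package main

    import (
        "io"
        "os"

        "github.com/docker/docker/pkg/pools"
    )

    func main() {
        // Borrow a 32K bufio.Reader rather than allocating one per stream.
        br := pools.BufioReader32KPool.Get(os.Stdin)
        defer pools.BufioReader32KPool.Put(br)

        // Copy reuses a pooled 32K scratch buffer via io.CopyBuffer.
        if _, err := pools.Copy(io.Discard, br); err != nil {
            panic(err)
        }
    }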
-type Output interface { - WriteProgress(Progress) error -} - -type chanOutput chan<- Progress - -func (out chanOutput) WriteProgress(p Progress) error { - out <- p - return nil -} - -// ChanOutput returns an Output that writes progress updates to the -// supplied channel. -func ChanOutput(progressChan chan<- Progress) Output { - return chanOutput(progressChan) -} - -type discardOutput struct{} - -func (discardOutput) WriteProgress(Progress) error { - return nil -} - -// DiscardOutput returns an Output that discards progress -func DiscardOutput() Output { - return discardOutput{} -} - -// Update is a convenience function to write a progress update to the channel. -func Update(out Output, id, action string) { - out.WriteProgress(Progress{ID: id, Action: action}) -} - -// Updatef is a convenience function to write a printf-formatted progress update -// to the channel. -func Updatef(out Output, id, format string, a ...interface{}) { - Update(out, id, fmt.Sprintf(format, a...)) -} - -// Message is a convenience function to write a progress message to the channel. -func Message(out Output, id, message string) { - out.WriteProgress(Progress{ID: id, Message: message}) -} - -// Messagef is a convenience function to write a printf-formatted progress -// message to the channel. -func Messagef(out Output, id, format string, a ...interface{}) { - Message(out, id, fmt.Sprintf(format, a...)) -} - -// Aux sends auxiliary information over a progress interface, which will not be -// formatted for the UI. This is used for things such as push signing. -func Aux(out Output, a interface{}) { - out.WriteProgress(Progress{Aux: a}) -} diff --git a/vendor/github.com/docker/docker/pkg/progress/progressreader.go b/vendor/github.com/docker/docker/pkg/progress/progressreader.go deleted file mode 100644 index 7ca07dc64..000000000 --- a/vendor/github.com/docker/docker/pkg/progress/progressreader.go +++ /dev/null @@ -1,66 +0,0 @@ -package progress // import "github.com/docker/docker/pkg/progress" - -import ( - "io" - "time" - - "golang.org/x/time/rate" -) - -// Reader is a Reader with progress bar. -type Reader struct { - in io.ReadCloser // Stream to read from - out Output // Where to send progress bar to - size int64 - current int64 - lastUpdate int64 - id string - action string - rateLimiter *rate.Limiter -} - -// NewProgressReader creates a new ProgressReader. -func NewProgressReader(in io.ReadCloser, out Output, size int64, id, action string) *Reader { - return &Reader{ - in: in, - out: out, - size: size, - id: id, - action: action, - rateLimiter: rate.NewLimiter(rate.Every(100*time.Millisecond), 1), - } -} - -func (p *Reader) Read(buf []byte) (n int, err error) { - read, err := p.in.Read(buf) - p.current += int64(read) - updateEvery := int64(1024 * 512) //512kB - if p.size > 0 { - // Update progress for every 1% read if 1% < 512kB - if increment := int64(0.01 * float64(p.size)); increment < updateEvery { - updateEvery = increment - } - } - if p.current-p.lastUpdate > updateEvery || err != nil { - p.updateProgress(err != nil && read == 0) - p.lastUpdate = p.current - } - - return read, err -} - -// Close closes the progress reader and its underlying reader. 
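[Reviewer context: the Output helpers above are usually driven through a channel, roughly as below; the "layer0" ID and the messages are invented.]

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/progress"
    )

    func main() {
        ch := make(chan progress.Progress, 4)
        out := progress.ChanOutput(ch)

        progress.Update(out, "layer0", "extracting")
        progress.Messagef(out, "layer0", "done in %dms", 42)
        close(ch)

        for p := range ch {
            fmt.Println(p.ID, p.Action, p.Message)
        }
    }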
-func (p *Reader) Close() error { - if p.current < p.size { - // print a full progress bar when closing prematurely - p.current = p.size - p.updateProgress(false) - } - return p.in.Close() -} - -func (p *Reader) updateProgress(last bool) { - if last || p.current == p.size || p.rateLimiter.Allow() { - p.out.WriteProgress(Progress{ID: p.id, Action: p.action, Current: p.current, Total: p.size, LastUpdate: last}) - } -} diff --git a/vendor/github.com/docker/docker/pkg/pubsub/publisher.go b/vendor/github.com/docker/docker/pkg/pubsub/publisher.go deleted file mode 100644 index 76033ed9e..000000000 --- a/vendor/github.com/docker/docker/pkg/pubsub/publisher.go +++ /dev/null @@ -1,121 +0,0 @@ -package pubsub // import "github.com/docker/docker/pkg/pubsub" - -import ( - "sync" - "time" -) - -var wgPool = sync.Pool{New: func() interface{} { return new(sync.WaitGroup) }} - -// NewPublisher creates a new pub/sub publisher to broadcast messages. -// The duration is used as the send timeout as to not block the publisher publishing -// messages to other clients if one client is slow or unresponsive. -// The buffer is used when creating new channels for subscribers. -func NewPublisher(publishTimeout time.Duration, buffer int) *Publisher { - return &Publisher{ - buffer: buffer, - timeout: publishTimeout, - subscribers: make(map[subscriber]topicFunc), - } -} - -type subscriber chan interface{} -type topicFunc func(v interface{}) bool - -// Publisher is basic pub/sub structure. Allows to send events and subscribe -// to them. Can be safely used from multiple goroutines. -type Publisher struct { - m sync.RWMutex - buffer int - timeout time.Duration - subscribers map[subscriber]topicFunc -} - -// Len returns the number of subscribers for the publisher -func (p *Publisher) Len() int { - p.m.RLock() - i := len(p.subscribers) - p.m.RUnlock() - return i -} - -// Subscribe adds a new subscriber to the publisher returning the channel. -func (p *Publisher) Subscribe() chan interface{} { - return p.SubscribeTopic(nil) -} - -// SubscribeTopic adds a new subscriber that filters messages sent by a topic. -func (p *Publisher) SubscribeTopic(topic topicFunc) chan interface{} { - ch := make(chan interface{}, p.buffer) - p.m.Lock() - p.subscribers[ch] = topic - p.m.Unlock() - return ch -} - -// SubscribeTopicWithBuffer adds a new subscriber that filters messages sent by a topic. -// The returned channel has a buffer of the specified size. -func (p *Publisher) SubscribeTopicWithBuffer(topic topicFunc, buffer int) chan interface{} { - ch := make(chan interface{}, buffer) - p.m.Lock() - p.subscribers[ch] = topic - p.m.Unlock() - return ch -} - -// Evict removes the specified subscriber from receiving any more messages. -func (p *Publisher) Evict(sub chan interface{}) { - p.m.Lock() - delete(p.subscribers, sub) - close(sub) - p.m.Unlock() -} - -// Publish sends the data in v to all subscribers currently registered with the publisher. -func (p *Publisher) Publish(v interface{}) { - p.m.RLock() - if len(p.subscribers) == 0 { - p.m.RUnlock() - return - } - - wg := wgPool.Get().(*sync.WaitGroup) - for sub, topic := range p.subscribers { - wg.Add(1) - go p.sendTopic(sub, topic, v, wg) - } - wg.Wait() - wgPool.Put(wg) - p.m.RUnlock() -} - -// Close closes the channels to all subscribers registered with the publisher. 
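[Reviewer context: a sketch wiring the progress Reader removed above to an Output; the size, ID, and payload are made up, and io.NopCloser assumes Go 1.16+.]

    package main

    import (
        "fmt"
        "io"
        "strings"

        "github.com/docker/docker/pkg/progress"
    )

    func main() {
        ch := make(chan progress.Progress, 16)
        done := make(chan struct{})
        go func() {
            for p := range ch {
                fmt.Printf("%s: %s %d/%d\n", p.ID, p.Action, p.Current, p.Total)
            }
            close(done)
        }()

        body := io.NopCloser(strings.NewReader("payload"))
        pr := progress.NewProgressReader(body, progress.ChanOutput(ch), 7, "blob0", "downloading")
        if _, err := io.Copy(io.Discard, pr); err != nil {
            panic(err)
        }
        pr.Close() // emits a final full-bar update if the stream ended early
        close(ch)
        <-done
    }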
-func (p *Publisher) Close() { - p.m.Lock() - for sub := range p.subscribers { - delete(p.subscribers, sub) - close(sub) - } - p.m.Unlock() -} - -func (p *Publisher) sendTopic(sub subscriber, topic topicFunc, v interface{}, wg *sync.WaitGroup) { - defer wg.Done() - if topic != nil && !topic(v) { - return - } - - // send under a select as to not block if the receiver is unavailable - if p.timeout > 0 { - select { - case sub <- v: - case <-time.After(p.timeout): - } - return - } - - select { - case sub <- v: - default: - } -} diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_linux.go b/vendor/github.com/docker/docker/pkg/reexec/command_linux.go deleted file mode 100644 index efea71794..000000000 --- a/vendor/github.com/docker/docker/pkg/reexec/command_linux.go +++ /dev/null @@ -1,28 +0,0 @@ -package reexec // import "github.com/docker/docker/pkg/reexec" - -import ( - "os/exec" - "syscall" - - "golang.org/x/sys/unix" -) - -// Self returns the path to the current process's binary. -// Returns "/proc/self/exe". -func Self() string { - return "/proc/self/exe" -} - -// Command returns *exec.Cmd which has Path as current binary. Also it setting -// SysProcAttr.Pdeathsig to SIGTERM. -// This will use the in-memory version (/proc/self/exe) of the current binary, -// it is thus safe to delete or replace the on-disk binary (os.Args[0]). -func Command(args ...string) *exec.Cmd { - return &exec.Cmd{ - Path: Self(), - Args: args, - SysProcAttr: &syscall.SysProcAttr{ - Pdeathsig: unix.SIGTERM, - }, - } -} diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_unix.go b/vendor/github.com/docker/docker/pkg/reexec/command_unix.go deleted file mode 100644 index ceaabbdee..000000000 --- a/vendor/github.com/docker/docker/pkg/reexec/command_unix.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build freebsd darwin - -package reexec // import "github.com/docker/docker/pkg/reexec" - -import ( - "os/exec" -) - -// Self returns the path to the current process's binary. -// Uses os.Args[0]. -func Self() string { - return naiveSelf() -} - -// Command returns *exec.Cmd which has Path as current binary. -// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will -// be set to "/usr/bin/docker". -func Command(args ...string) *exec.Cmd { - return &exec.Cmd{ - Path: Self(), - Args: args, - } -} diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go b/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go deleted file mode 100644 index 09fb4b2d2..000000000 --- a/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !linux,!windows,!freebsd,!darwin - -package reexec // import "github.com/docker/docker/pkg/reexec" - -import ( - "os/exec" -) - -// Command is unsupported on operating systems apart from Linux, Windows, and Darwin. -func Command(args ...string) *exec.Cmd { - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_windows.go b/vendor/github.com/docker/docker/pkg/reexec/command_windows.go deleted file mode 100644 index 438226890..000000000 --- a/vendor/github.com/docker/docker/pkg/reexec/command_windows.go +++ /dev/null @@ -1,21 +0,0 @@ -package reexec // import "github.com/docker/docker/pkg/reexec" - -import ( - "os/exec" -) - -// Self returns the path to the current process's binary. -// Uses os.Args[0]. -func Self() string { - return naiveSelf() -} - -// Command returns *exec.Cmd which has Path as current binary. 
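[Reviewer context: the publisher removed above is used roughly as follows; the timeout, buffer size, and event string are arbitrary choices.]

    package main

    import (
        "fmt"
        "time"

        "github.com/docker/docker/pkg/pubsub"
    )

    func main() {
        // A 100ms send timeout keeps one slow subscriber from stalling Publish.
        p := pubsub.NewPublisher(100*time.Millisecond, 8)

        sub := p.SubscribeTopic(func(v interface{}) bool {
            _, ok := v.(string) // deliver only string events to this subscriber
            return ok
        })

        p.Publish("container started") // fans out to all matching subscribers
        fmt.Println(<-sub, "| subscribers:", p.Len())

        p.Close() // closes every subscriber channel
    }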
-// For example if current binary is "docker.exe" at "C:\", then cmd.Path will -// be set to "C:\docker.exe". -func Command(args ...string) *exec.Cmd { - return &exec.Cmd{ - Path: Self(), - Args: args, - } -} diff --git a/vendor/github.com/docker/docker/pkg/reexec/reexec.go b/vendor/github.com/docker/docker/pkg/reexec/reexec.go deleted file mode 100644 index f8ccddd59..000000000 --- a/vendor/github.com/docker/docker/pkg/reexec/reexec.go +++ /dev/null @@ -1,47 +0,0 @@ -package reexec // import "github.com/docker/docker/pkg/reexec" - -import ( - "fmt" - "os" - "os/exec" - "path/filepath" -) - -var registeredInitializers = make(map[string]func()) - -// Register adds an initialization func under the specified name -func Register(name string, initializer func()) { - if _, exists := registeredInitializers[name]; exists { - panic(fmt.Sprintf("reexec func already registered under name %q", name)) - } - - registeredInitializers[name] = initializer -} - -// Init is called as the first part of the exec process and returns true if an -// initialization function was called. -func Init() bool { - initializer, exists := registeredInitializers[os.Args[0]] - if exists { - initializer() - - return true - } - return false -} - -func naiveSelf() string { - name := os.Args[0] - if filepath.Base(name) == name { - if lp, err := exec.LookPath(name); err == nil { - return lp - } - } - // handle conversion of relative paths to absolute - if absName, err := filepath.Abs(name); err == nil { - return absName - } - // if we couldn't get absolute name, return original - // (NOTE: Go only errors on Abs() if os.Getwd fails) - return name -} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal.go b/vendor/github.com/docker/docker/pkg/signal/signal.go deleted file mode 100644 index 88ef7b5ea..000000000 --- a/vendor/github.com/docker/docker/pkg/signal/signal.go +++ /dev/null @@ -1,54 +0,0 @@ -// Package signal provides helper functions for dealing with signals across -// various operating systems. -package signal // import "github.com/docker/docker/pkg/signal" - -import ( - "fmt" - "os" - "os/signal" - "strconv" - "strings" - "syscall" -) - -// CatchAll catches all signals and relays them to the specified channel. -func CatchAll(sigc chan os.Signal) { - var handledSigs []os.Signal - for _, s := range SignalMap { - handledSigs = append(handledSigs, s) - } - signal.Notify(sigc, handledSigs...) -} - -// StopCatch stops catching the signals and closes the specified channel. -func StopCatch(sigc chan os.Signal) { - signal.Stop(sigc) - close(sigc) -} - -// ParseSignal translates a string to a valid syscall signal. -// It returns an error if the signal map doesn't include the given signal. 
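[Reviewer context: Register, Init, and Command combine into the usual re-exec pattern sketched here; "my-child" is an invented initializer name.]

    package main

    import (
        "fmt"
        "os"

        "github.com/docker/docker/pkg/reexec"
    )

    func init() {
        // The name is matched against os.Args[0] of the re-executed process.
        reexec.Register("my-child", func() {
            fmt.Println("child pid:", os.Getpid())
            os.Exit(0)
        })
    }

    func main() {
        if reexec.Init() {
            return // we were the child; the initializer already ran
        }
        cmd := reexec.Command("my-child") // Args[0] selects the initializer
        cmd.Stdout = os.Stdout
        if err := cmd.Run(); err != nil {
            panic(err)
        }
    }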
-func ParseSignal(rawSignal string) (syscall.Signal, error) { - s, err := strconv.Atoi(rawSignal) - if err == nil { - if s == 0 { - return -1, fmt.Errorf("Invalid signal: %s", rawSignal) - } - return syscall.Signal(s), nil - } - signal, ok := SignalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")] - if !ok { - return -1, fmt.Errorf("Invalid signal: %s", rawSignal) - } - return signal, nil -} - -// ValidSignalForPlatform returns true if a signal is valid on the platform -func ValidSignalForPlatform(sig syscall.Signal) bool { - for _, v := range SignalMap { - if v == sig { - return true - } - } - return false -} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go b/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go deleted file mode 100644 index ee5501e3d..000000000 --- a/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go +++ /dev/null @@ -1,41 +0,0 @@ -package signal // import "github.com/docker/docker/pkg/signal" - -import ( - "syscall" -) - -// SignalMap is a map of Darwin signals. -var SignalMap = map[string]syscall.Signal{ - "ABRT": syscall.SIGABRT, - "ALRM": syscall.SIGALRM, - "BUG": syscall.SIGBUS, - "CHLD": syscall.SIGCHLD, - "CONT": syscall.SIGCONT, - "EMT": syscall.SIGEMT, - "FPE": syscall.SIGFPE, - "HUP": syscall.SIGHUP, - "ILL": syscall.SIGILL, - "INFO": syscall.SIGINFO, - "INT": syscall.SIGINT, - "IO": syscall.SIGIO, - "IOT": syscall.SIGIOT, - "KILL": syscall.SIGKILL, - "PIPE": syscall.SIGPIPE, - "PROF": syscall.SIGPROF, - "QUIT": syscall.SIGQUIT, - "SEGV": syscall.SIGSEGV, - "STOP": syscall.SIGSTOP, - "SYS": syscall.SIGSYS, - "TERM": syscall.SIGTERM, - "TRAP": syscall.SIGTRAP, - "TSTP": syscall.SIGTSTP, - "TTIN": syscall.SIGTTIN, - "TTOU": syscall.SIGTTOU, - "URG": syscall.SIGURG, - "USR1": syscall.SIGUSR1, - "USR2": syscall.SIGUSR2, - "VTALRM": syscall.SIGVTALRM, - "WINCH": syscall.SIGWINCH, - "XCPU": syscall.SIGXCPU, - "XFSZ": syscall.SIGXFSZ, -} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go b/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go deleted file mode 100644 index 764f90e26..000000000 --- a/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go +++ /dev/null @@ -1,43 +0,0 @@ -package signal // import "github.com/docker/docker/pkg/signal" - -import ( - "syscall" -) - -// SignalMap is a map of FreeBSD signals. 
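[Reviewer context: ParseSignal accepts numbers, bare names, and SIG-prefixed names alike, as in this sketch.]

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/signal"
    )

    func main() {
        for _, s := range []string{"9", "TERM", "SIGHUP"} {
            sig, err := signal.ParseSignal(s)
            if err != nil {
                fmt.Println(s, "->", err)
                continue
            }
            fmt.Printf("%s -> %d (valid on this platform: %v)\n",
                s, sig, signal.ValidSignalForPlatform(sig))
        }
    }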
-var SignalMap = map[string]syscall.Signal{ - "ABRT": syscall.SIGABRT, - "ALRM": syscall.SIGALRM, - "BUF": syscall.SIGBUS, - "CHLD": syscall.SIGCHLD, - "CONT": syscall.SIGCONT, - "EMT": syscall.SIGEMT, - "FPE": syscall.SIGFPE, - "HUP": syscall.SIGHUP, - "ILL": syscall.SIGILL, - "INFO": syscall.SIGINFO, - "INT": syscall.SIGINT, - "IO": syscall.SIGIO, - "IOT": syscall.SIGIOT, - "KILL": syscall.SIGKILL, - "LWP": syscall.SIGLWP, - "PIPE": syscall.SIGPIPE, - "PROF": syscall.SIGPROF, - "QUIT": syscall.SIGQUIT, - "SEGV": syscall.SIGSEGV, - "STOP": syscall.SIGSTOP, - "SYS": syscall.SIGSYS, - "TERM": syscall.SIGTERM, - "THR": syscall.SIGTHR, - "TRAP": syscall.SIGTRAP, - "TSTP": syscall.SIGTSTP, - "TTIN": syscall.SIGTTIN, - "TTOU": syscall.SIGTTOU, - "URG": syscall.SIGURG, - "USR1": syscall.SIGUSR1, - "USR2": syscall.SIGUSR2, - "VTALRM": syscall.SIGVTALRM, - "WINCH": syscall.SIGWINCH, - "XCPU": syscall.SIGXCPU, - "XFSZ": syscall.SIGXFSZ, -} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_linux.go b/vendor/github.com/docker/docker/pkg/signal/signal_linux.go deleted file mode 100644 index caed97c96..000000000 --- a/vendor/github.com/docker/docker/pkg/signal/signal_linux.go +++ /dev/null @@ -1,81 +0,0 @@ -package signal // import "github.com/docker/docker/pkg/signal" - -import ( - "syscall" - - "golang.org/x/sys/unix" -) - -const ( - sigrtmin = 34 - sigrtmax = 64 -) - -// SignalMap is a map of Linux signals. -var SignalMap = map[string]syscall.Signal{ - "ABRT": unix.SIGABRT, - "ALRM": unix.SIGALRM, - "BUS": unix.SIGBUS, - "CHLD": unix.SIGCHLD, - "CLD": unix.SIGCLD, - "CONT": unix.SIGCONT, - "FPE": unix.SIGFPE, - "HUP": unix.SIGHUP, - "ILL": unix.SIGILL, - "INT": unix.SIGINT, - "IO": unix.SIGIO, - "IOT": unix.SIGIOT, - "KILL": unix.SIGKILL, - "PIPE": unix.SIGPIPE, - "POLL": unix.SIGPOLL, - "PROF": unix.SIGPROF, - "PWR": unix.SIGPWR, - "QUIT": unix.SIGQUIT, - "SEGV": unix.SIGSEGV, - "STKFLT": unix.SIGSTKFLT, - "STOP": unix.SIGSTOP, - "SYS": unix.SIGSYS, - "TERM": unix.SIGTERM, - "TRAP": unix.SIGTRAP, - "TSTP": unix.SIGTSTP, - "TTIN": unix.SIGTTIN, - "TTOU": unix.SIGTTOU, - "URG": unix.SIGURG, - "USR1": unix.SIGUSR1, - "USR2": unix.SIGUSR2, - "VTALRM": unix.SIGVTALRM, - "WINCH": unix.SIGWINCH, - "XCPU": unix.SIGXCPU, - "XFSZ": unix.SIGXFSZ, - "RTMIN": sigrtmin, - "RTMIN+1": sigrtmin + 1, - "RTMIN+2": sigrtmin + 2, - "RTMIN+3": sigrtmin + 3, - "RTMIN+4": sigrtmin + 4, - "RTMIN+5": sigrtmin + 5, - "RTMIN+6": sigrtmin + 6, - "RTMIN+7": sigrtmin + 7, - "RTMIN+8": sigrtmin + 8, - "RTMIN+9": sigrtmin + 9, - "RTMIN+10": sigrtmin + 10, - "RTMIN+11": sigrtmin + 11, - "RTMIN+12": sigrtmin + 12, - "RTMIN+13": sigrtmin + 13, - "RTMIN+14": sigrtmin + 14, - "RTMIN+15": sigrtmin + 15, - "RTMAX-14": sigrtmax - 14, - "RTMAX-13": sigrtmax - 13, - "RTMAX-12": sigrtmax - 12, - "RTMAX-11": sigrtmax - 11, - "RTMAX-10": sigrtmax - 10, - "RTMAX-9": sigrtmax - 9, - "RTMAX-8": sigrtmax - 8, - "RTMAX-7": sigrtmax - 7, - "RTMAX-6": sigrtmax - 6, - "RTMAX-5": sigrtmax - 5, - "RTMAX-4": sigrtmax - 4, - "RTMAX-3": sigrtmax - 3, - "RTMAX-2": sigrtmax - 2, - "RTMAX-1": sigrtmax - 1, - "RTMAX": sigrtmax, -} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_unix.go b/vendor/github.com/docker/docker/pkg/signal/signal_unix.go deleted file mode 100644 index a2aa4248f..000000000 --- a/vendor/github.com/docker/docker/pkg/signal/signal_unix.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build !windows - -package signal // import "github.com/docker/docker/pkg/signal" - -import ( - "syscall" -) - -// Signals used in cli/command 
(no windows equivalent, use -// invalid signals so they don't get handled) - -const ( - // SIGCHLD is a signal sent to a process when a child process terminates, is interrupted, or resumes after being interrupted. - SIGCHLD = syscall.SIGCHLD - // SIGWINCH is a signal sent to a process when its controlling terminal changes its size - SIGWINCH = syscall.SIGWINCH - // SIGPIPE is a signal sent to a process when a pipe is written to before the other end is open for reading - SIGPIPE = syscall.SIGPIPE - // DefaultStopSignal is the syscall signal used to stop a container in unix systems. - DefaultStopSignal = "SIGTERM" -) diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go b/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go deleted file mode 100644 index 1fd25a83c..000000000 --- a/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !linux,!darwin,!freebsd,!windows - -package signal // import "github.com/docker/docker/pkg/signal" - -import ( - "syscall" -) - -// SignalMap is an empty map of signals for unsupported platform. -var SignalMap = map[string]syscall.Signal{} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_windows.go b/vendor/github.com/docker/docker/pkg/signal/signal_windows.go deleted file mode 100644 index 65752f24a..000000000 --- a/vendor/github.com/docker/docker/pkg/signal/signal_windows.go +++ /dev/null @@ -1,26 +0,0 @@ -package signal // import "github.com/docker/docker/pkg/signal" - -import ( - "syscall" -) - -// Signals used in cli/command (no windows equivalent, use -// invalid signals so they don't get handled) -const ( - SIGCHLD = syscall.Signal(0xff) - SIGWINCH = syscall.Signal(0xff) - SIGPIPE = syscall.Signal(0xff) - // DefaultStopSignal is the syscall signal used to stop a container in windows systems. - DefaultStopSignal = "15" -) - -// SignalMap is a map of "supported" signals. As per the comment in GOLang's -// ztypes_windows.go: "More invented values for signals". Windows doesn't -// really support signals in any way, shape or form that Unix does. -// -// We have these so that docker kill can be used to gracefully (TERM) and -// forcibly (KILL) terminate a container on Windows. 
-var SignalMap = map[string]syscall.Signal{ - "KILL": syscall.SIGKILL, - "TERM": syscall.SIGTERM, -} diff --git a/vendor/github.com/docker/docker/pkg/signal/testfiles/main.go b/vendor/github.com/docker/docker/pkg/signal/testfiles/main.go deleted file mode 100644 index e56854c7c..000000000 --- a/vendor/github.com/docker/docker/pkg/signal/testfiles/main.go +++ /dev/null @@ -1,43 +0,0 @@ -package main - -import ( - "os" - "syscall" - "time" - - "github.com/docker/docker/pkg/signal" - "github.com/sirupsen/logrus" -) - -func main() { - sigmap := map[string]os.Signal{ - "TERM": syscall.SIGTERM, - "QUIT": syscall.SIGQUIT, - "INT": os.Interrupt, - } - signal.Trap(func() { - time.Sleep(time.Second) - os.Exit(99) - }, logrus.StandardLogger()) - go func() { - p, err := os.FindProcess(os.Getpid()) - if err != nil { - panic(err) - } - s := os.Getenv("SIGNAL_TYPE") - multiple := os.Getenv("IF_MULTIPLE") - switch s { - case "TERM", "INT": - if multiple == "1" { - for { - p.Signal(sigmap[s]) - } - } else { - p.Signal(sigmap[s]) - } - case "QUIT": - p.Signal(sigmap[s]) - } - }() - time.Sleep(2 * time.Second) -} diff --git a/vendor/github.com/docker/docker/pkg/signal/trap.go b/vendor/github.com/docker/docker/pkg/signal/trap.go deleted file mode 100644 index 2a6e69fb5..000000000 --- a/vendor/github.com/docker/docker/pkg/signal/trap.go +++ /dev/null @@ -1,104 +0,0 @@ -package signal // import "github.com/docker/docker/pkg/signal" - -import ( - "fmt" - "os" - gosignal "os/signal" - "path/filepath" - "runtime" - "strings" - "sync/atomic" - "syscall" - "time" - - "github.com/pkg/errors" -) - -// Trap sets up a simplified signal "trap", appropriate for common -// behavior expected from a vanilla unix command-line tool in general -// (and the Docker engine in particular). -// -// * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated. -// * If SIGINT or SIGTERM are received 3 times before cleanup is complete, then cleanup is -// skipped and the process is terminated immediately (allows force quit of stuck daemon) -// * A SIGQUIT always causes an exit without cleanup, with a goroutine dump preceding exit. -// * Ignore SIGPIPE events. These are generated by systemd when journald is restarted while -// the docker daemon is not restarted and also running under systemd. -// Fixes https://github.com/docker/docker/issues/19728 -// -func Trap(cleanup func(), logger interface { - Info(args ...interface{}) -}) { - c := make(chan os.Signal, 1) - // we will handle INT, TERM, QUIT, SIGPIPE here - signals := []os.Signal{os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGPIPE} - gosignal.Notify(c, signals...) 
- go func() { - interruptCount := uint32(0) - for sig := range c { - if sig == syscall.SIGPIPE { - continue - } - - go func(sig os.Signal) { - logger.Info(fmt.Sprintf("Processing signal '%v'", sig)) - switch sig { - case os.Interrupt, syscall.SIGTERM: - if atomic.LoadUint32(&interruptCount) < 3 { - // Initiate the cleanup only once - if atomic.AddUint32(&interruptCount, 1) == 1 { - // Call the provided cleanup handler - cleanup() - os.Exit(0) - } else { - return - } - } else { - // 3 SIGTERM/INT signals received; force exit without cleanup - logger.Info("Forcing docker daemon shutdown without cleanup; 3 interrupts received") - } - case syscall.SIGQUIT: - DumpStacks("") - logger.Info("Forcing docker daemon shutdown without cleanup on SIGQUIT") - } - //for the SIGINT/TERM, and SIGQUIT non-clean shutdown case, exit with 128 + signal # - os.Exit(128 + int(sig.(syscall.Signal))) - }(sig) - } - }() -} - -const stacksLogNameTemplate = "goroutine-stacks-%s.log" - -// DumpStacks appends the runtime stack into file in dir and returns full path -// to that file. -func DumpStacks(dir string) (string, error) { - var ( - buf []byte - stackSize int - ) - bufferLen := 16384 - for stackSize == len(buf) { - buf = make([]byte, bufferLen) - stackSize = runtime.Stack(buf, true) - bufferLen *= 2 - } - buf = buf[:stackSize] - var f *os.File - if dir != "" { - path := filepath.Join(dir, fmt.Sprintf(stacksLogNameTemplate, strings.Replace(time.Now().Format(time.RFC3339), ":", "", -1))) - var err error - f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0666) - if err != nil { - return "", errors.Wrap(err, "failed to open file to write the goroutine stacks") - } - defer f.Close() - defer f.Sync() - } else { - f = os.Stderr - } - if _, err := f.Write(buf); err != nil { - return "", errors.Wrap(err, "failed to write goroutine stacks") - } - return f.Name(), nil -} diff --git a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go deleted file mode 100644 index 8f6e0a737..000000000 --- a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go +++ /dev/null @@ -1,190 +0,0 @@ -package stdcopy // import "github.com/docker/docker/pkg/stdcopy" - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "sync" -) - -// StdType is the type of standard stream -// a writer can multiplex to. -type StdType byte - -const ( - // Stdin represents standard input stream type. - Stdin StdType = iota - // Stdout represents standard output stream type. - Stdout - // Stderr represents standard error steam type. - Stderr - // Systemerr represents errors originating from the system that make it - // into the multiplexed stream. - Systemerr - - stdWriterPrefixLen = 8 - stdWriterFdIndex = 0 - stdWriterSizeIndex = 4 - - startingBufLen = 32*1024 + stdWriterPrefixLen + 1 -) - -var bufPool = &sync.Pool{New: func() interface{} { return bytes.NewBuffer(nil) }} - -// stdWriter is wrapper of io.Writer with extra customized info. -type stdWriter struct { - io.Writer - prefix byte -} - -// Write sends the buffer to the underneath writer. -// It inserts the prefix header before the buffer, -// so stdcopy.StdCopy knows where to multiplex the output. -// It makes stdWriter to implement io.Writer. 
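[Reviewer context: a sketch of the Trap contract deleted above. The first INT/TERM runs cleanup once, repeated interrupts before cleanup completes force an immediate exit, and SIGQUIT dumps goroutine stacks; the logger argument only needs an Info method, which logrus satisfies.]

    package main

    import (
        "time"

        "github.com/docker/docker/pkg/signal"
        "github.com/sirupsen/logrus"
    )

    func main() {
        signal.Trap(func() {
            logrus.Info("flushing state before exit") // runs once, on the first INT/TERM
        }, logrus.StandardLogger())

        time.Sleep(time.Hour) // daemon work would go here
    }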
-func (w *stdWriter) Write(p []byte) (n int, err error) { - if w == nil || w.Writer == nil { - return 0, errors.New("Writer not instantiated") - } - if p == nil { - return 0, nil - } - - header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix} - binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(p))) - buf := bufPool.Get().(*bytes.Buffer) - buf.Write(header[:]) - buf.Write(p) - - n, err = w.Writer.Write(buf.Bytes()) - n -= stdWriterPrefixLen - if n < 0 { - n = 0 - } - - buf.Reset() - bufPool.Put(buf) - return -} - -// NewStdWriter instantiates a new Writer. -// Everything written to it will be encapsulated using a custom format, -// and written to the underlying `w` stream. -// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection. -// `t` indicates the id of the stream to encapsulate. -// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr. -func NewStdWriter(w io.Writer, t StdType) io.Writer { - return &stdWriter{ - Writer: w, - prefix: byte(t), - } -} - -// StdCopy is a modified version of io.Copy. -// -// StdCopy will demultiplex `src`, assuming that it contains two streams, -// previously multiplexed together using a StdWriter instance. -// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`. -// -// StdCopy will read until it hits EOF on `src`. It will then return a nil error. -// In other words: if `err` is non nil, it indicates a real underlying error. -// -// `written` will hold the total number of bytes written to `dstout` and `dsterr`. -func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) { - var ( - buf = make([]byte, startingBufLen) - bufLen = len(buf) - nr, nw int - er, ew error - out io.Writer - frameSize int - ) - - for { - // Make sure we have at least a full header - for nr < stdWriterPrefixLen { - var nr2 int - nr2, er = src.Read(buf[nr:]) - nr += nr2 - if er == io.EOF { - if nr < stdWriterPrefixLen { - return written, nil - } - break - } - if er != nil { - return 0, er - } - } - - stream := StdType(buf[stdWriterFdIndex]) - // Check the first byte to know where to write - switch stream { - case Stdin: - fallthrough - case Stdout: - // Write on stdout - out = dstout - case Stderr: - // Write on stderr - out = dsterr - case Systemerr: - // If we're on Systemerr, we won't write anywhere. - // NB: if this code changes later, make sure you don't try to write - // to outstream if Systemerr is the stream - out = nil - default: - return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex]) - } - - // Retrieve the size of the frame - frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4])) - - // Check if the buffer is big enough to read the frame. - // Extend it if necessary. - if frameSize+stdWriterPrefixLen > bufLen { - buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...) - bufLen = len(buf) - } - - // While the amount of bytes read is less than the size of the frame + header, we keep reading - for nr < frameSize+stdWriterPrefixLen { - var nr2 int - nr2, er = src.Read(buf[nr:]) - nr += nr2 - if er == io.EOF { - if nr < frameSize+stdWriterPrefixLen { - return written, nil - } - break - } - if er != nil { - return 0, er - } - } - - // we might have an error from the source mixed up in our multiplexed - // stream. if we do, return it. 
- if stream == Systemerr { - return written, fmt.Errorf("error from daemon in stream: %s", string(buf[stdWriterPrefixLen:frameSize+stdWriterPrefixLen])) - } - - // Write the retrieved frame (without header) - nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen]) - if ew != nil { - return 0, ew - } - - // If the frame has not been fully written: error - if nw != frameSize { - return 0, io.ErrShortWrite - } - written += int64(nw) - - // Move the rest of the buffer to the beginning - copy(buf, buf[frameSize+stdWriterPrefixLen:]) - // Move the index - nr -= frameSize + stdWriterPrefixLen - } -} diff --git a/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go deleted file mode 100644 index 2b5e71304..000000000 --- a/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go +++ /dev/null @@ -1,159 +0,0 @@ -// Package streamformatter provides helper functions to format a stream. -package streamformatter // import "github.com/docker/docker/pkg/streamformatter" - -import ( - "encoding/json" - "fmt" - "io" - - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/pkg/progress" -) - -const streamNewline = "\r\n" - -type jsonProgressFormatter struct{} - -func appendNewline(source []byte) []byte { - return append(source, []byte(streamNewline)...) -} - -// FormatStatus formats the specified objects according to the specified format (and id). -func FormatStatus(id, format string, a ...interface{}) []byte { - str := fmt.Sprintf(format, a...) - b, err := json.Marshal(&jsonmessage.JSONMessage{ID: id, Status: str}) - if err != nil { - return FormatError(err) - } - return appendNewline(b) -} - -// FormatError formats the error as a JSON object -func FormatError(err error) []byte { - jsonError, ok := err.(*jsonmessage.JSONError) - if !ok { - jsonError = &jsonmessage.JSONError{Message: err.Error()} - } - if b, err := json.Marshal(&jsonmessage.JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil { - return appendNewline(b) - } - return []byte(`{"error":"format error"}` + streamNewline) -} - -func (sf *jsonProgressFormatter) formatStatus(id, format string, a ...interface{}) []byte { - return FormatStatus(id, format, a...) -} - -// formatProgress formats the progress information for a specified action. -func (sf *jsonProgressFormatter) formatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte { - if progress == nil { - progress = &jsonmessage.JSONProgress{} - } - var auxJSON *json.RawMessage - if aux != nil { - auxJSONBytes, err := json.Marshal(aux) - if err != nil { - return nil - } - auxJSON = new(json.RawMessage) - *auxJSON = auxJSONBytes - } - b, err := json.Marshal(&jsonmessage.JSONMessage{ - Status: action, - ProgressMessage: progress.String(), - Progress: progress, - ID: id, - Aux: auxJSON, - }) - if err != nil { - return nil - } - return appendNewline(b) -} - -type rawProgressFormatter struct{} - -func (sf *rawProgressFormatter) formatStatus(id, format string, a ...interface{}) []byte { - return []byte(fmt.Sprintf(format, a...) 
+ streamNewline) -} - -func (sf *rawProgressFormatter) formatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte { - if progress == nil { - progress = &jsonmessage.JSONProgress{} - } - endl := "\r" - if progress.String() == "" { - endl += "\n" - } - return []byte(action + " " + progress.String() + endl) -} - -// NewProgressOutput returns a progress.Output object that can be passed to -// progress.NewProgressReader. -func NewProgressOutput(out io.Writer) progress.Output { - return &progressOutput{sf: &rawProgressFormatter{}, out: out, newLines: true} -} - -// NewJSONProgressOutput returns a progress.Output that that formats output -// using JSON objects -func NewJSONProgressOutput(out io.Writer, newLines bool) progress.Output { - return &progressOutput{sf: &jsonProgressFormatter{}, out: out, newLines: newLines} -} - -type formatProgress interface { - formatStatus(id, format string, a ...interface{}) []byte - formatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte -} - -type progressOutput struct { - sf formatProgress - out io.Writer - newLines bool -} - -// WriteProgress formats progress information from a ProgressReader. -func (out *progressOutput) WriteProgress(prog progress.Progress) error { - var formatted []byte - if prog.Message != "" { - formatted = out.sf.formatStatus(prog.ID, prog.Message) - } else { - jsonProgress := jsonmessage.JSONProgress{Current: prog.Current, Total: prog.Total, HideCounts: prog.HideCounts, Units: prog.Units} - formatted = out.sf.formatProgress(prog.ID, prog.Action, &jsonProgress, prog.Aux) - } - _, err := out.out.Write(formatted) - if err != nil { - return err - } - - if out.newLines && prog.LastUpdate { - _, err = out.out.Write(out.sf.formatStatus("", "")) - return err - } - - return nil -} - -// AuxFormatter is a streamFormatter that writes aux progress messages -type AuxFormatter struct { - io.Writer -} - -// Emit emits the given interface as an aux progress message -func (sf *AuxFormatter) Emit(aux interface{}) error { - auxJSONBytes, err := json.Marshal(aux) - if err != nil { - return err - } - auxJSON := new(json.RawMessage) - *auxJSON = auxJSONBytes - msgJSON, err := json.Marshal(&jsonmessage.JSONMessage{Aux: auxJSON}) - if err != nil { - return err - } - msgJSON = appendNewline(msgJSON) - n, err := sf.Writer.Write(msgJSON) - if n != len(msgJSON) { - return io.ErrShortWrite - } - return err -} diff --git a/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter.go b/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter.go deleted file mode 100644 index 1473ed974..000000000 --- a/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter.go +++ /dev/null @@ -1,47 +0,0 @@ -package streamformatter // import "github.com/docker/docker/pkg/streamformatter" - -import ( - "encoding/json" - "io" - - "github.com/docker/docker/pkg/jsonmessage" -) - -type streamWriter struct { - io.Writer - lineFormat func([]byte) string -} - -func (sw *streamWriter) Write(buf []byte) (int, error) { - formattedBuf := sw.format(buf) - n, err := sw.Writer.Write(formattedBuf) - if n != len(formattedBuf) { - return n, io.ErrShortWrite - } - return len(buf), err -} - -func (sw *streamWriter) format(buf []byte) []byte { - msg := &jsonmessage.JSONMessage{Stream: sw.lineFormat(buf)} - b, err := json.Marshal(msg) - if err != nil { - return FormatError(err) - } - return appendNewline(b) -} - -// NewStdoutWriter returns a writer which formats the output as json message -// representing stdout 
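[Reviewer context: the formatter flavors above plug into the progress package as sketched here; the ID and aux payload are placeholders.]

    package main

    import (
        "os"

        "github.com/docker/docker/pkg/progress"
        "github.com/docker/docker/pkg/streamformatter"
    )

    func main() {
        // One JSONMessage per line, as the docker CLI expects on pull/build.
        out := streamformatter.NewJSONProgressOutput(os.Stdout, true)

        progress.Update(out, "layer0", "pulling")
        progress.Aux(out, map[string]string{"note": "carried out-of-band, not rendered as a bar"})
    }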
lines -func NewStdoutWriter(out io.Writer) io.Writer { - return &streamWriter{Writer: out, lineFormat: func(buf []byte) string { - return string(buf) - }} -} - -// NewStderrWriter returns a writer which formats the output as json message -// representing stderr lines -func NewStderrWriter(out io.Writer) io.Writer { - return &streamWriter{Writer: out, lineFormat: func(buf []byte) string { - return "\033[91m" + string(buf) + "\033[0m" - }} -} diff --git a/vendor/github.com/docker/docker/pkg/stringid/stringid.go b/vendor/github.com/docker/docker/pkg/stringid/stringid.go deleted file mode 100644 index fa7d9166e..000000000 --- a/vendor/github.com/docker/docker/pkg/stringid/stringid.go +++ /dev/null @@ -1,99 +0,0 @@ -// Package stringid provides helper functions for dealing with string identifiers -package stringid // import "github.com/docker/docker/pkg/stringid" - -import ( - cryptorand "crypto/rand" - "encoding/hex" - "fmt" - "io" - "math" - "math/big" - "math/rand" - "regexp" - "strconv" - "strings" - "time" -) - -const shortLen = 12 - -var ( - validShortID = regexp.MustCompile("^[a-f0-9]{12}$") - validHex = regexp.MustCompile(`^[a-f0-9]{64}$`) -) - -// IsShortID determines if an arbitrary string *looks like* a short ID. -func IsShortID(id string) bool { - return validShortID.MatchString(id) -} - -// TruncateID returns a shorthand version of a string identifier for convenience. -// A collision with other shorthands is very unlikely, but possible. -// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller -// will need to use a longer prefix, or the full-length Id. -func TruncateID(id string) string { - if i := strings.IndexRune(id, ':'); i >= 0 { - id = id[i+1:] - } - if len(id) > shortLen { - id = id[:shortLen] - } - return id -} - -func generateID(r io.Reader) string { - b := make([]byte, 32) - for { - if _, err := io.ReadFull(r, b); err != nil { - panic(err) // This shouldn't happen - } - id := hex.EncodeToString(b) - // if we try to parse the truncated for as an int and we don't have - // an error then the value is all numeric and causes issues when - // used as a hostname. ref #3869 - if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil { - continue - } - return id - } -} - -// GenerateRandomID returns a unique id. -func GenerateRandomID() string { - return generateID(cryptorand.Reader) -} - -// GenerateNonCryptoID generates unique id without using cryptographically -// secure sources of random. -// It helps you to save entropy. -func GenerateNonCryptoID() string { - return generateID(readerFunc(rand.Read)) -} - -// ValidateID checks whether an ID string is a valid image ID. -func ValidateID(id string) error { - if ok := validHex.MatchString(id); !ok { - return fmt.Errorf("image ID %q is invalid", id) - } - return nil -} - -func init() { - // safely set the seed globally so we generate random ids. Tries to use a - // crypto seed before falling back to time. - var seed int64 - if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil { - // This should not happen, but worst-case fallback to time-based seed. 
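[Reviewer context: the stringid helpers above behave as sketched below; generation retries whenever the truncated form parses as an integer, since all-numeric short IDs break hostname use.]

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/stringid"
    )

    func main() {
        id := stringid.GenerateRandomID() // 64 hex chars from crypto/rand
        short := stringid.TruncateID(id)  // first 12 chars; an "algo:" prefix is stripped first

        fmt.Println(short, stringid.IsShortID(short)) // prints the short ID and "true"
        if err := stringid.ValidateID(id); err != nil {
            panic(err)
        }
    }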
- seed = time.Now().UnixNano() - } else { - seed = cryptoseed.Int64() - } - - rand.Seed(seed) -} - -type readerFunc func(p []byte) (int, error) - -func (fn readerFunc) Read(p []byte) (int, error) { - return fn(p) -} diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs.go b/vendor/github.com/docker/docker/pkg/symlink/fs.go deleted file mode 100644 index 7b894cde7..000000000 --- a/vendor/github.com/docker/docker/pkg/symlink/fs.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.BSD file. - -// This code is a modified version of path/filepath/symlink.go from the Go standard library. - -package symlink // import "github.com/docker/docker/pkg/symlink" - -import ( - "bytes" - "errors" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/system" -) - -// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an -// absolute path. This function handles paths in a platform-agnostic manner. -func FollowSymlinkInScope(path, root string) (string, error) { - path, err := filepath.Abs(filepath.FromSlash(path)) - if err != nil { - return "", err - } - root, err = filepath.Abs(filepath.FromSlash(root)) - if err != nil { - return "", err - } - return evalSymlinksInScope(path, root) -} - -// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return -// a result guaranteed to be contained within the scope `root`, at the time of the call. -// Symlinks in `root` are not evaluated and left as-is. -// Errors encountered while attempting to evaluate symlinks in path will be returned. -// Non-existing paths are valid and do not constitute an error. -// `path` has to contain `root` as a prefix, or else an error will be returned. -// Trying to break out from `root` does not constitute an error. -// -// Example: -// If /foo/bar -> /outside, -// FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/outside" -// -// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks -// are created and not to create subsequently, additional symlinks that could potentially make a -// previously-safe path, unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo") -// would return "/foo/bar". If one makes /foo/bar a symlink to /baz subsequently, then "/foo/bar" should -// no longer be considered safely contained in "/foo". 
-func evalSymlinksInScope(path, root string) (string, error) { - root = filepath.Clean(root) - if path == root { - return path, nil - } - if !strings.HasPrefix(path, root) { - return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) - } - const maxIter = 255 - originalPath := path - // given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c" - path = path[len(root):] - if root == string(filepath.Separator) { - path = string(filepath.Separator) + path - } - if !strings.HasPrefix(path, string(filepath.Separator)) { - return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) - } - path = filepath.Clean(path) - // consume path by taking each frontmost path element, - // expanding it if it's a symlink, and appending it to b - var b bytes.Buffer - // b here will always be considered to be the "current absolute path inside - // root" when we append paths to it, we also append a slash and use - // filepath.Clean after the loop to trim the trailing slash - for n := 0; path != ""; n++ { - if n > maxIter { - return "", errors.New("evalSymlinksInScope: too many links in " + originalPath) - } - - // find next path component, p - i := strings.IndexRune(path, filepath.Separator) - var p string - if i == -1 { - p, path = path, "" - } else { - p, path = path[:i], path[i+1:] - } - - if p == "" { - continue - } - - // this takes a b.String() like "b/../" and a p like "c" and turns it - // into "/b/../c" which then gets filepath.Cleaned into "/c" and then - // root gets prepended and we Clean again (to remove any trailing slash - // if the first Clean gave us just "/") - cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p) - if isDriveOrRoot(cleanP) { - // never Lstat "/" itself, or drive letters on Windows - b.Reset() - continue - } - fullP := filepath.Clean(root + cleanP) - - fi, err := os.Lstat(fullP) - if os.IsNotExist(err) { - // if p does not exist, accept it - b.WriteString(p) - b.WriteRune(filepath.Separator) - continue - } - if err != nil { - return "", err - } - if fi.Mode()&os.ModeSymlink == 0 { - b.WriteString(p) - b.WriteRune(filepath.Separator) - continue - } - - // it's a symlink, put it at the front of path - dest, err := os.Readlink(fullP) - if err != nil { - return "", err - } - if system.IsAbs(dest) { - b.Reset() - } - path = dest + string(filepath.Separator) + path - } - - // see note above on "fullP := ..." for why this is double-cleaned and - // what's happening here - return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil -} - -// EvalSymlinks returns the path name after the evaluation of any symbolic -// links. -// If path is relative the result will be relative to the current directory, -// unless one of the components is an absolute symbolic link. -// This version has been updated to support long paths prepended with `\\?\`. 
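[Reviewer context: the scoping guarantee documented above is easiest to see through FollowSymlinkInScope; the paths in this sketch are hypothetical.]

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/symlink"
    )

    func main() {
        // Even if /tmp/rootfs/etc is a symlink pointing at /etc on the host,
        // the result is re-rooted inside the scope instead of escaping it.
        p, err := symlink.FollowSymlinkInScope("/tmp/rootfs/etc/passwd", "/tmp/rootfs")
        if err != nil {
            panic(err)
        }
        fmt.Println(p) // e.g. /tmp/rootfs/etc/passwd
    }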
-func EvalSymlinks(path string) (string, error) { - return evalSymlinks(path) -} diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go b/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go deleted file mode 100644 index c6dafcb0b..000000000 --- a/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !windows - -package symlink // import "github.com/docker/docker/pkg/symlink" - -import ( - "path/filepath" -) - -func evalSymlinks(path string) (string, error) { - return filepath.EvalSymlinks(path) -} - -func isDriveOrRoot(p string) bool { - return p == string(filepath.Separator) -} diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go b/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go deleted file mode 100644 index 754761717..000000000 --- a/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go +++ /dev/null @@ -1,169 +0,0 @@ -package symlink // import "github.com/docker/docker/pkg/symlink" - -import ( - "bytes" - "errors" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/longpath" - "golang.org/x/sys/windows" -) - -func toShort(path string) (string, error) { - p, err := windows.UTF16FromString(path) - if err != nil { - return "", err - } - b := p // GetShortPathName says we can reuse buffer - n, err := windows.GetShortPathName(&p[0], &b[0], uint32(len(b))) - if err != nil { - return "", err - } - if n > uint32(len(b)) { - b = make([]uint16, n) - if _, err = windows.GetShortPathName(&p[0], &b[0], uint32(len(b))); err != nil { - return "", err - } - } - return windows.UTF16ToString(b), nil -} - -func toLong(path string) (string, error) { - p, err := windows.UTF16FromString(path) - if err != nil { - return "", err - } - b := p // GetLongPathName says we can reuse buffer - n, err := windows.GetLongPathName(&p[0], &b[0], uint32(len(b))) - if err != nil { - return "", err - } - if n > uint32(len(b)) { - b = make([]uint16, n) - n, err = windows.GetLongPathName(&p[0], &b[0], uint32(len(b))) - if err != nil { - return "", err - } - } - b = b[:n] - return windows.UTF16ToString(b), nil -} - -func evalSymlinks(path string) (string, error) { - path, err := walkSymlinks(path) - if err != nil { - return "", err - } - - p, err := toShort(path) - if err != nil { - return "", err - } - p, err = toLong(p) - if err != nil { - return "", err - } - // windows.GetLongPathName does not change the case of the drive letter, - // but the result of EvalSymlinks must be unique, so we have - // EvalSymlinks(`c:\a`) == EvalSymlinks(`C:\a`). - // Make drive letter upper case. - if len(p) >= 2 && p[1] == ':' && 'a' <= p[0] && p[0] <= 'z' { - p = string(p[0]+'A'-'a') + p[1:] - } else if len(p) >= 6 && p[5] == ':' && 'a' <= p[4] && p[4] <= 'z' { - p = p[:3] + string(p[4]+'A'-'a') + p[5:] - } - return filepath.Clean(p), nil -} - -const utf8RuneSelf = 0x80 - -func walkSymlinks(path string) (string, error) { - const maxIter = 255 - originalPath := path - // consume path by taking each frontmost path element, - // expanding it if it's a symlink, and appending it to b - var b bytes.Buffer - for n := 0; path != ""; n++ { - if n > maxIter { - return "", errors.New("EvalSymlinks: too many links in " + originalPath) - } - - // A path beginning with `\\?\` represents the root, so automatically - // skip that part and begin processing the next segment. 
- if strings.HasPrefix(path, longpath.Prefix) { - b.WriteString(longpath.Prefix) - path = path[4:] - continue - } - - // find next path component, p - var i = -1 - for j, c := range path { - if c < utf8RuneSelf && os.IsPathSeparator(uint8(c)) { - i = j - break - } - } - var p string - if i == -1 { - p, path = path, "" - } else { - p, path = path[:i], path[i+1:] - } - - if p == "" { - if b.Len() == 0 { - // must be absolute path - b.WriteRune(filepath.Separator) - } - continue - } - - // If this is the first segment after the long path prefix, accept the - // current segment as a volume root or UNC share and move on to the next. - if b.String() == longpath.Prefix { - b.WriteString(p) - b.WriteRune(filepath.Separator) - continue - } - - fi, err := os.Lstat(b.String() + p) - if err != nil { - return "", err - } - if fi.Mode()&os.ModeSymlink == 0 { - b.WriteString(p) - if path != "" || (b.Len() == 2 && len(p) == 2 && p[1] == ':') { - b.WriteRune(filepath.Separator) - } - continue - } - - // it's a symlink, put it at the front of path - dest, err := os.Readlink(b.String() + p) - if err != nil { - return "", err - } - if filepath.IsAbs(dest) || os.IsPathSeparator(dest[0]) { - b.Reset() - } - path = dest + string(filepath.Separator) + path - } - return filepath.Clean(b.String()), nil -} - -func isDriveOrRoot(p string) bool { - if p == string(filepath.Separator) { - return true - } - - length := len(p) - if length >= 2 { - if p[length-1] == ':' && (('a' <= p[length-2] && p[length-2] <= 'z') || ('A' <= p[length-2] && p[length-2] <= 'Z')) { - return true - } - } - return false -} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu.go b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu.go deleted file mode 100644 index eea2d25bf..000000000 --- a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !linux,!windows - -package sysinfo // import "github.com/docker/docker/pkg/sysinfo" - -import ( - "runtime" -) - -// NumCPU returns the number of CPUs -func NumCPU() int { - return runtime.NumCPU() -} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_linux.go b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_linux.go deleted file mode 100644 index 5f6c6df8c..000000000 --- a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_linux.go +++ /dev/null @@ -1,42 +0,0 @@ -package sysinfo // import "github.com/docker/docker/pkg/sysinfo" - -import ( - "runtime" - "unsafe" - - "golang.org/x/sys/unix" -) - -// numCPU queries the system for the count of threads available -// for use to this process. -// -// Issues two syscalls. -// Returns 0 on errors. Use |runtime.NumCPU| in that case. -func numCPU() int { - // Gets the affinity mask for a process: The very one invoking this function. - pid, _, _ := unix.RawSyscall(unix.SYS_GETPID, 0, 0, 0) - - var mask [1024 / 64]uintptr - _, _, err := unix.RawSyscall(unix.SYS_SCHED_GETAFFINITY, pid, uintptr(len(mask)*8), uintptr(unsafe.Pointer(&mask[0]))) - if err != 0 { - return 0 - } - - // For every available thread a bit is set in the mask. 
- ncpu := 0 - for _, e := range mask { - if e == 0 { - continue - } - ncpu += int(popcnt(uint64(e))) - } - return ncpu -} - -// NumCPU returns the number of CPUs which are currently online -func NumCPU() int { - if ncpu := numCPU(); ncpu > 0 { - return ncpu - } - return runtime.NumCPU() -} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_windows.go b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_windows.go deleted file mode 100644 index 13523f671..000000000 --- a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_windows.go +++ /dev/null @@ -1,35 +0,0 @@ -package sysinfo // import "github.com/docker/docker/pkg/sysinfo" - -import ( - "runtime" - "unsafe" - - "golang.org/x/sys/windows" -) - -var ( - kernel32 = windows.NewLazySystemDLL("kernel32.dll") - getCurrentProcess = kernel32.NewProc("GetCurrentProcess") - getProcessAffinityMask = kernel32.NewProc("GetProcessAffinityMask") -) - -func numCPU() int { - // Gets the affinity mask for a process - var mask, sysmask uintptr - currentProcess, _, _ := getCurrentProcess.Call() - ret, _, _ := getProcessAffinityMask.Call(currentProcess, uintptr(unsafe.Pointer(&mask)), uintptr(unsafe.Pointer(&sysmask))) - if ret == 0 { - return 0 - } - // For every available thread a bit is set in the mask. - ncpu := int(popcnt(uint64(mask))) - return ncpu -} - -// NumCPU returns the number of CPUs which are currently online -func NumCPU() int { - if ncpu := numCPU(); ncpu > 0 { - return ncpu - } - return runtime.NumCPU() -} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go deleted file mode 100644 index 8fc0ecc25..000000000 --- a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go +++ /dev/null @@ -1,144 +0,0 @@ -package sysinfo // import "github.com/docker/docker/pkg/sysinfo" - -import "github.com/docker/docker/pkg/parsers" - -// SysInfo stores information about which features a kernel supports. -// TODO Windows: Factor out platform specific capabilities. 
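Both numCPU implementations above reduce to counting set bits in an affinity mask. A self-contained sketch of that idea using only the standard library; the mask value is invented for illustration:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// A mask with bits 0-3 set models a process pinned to four CPUs.
	mask := uint64(0b1111)
	fmt.Println(bits.OnesCount64(mask)) // prints 4
}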
-type SysInfo struct { - // Whether the kernel supports AppArmor or not - AppArmor bool - // Whether the kernel supports Seccomp or not - Seccomp bool - - cgroupMemInfo - cgroupCPUInfo - cgroupBlkioInfo - cgroupCpusetInfo - cgroupPids - - // Whether IPv4 forwarding is supported or not, if this was disabled, networking will not work - IPv4ForwardingDisabled bool - - // Whether bridge-nf-call-iptables is supported or not - BridgeNFCallIPTablesDisabled bool - - // Whether bridge-nf-call-ip6tables is supported or not - BridgeNFCallIP6TablesDisabled bool - - // Whether the cgroup has the mountpoint of "devices" or not - CgroupDevicesEnabled bool -} - -type cgroupMemInfo struct { - // Whether memory limit is supported or not - MemoryLimit bool - - // Whether swap limit is supported or not - SwapLimit bool - - // Whether soft limit is supported or not - MemoryReservation bool - - // Whether OOM killer disable is supported or not - OomKillDisable bool - - // Whether memory swappiness is supported or not - MemorySwappiness bool - - // Whether kernel memory limit is supported or not - KernelMemory bool -} - -type cgroupCPUInfo struct { - // Whether CPU shares is supported or not - CPUShares bool - - // Whether CPU CFS(Completely Fair Scheduler) period is supported or not - CPUCfsPeriod bool - - // Whether CPU CFS(Completely Fair Scheduler) quota is supported or not - CPUCfsQuota bool - - // Whether CPU real-time period is supported or not - CPURealtimePeriod bool - - // Whether CPU real-time runtime is supported or not - CPURealtimeRuntime bool -} - -type cgroupBlkioInfo struct { - // Whether Block IO weight is supported or not - BlkioWeight bool - - // Whether Block IO weight_device is supported or not - BlkioWeightDevice bool - - // Whether Block IO read limit in bytes per second is supported or not - BlkioReadBpsDevice bool - - // Whether Block IO write limit in bytes per second is supported or not - BlkioWriteBpsDevice bool - - // Whether Block IO read limit in IO per second is supported or not - BlkioReadIOpsDevice bool - - // Whether Block IO write limit in IO per second is supported or not - BlkioWriteIOpsDevice bool -} - -type cgroupCpusetInfo struct { - // Whether Cpuset is supported or not - Cpuset bool - - // Available Cpuset's cpus - Cpus string - - // Available Cpuset's memory nodes - Mems string -} - -type cgroupPids struct { - // Whether Pids Limit is supported or not - PidsLimit bool -} - -// IsCpusetCpusAvailable returns `true` if the provided string set is contained -// in cgroup's cpuset.cpus set, `false` otherwise. -// If error is not nil a parsing error occurred. -func (c cgroupCpusetInfo) IsCpusetCpusAvailable(provided string) (bool, error) { - return isCpusetListAvailable(provided, c.Cpus) -} - -// IsCpusetMemsAvailable returns `true` if the provided string set is contained -// in cgroup's cpuset.mems set, `false` otherwise. -// If error is not nil a parsing error occurred. 
-func (c cgroupCpusetInfo) IsCpusetMemsAvailable(provided string) (bool, error) { - return isCpusetListAvailable(provided, c.Mems) -} - -func isCpusetListAvailable(provided, available string) (bool, error) { - parsedProvided, err := parsers.ParseUintList(provided) - if err != nil { - return false, err - } - parsedAvailable, err := parsers.ParseUintList(available) - if err != nil { - return false, err - } - for k := range parsedProvided { - if !parsedAvailable[k] { - return false, nil - } - } - return true, nil -} - -// Returns bit count of 1, used by NumCPU -func popcnt(x uint64) (n byte) { - x -= (x >> 1) & 0x5555555555555555 - x = (x>>2)&0x3333333333333333 + x&0x3333333333333333 - x += x >> 4 - x &= 0x0f0f0f0f0f0f0f0f - x *= 0x0101010101010101 - return byte(x >> 56) -} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go deleted file mode 100644 index dde5be19b..000000000 --- a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go +++ /dev/null @@ -1,254 +0,0 @@ -package sysinfo // import "github.com/docker/docker/pkg/sysinfo" - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "strings" - - "github.com/opencontainers/runc/libcontainer/cgroups" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -func findCgroupMountpoints() (map[string]string, error) { - cgMounts, err := cgroups.GetCgroupMounts(false) - if err != nil { - return nil, fmt.Errorf("Failed to parse cgroup information: %v", err) - } - mps := make(map[string]string) - for _, m := range cgMounts { - for _, ss := range m.Subsystems { - mps[ss] = m.Mountpoint - } - } - return mps, nil -} - -// New returns a new SysInfo, using the filesystem to detect which features -// the kernel supports. If `quiet` is `false` warnings are printed in logs -// whenever an error occurs or misconfigurations are present. -func New(quiet bool) *SysInfo { - sysInfo := &SysInfo{} - cgMounts, err := findCgroupMountpoints() - if err != nil { - logrus.Warnf("Failed to parse cgroup information: %v", err) - } else { - sysInfo.cgroupMemInfo = checkCgroupMem(cgMounts, quiet) - sysInfo.cgroupCPUInfo = checkCgroupCPU(cgMounts, quiet) - sysInfo.cgroupBlkioInfo = checkCgroupBlkioInfo(cgMounts, quiet) - sysInfo.cgroupCpusetInfo = checkCgroupCpusetInfo(cgMounts, quiet) - sysInfo.cgroupPids = checkCgroupPids(quiet) - } - - _, ok := cgMounts["devices"] - sysInfo.CgroupDevicesEnabled = ok - - sysInfo.IPv4ForwardingDisabled = !readProcBool("/proc/sys/net/ipv4/ip_forward") - sysInfo.BridgeNFCallIPTablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-iptables") - sysInfo.BridgeNFCallIP6TablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-ip6tables") - - // Check if AppArmor is supported. - if _, err := os.Stat("/sys/kernel/security/apparmor"); !os.IsNotExist(err) { - sysInfo.AppArmor = true - } - - // Check if Seccomp is supported, via CONFIG_SECCOMP. - if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL { - // Make sure the kernel has CONFIG_SECCOMP_FILTER. - if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL { - sysInfo.Seccomp = true - } - } - - return sysInfo -} - -// checkCgroupMem reads the memory information from the memory cgroup mount point. 
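The popcnt helper above is a branch-free SWAR population count. A quick sketch checking it against math/bits, with the helper inlined for the comparison:

package main

import (
	"fmt"
	"math/bits"
)

// popcnt mirrors the SWAR bit count from sysinfo.go above.
func popcnt(x uint64) byte {
	x -= (x >> 1) & 0x5555555555555555
	x = (x>>2)&0x3333333333333333 + x&0x3333333333333333
	x += x >> 4
	x &= 0x0f0f0f0f0f0f0f0f
	x *= 0x0101010101010101
	return byte(x >> 56)
}

func main() {
	for _, v := range []uint64{0, 1, 0xff, 0xdeadbeef} {
		fmt.Println(popcnt(v) == byte(bits.OnesCount64(v))) // true for all
	}
}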
-func checkCgroupMem(cgMounts map[string]string, quiet bool) cgroupMemInfo { - mountPoint, ok := cgMounts["memory"] - if !ok { - if !quiet { - logrus.Warn("Your kernel does not support cgroup memory limit") - } - return cgroupMemInfo{} - } - - swapLimit := cgroupEnabled(mountPoint, "memory.memsw.limit_in_bytes") - if !quiet && !swapLimit { - logrus.Warn("Your kernel does not support swap memory limit") - } - memoryReservation := cgroupEnabled(mountPoint, "memory.soft_limit_in_bytes") - if !quiet && !memoryReservation { - logrus.Warn("Your kernel does not support memory reservation") - } - oomKillDisable := cgroupEnabled(mountPoint, "memory.oom_control") - if !quiet && !oomKillDisable { - logrus.Warn("Your kernel does not support oom control") - } - memorySwappiness := cgroupEnabled(mountPoint, "memory.swappiness") - if !quiet && !memorySwappiness { - logrus.Warn("Your kernel does not support memory swappiness") - } - kernelMemory := cgroupEnabled(mountPoint, "memory.kmem.limit_in_bytes") - if !quiet && !kernelMemory { - logrus.Warn("Your kernel does not support kernel memory limit") - } - - return cgroupMemInfo{ - MemoryLimit: true, - SwapLimit: swapLimit, - MemoryReservation: memoryReservation, - OomKillDisable: oomKillDisable, - MemorySwappiness: memorySwappiness, - KernelMemory: kernelMemory, - } -} - -// checkCgroupCPU reads the cpu information from the cpu cgroup mount point. -func checkCgroupCPU(cgMounts map[string]string, quiet bool) cgroupCPUInfo { - mountPoint, ok := cgMounts["cpu"] - if !ok { - if !quiet { - logrus.Warn("Unable to find cpu cgroup in mounts") - } - return cgroupCPUInfo{} - } - - cpuShares := cgroupEnabled(mountPoint, "cpu.shares") - if !quiet && !cpuShares { - logrus.Warn("Your kernel does not support cgroup cpu shares") - } - - cpuCfsPeriod := cgroupEnabled(mountPoint, "cpu.cfs_period_us") - if !quiet && !cpuCfsPeriod { - logrus.Warn("Your kernel does not support cgroup cfs period") - } - - cpuCfsQuota := cgroupEnabled(mountPoint, "cpu.cfs_quota_us") - if !quiet && !cpuCfsQuota { - logrus.Warn("Your kernel does not support cgroup cfs quotas") - } - - cpuRealtimePeriod := cgroupEnabled(mountPoint, "cpu.rt_period_us") - if !quiet && !cpuRealtimePeriod { - logrus.Warn("Your kernel does not support cgroup rt period") - } - - cpuRealtimeRuntime := cgroupEnabled(mountPoint, "cpu.rt_runtime_us") - if !quiet && !cpuRealtimeRuntime { - logrus.Warn("Your kernel does not support cgroup rt runtime") - } - - return cgroupCPUInfo{ - CPUShares: cpuShares, - CPUCfsPeriod: cpuCfsPeriod, - CPUCfsQuota: cpuCfsQuota, - CPURealtimePeriod: cpuRealtimePeriod, - CPURealtimeRuntime: cpuRealtimeRuntime, - } -} - -// checkCgroupBlkioInfo reads the blkio information from the blkio cgroup mount point. 
-func checkCgroupBlkioInfo(cgMounts map[string]string, quiet bool) cgroupBlkioInfo { - mountPoint, ok := cgMounts["blkio"] - if !ok { - if !quiet { - logrus.Warn("Unable to find blkio cgroup in mounts") - } - return cgroupBlkioInfo{} - } - - weight := cgroupEnabled(mountPoint, "blkio.weight") - if !quiet && !weight { - logrus.Warn("Your kernel does not support cgroup blkio weight") - } - - weightDevice := cgroupEnabled(mountPoint, "blkio.weight_device") - if !quiet && !weightDevice { - logrus.Warn("Your kernel does not support cgroup blkio weight_device") - } - - readBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_bps_device") - if !quiet && !readBpsDevice { - logrus.Warn("Your kernel does not support cgroup blkio throttle.read_bps_device") - } - - writeBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_bps_device") - if !quiet && !writeBpsDevice { - logrus.Warn("Your kernel does not support cgroup blkio throttle.write_bps_device") - } - readIOpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_iops_device") - if !quiet && !readIOpsDevice { - logrus.Warn("Your kernel does not support cgroup blkio throttle.read_iops_device") - } - - writeIOpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_iops_device") - if !quiet && !writeIOpsDevice { - logrus.Warn("Your kernel does not support cgroup blkio throttle.write_iops_device") - } - return cgroupBlkioInfo{ - BlkioWeight: weight, - BlkioWeightDevice: weightDevice, - BlkioReadBpsDevice: readBpsDevice, - BlkioWriteBpsDevice: writeBpsDevice, - BlkioReadIOpsDevice: readIOpsDevice, - BlkioWriteIOpsDevice: writeIOpsDevice, - } -} - -// checkCgroupCpusetInfo reads the cpuset information from the cpuset cgroup mount point. -func checkCgroupCpusetInfo(cgMounts map[string]string, quiet bool) cgroupCpusetInfo { - mountPoint, ok := cgMounts["cpuset"] - if !ok { - if !quiet { - logrus.Warn("Unable to find cpuset cgroup in mounts") - } - return cgroupCpusetInfo{} - } - - cpus, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.cpus")) - if err != nil { - return cgroupCpusetInfo{} - } - - mems, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.mems")) - if err != nil { - return cgroupCpusetInfo{} - } - - return cgroupCpusetInfo{ - Cpuset: true, - Cpus: strings.TrimSpace(string(cpus)), - Mems: strings.TrimSpace(string(mems)), - } -} - -// checkCgroupPids reads the pids information from the pids cgroup mount point. -func checkCgroupPids(quiet bool) cgroupPids { - _, err := cgroups.FindCgroupMountpoint("pids") - if err != nil { - if !quiet { - logrus.Warn(err) - } - return cgroupPids{} - } - - return cgroupPids{ - PidsLimit: true, - } -} - -func cgroupEnabled(mountPoint, name string) bool { - _, err := os.Stat(path.Join(mountPoint, name)) - return err == nil -} - -func readProcBool(path string) bool { - val, err := ioutil.ReadFile(path) - if err != nil { - return false - } - return strings.TrimSpace(string(val)) == "1" -} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_unix.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_unix.go deleted file mode 100644 index 23cc695fb..000000000 --- a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_unix.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !linux,!windows - -package sysinfo // import "github.com/docker/docker/pkg/sysinfo" - -// New returns an empty SysInfo for non linux for now. 
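The cgroupEnabled and readProcBool probes above boil down to checking for the presence or contents of a single file. A minimal sketch of the same check; the cgroup v1 mount path is an assumption about the host:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Mirrors cgroupEnabled: a feature counts as supported if its
	// control file exists under the subsystem mount point.
	mountPoint := "/sys/fs/cgroup/memory" // assumed cgroup v1 layout
	_, err := os.Stat(filepath.Join(mountPoint, "memory.limit_in_bytes"))
	fmt.Println("memory limit supported:", err == nil)
}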
-func New(quiet bool) *SysInfo { - sysInfo := &SysInfo{} - return sysInfo -} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_windows.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_windows.go deleted file mode 100644 index 5f68524e7..000000000 --- a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package sysinfo // import "github.com/docker/docker/pkg/sysinfo" - -// New returns an empty SysInfo for windows for now. -func New(quiet bool) *SysInfo { - sysInfo := &SysInfo{} - return sysInfo -} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes.go b/vendor/github.com/docker/docker/pkg/system/chtimes.go deleted file mode 100644 index c26a4e24b..000000000 --- a/vendor/github.com/docker/docker/pkg/system/chtimes.go +++ /dev/null @@ -1,31 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "os" - "time" -) - -// Chtimes changes the access time and modified time of a file at the given path -func Chtimes(name string, atime time.Time, mtime time.Time) error { - unixMinTime := time.Unix(0, 0) - unixMaxTime := maxTime - - // If the modified time is prior to the Unix Epoch, or after the - // end of Unix Time, os.Chtimes has undefined behavior - // default to Unix Epoch in this case, just in case - - if atime.Before(unixMinTime) || atime.After(unixMaxTime) { - atime = unixMinTime - } - - if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { - mtime = unixMinTime - } - - if err := os.Chtimes(name, atime, mtime); err != nil { - return err - } - - // Take platform specific action for setting create time. - return setCTime(name, mtime) -} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go b/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go deleted file mode 100644 index 259138a45..000000000 --- a/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !windows - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "time" -) - -//setCTime will set the create time on a file. On Unix, the create -//time is updated as a side effect of setting the modified time, so -//no action is required. -func setCTime(path string, ctime time.Time) error { - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go deleted file mode 100644 index d3a115ff4..000000000 --- a/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go +++ /dev/null @@ -1,26 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "time" - - "golang.org/x/sys/windows" -) - -//setCTime will set the create time on a file. On Windows, this requires -//calling SetFileTime and explicitly including the create time. 
-func setCTime(path string, ctime time.Time) error { - ctimespec := windows.NsecToTimespec(ctime.UnixNano()) - pathp, e := windows.UTF16PtrFromString(path) - if e != nil { - return e - } - h, e := windows.CreateFile(pathp, - windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil, - windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0) - if e != nil { - return e - } - defer windows.Close(h) - c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec)) - return windows.SetFileTime(h, &c, nil, nil) -} diff --git a/vendor/github.com/docker/docker/pkg/system/errors.go b/vendor/github.com/docker/docker/pkg/system/errors.go deleted file mode 100644 index 2573d7162..000000000 --- a/vendor/github.com/docker/docker/pkg/system/errors.go +++ /dev/null @@ -1,13 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "errors" -) - -var ( - // ErrNotSupportedPlatform means the platform is not supported. - ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") - - // ErrNotSupportedOperatingSystem means the operating system is not supported. - ErrNotSupportedOperatingSystem = errors.New("operating system is not supported") -) diff --git a/vendor/github.com/docker/docker/pkg/system/exitcode.go b/vendor/github.com/docker/docker/pkg/system/exitcode.go deleted file mode 100644 index 4ba8fe35b..000000000 --- a/vendor/github.com/docker/docker/pkg/system/exitcode.go +++ /dev/null @@ -1,19 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "fmt" - "os/exec" - "syscall" -) - -// GetExitCode returns the ExitStatus of the specified error if its type is -// exec.ExitError, returns 0 and an error otherwise. -func GetExitCode(err error) (int, error) { - exitCode := 0 - if exiterr, ok := err.(*exec.ExitError); ok { - if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { - return procExit.ExitStatus(), nil - } - } - return exitCode, fmt.Errorf("failed to get exit code") -} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/docker/docker/pkg/system/filesys.go deleted file mode 100644 index adeb16305..000000000 --- a/vendor/github.com/docker/docker/pkg/system/filesys.go +++ /dev/null @@ -1,67 +0,0 @@ -// +build !windows - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "io/ioutil" - "os" - "path/filepath" -) - -// MkdirAllWithACL is a wrapper for MkdirAll on unix systems. -func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { - return MkdirAll(path, perm, sddl) -} - -// MkdirAll creates a directory named path along with any necessary parents, -// with permission specified by attribute perm for all dir created. -func MkdirAll(path string, perm os.FileMode, sddl string) error { - return os.MkdirAll(path, perm) -} - -// IsAbs is a platform-specific wrapper for filepath.IsAbs. -func IsAbs(path string) bool { - return filepath.IsAbs(path) -} - -// The functions below here are wrappers for the equivalents in the os and ioutils packages. -// They are passthrough on Unix platforms, and only relevant on Windows. - -// CreateSequential creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. 
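GetExitCode above unwraps an exec.ExitError down to its wait status. The same pattern using only the standard library, as a Unix-only sketch:

package main

import (
	"fmt"
	"os/exec"
	"syscall"
)

func main() {
	err := exec.Command("sh", "-c", "exit 3").Run()
	if exitErr, ok := err.(*exec.ExitError); ok {
		if ws, ok := exitErr.Sys().(syscall.WaitStatus); ok {
			fmt.Println(ws.ExitStatus()) // prints 3
		}
	}
}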
-func CreateSequential(name string) (*os.File, error) { - return os.Create(name) -} - -// OpenSequential opens the named file for reading. If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. -func OpenSequential(name string) (*os.File, error) { - return os.Open(name) -} - -// OpenFileSequential is the generalized open call; most users will use Open -// or Create instead. It opens the named file with specified flag -// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, -// methods on the returned File can be used for I/O. -// If there is an error, it will be of type *PathError. -func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { - return os.OpenFile(name, flag, perm) -} - -// TempFileSequential creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *os.File. -// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. -func TempFileSequential(dir, prefix string) (f *os.File, err error) { - return ioutil.TempFile(dir, prefix) -} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go deleted file mode 100644 index a1f6013f1..000000000 --- a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go +++ /dev/null @@ -1,296 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "os" - "path/filepath" - "regexp" - "strconv" - "strings" - "sync" - "syscall" - "time" - "unsafe" - - winio "github.com/Microsoft/go-winio" - "golang.org/x/sys/windows" -) - -const ( - // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System - SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" - // SddlNtvmAdministratorsLocalSystem is NT VIRTUAL MACHINE\Virtual Machines plus local administrators plus NT AUTHORITY\System - SddlNtvmAdministratorsLocalSystem = "D:P(A;OICI;GA;;;S-1-5-83-0)(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" -) - -// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory -// with an appropriate SDDL defined ACL. -func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { - return mkdirall(path, true, sddl) -} - -// MkdirAll implementation that is volume path aware for Windows. -func MkdirAll(path string, _ os.FileMode, sddl string) error { - return mkdirall(path, false, sddl) -} - -// mkdirall is a custom version of os.MkdirAll modified for use on Windows -// so that it is both volume path aware, and can create a directory with -// a DACL. -func mkdirall(path string, applyACL bool, sddl string) error { - if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { - return nil - } - - // The rest of this method is largely copied from os.MkdirAll and should be kept - // as-is to ensure compatibility. - - // Fast path: if we can tell whether path is a directory or file, stop with success or error. 
-	dir, err := os.Stat(path)
-	if err == nil {
-		if dir.IsDir() {
-			return nil
-		}
-		return &os.PathError{
-			Op:   "mkdir",
-			Path: path,
-			Err:  syscall.ENOTDIR,
-		}
-	}
-
-	// Slow path: make sure parent exists and then call Mkdir for path.
-	i := len(path)
-	for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
-		i--
-	}
-
-	j := i
-	for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
-		j--
-	}
-
-	if j > 1 {
-		// Create parent
-		err = mkdirall(path[0:j-1], false, sddl)
-		if err != nil {
-			return err
-		}
-	}
-
-	// Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result.
-	if applyACL {
-		err = mkdirWithACL(path, sddl)
-	} else {
-		err = os.Mkdir(path, 0)
-	}
-
-	if err != nil {
-		// Handle arguments like "foo/." by
-		// double-checking that directory doesn't exist.
-		dir, err1 := os.Lstat(path)
-		if err1 == nil && dir.IsDir() {
-			return nil
-		}
-		return err
-	}
-	return nil
-}
-
-// mkdirWithACL creates a new directory. If there is an error, it will be of
-// type *PathError.
-//
-// This is a modified and combined version of os.Mkdir and windows.Mkdir
-// in golang to cater for creating a directory with an ACL permitting full
-// access, with inheritance, to any subfolder/file for Built-in Administrators
-// and Local System.
-func mkdirWithACL(name string, sddl string) error {
-	sa := windows.SecurityAttributes{Length: 0}
-	sd, err := winio.SddlToSecurityDescriptor(sddl)
-	if err != nil {
-		return &os.PathError{Op: "mkdir", Path: name, Err: err}
-	}
-	sa.Length = uint32(unsafe.Sizeof(sa))
-	sa.InheritHandle = 1
-	sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0]))
-
-	namep, err := windows.UTF16PtrFromString(name)
-	if err != nil {
-		return &os.PathError{Op: "mkdir", Path: name, Err: err}
-	}
-
-	e := windows.CreateDirectory(namep, &sa)
-	if e != nil {
-		return &os.PathError{Op: "mkdir", Path: name, Err: e}
-	}
-	return nil
-}
-
-// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows,
-// golang filepath.IsAbs does not consider a path \windows\system32 as absolute
-// as it doesn't start with a drive-letter/colon combination. However, in
-// docker we need to verify things such as WORKDIR /windows/system32 in
-// a Dockerfile (which gets translated to \windows\system32 when being processed
-// by the daemon). This SHOULD be treated as absolute from a docker processing
-// perspective.
-func IsAbs(path string) bool {
-	if !filepath.IsAbs(path) {
-		if !strings.HasPrefix(path, string(os.PathSeparator)) {
-			return false
-		}
-	}
-	return true
-}
-
-// The origin of the functions below here is the golang OS and windows packages,
-// slightly modified to only cope with files, not directories due to the
-// specific use case.
-//
-// The alteration is to allow a file on Windows to be opened with
-// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating
-// the standby list, particularly when accessing large files such as layer.tar.
-
-// CreateSequential creates the named file with mode 0666 (before umask), truncating
-// it if it already exists. If successful, methods on the returned
-// File can be used for I/O; the associated file descriptor has mode
-// O_RDWR.
-// If there is an error, it will be of type *PathError.
-func CreateSequential(name string) (*os.File, error) {
-	return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0)
-}
-
-// OpenSequential opens the named file for reading.
If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. -func OpenSequential(name string) (*os.File, error) { - return OpenFileSequential(name, os.O_RDONLY, 0) -} - -// OpenFileSequential is the generalized open call; most users will use Open -// or Create instead. -// If there is an error, it will be of type *PathError. -func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) { - if name == "" { - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} - } - r, errf := windowsOpenFileSequential(name, flag, 0) - if errf == nil { - return r, nil - } - return nil, &os.PathError{Op: "open", Path: name, Err: errf} -} - -func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { - r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0) - if e != nil { - return nil, e - } - return os.NewFile(uintptr(r), name), nil -} - -func makeInheritSa() *windows.SecurityAttributes { - var sa windows.SecurityAttributes - sa.Length = uint32(unsafe.Sizeof(sa)) - sa.InheritHandle = 1 - return &sa -} - -func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { - if len(path) == 0 { - return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND - } - pathp, err := windows.UTF16PtrFromString(path) - if err != nil { - return windows.InvalidHandle, err - } - var access uint32 - switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) { - case windows.O_RDONLY: - access = windows.GENERIC_READ - case windows.O_WRONLY: - access = windows.GENERIC_WRITE - case windows.O_RDWR: - access = windows.GENERIC_READ | windows.GENERIC_WRITE - } - if mode&windows.O_CREAT != 0 { - access |= windows.GENERIC_WRITE - } - if mode&windows.O_APPEND != 0 { - access &^= windows.GENERIC_WRITE - access |= windows.FILE_APPEND_DATA - } - sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE) - var sa *windows.SecurityAttributes - if mode&windows.O_CLOEXEC == 0 { - sa = makeInheritSa() - } - var createmode uint32 - switch { - case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL): - createmode = windows.CREATE_NEW - case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC): - createmode = windows.CREATE_ALWAYS - case mode&windows.O_CREAT == windows.O_CREAT: - createmode = windows.OPEN_ALWAYS - case mode&windows.O_TRUNC == windows.O_TRUNC: - createmode = windows.TRUNCATE_EXISTING - default: - createmode = windows.OPEN_EXISTING - } - // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. - //https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx - const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN - h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) - return h, e -} - -// Helpers for TempFileSequential -var rand uint32 -var randmu sync.Mutex - -func reseed() uint32 { - return uint32(time.Now().UnixNano() + int64(os.Getpid())) -} -func nextSuffix() string { - randmu.Lock() - r := rand - if r == 0 { - r = reseed() - } - r = r*1664525 + 1013904223 // constants from Numerical Recipes - rand = r - randmu.Unlock() - return strconv.Itoa(int(1e9 + r%1e9))[1:] -} - -// TempFileSequential is a copy of ioutil.TempFile, modified to use sequential -// file access. 
Below is the original comment from golang: -// TempFile creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *os.File. -// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. -func TempFileSequential(dir, prefix string) (f *os.File, err error) { - if dir == "" { - dir = os.TempDir() - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - name := filepath.Join(dir, prefix+nextSuffix()) - f, err = OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - rand = reseed() - randmu.Unlock() - } - continue - } - break - } - return -} diff --git a/vendor/github.com/docker/docker/pkg/system/init.go b/vendor/github.com/docker/docker/pkg/system/init.go deleted file mode 100644 index a17597aab..000000000 --- a/vendor/github.com/docker/docker/pkg/system/init.go +++ /dev/null @@ -1,22 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "syscall" - "time" - "unsafe" -) - -// Used by chtimes -var maxTime time.Time - -func init() { - // chtimes initialization - if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { - // This is a 64 bit timespec - // os.Chtimes limits time to the following - maxTime = time.Unix(0, 1<<63-1) - } else { - // This is a 32 bit timespec - maxTime = time.Unix(1<<31-1, 0) - } -} diff --git a/vendor/github.com/docker/docker/pkg/system/init_unix.go b/vendor/github.com/docker/docker/pkg/system/init_unix.go deleted file mode 100644 index 4996a67c1..000000000 --- a/vendor/github.com/docker/docker/pkg/system/init_unix.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !windows - -package system // import "github.com/docker/docker/pkg/system" - -// InitLCOW does nothing since LCOW is a windows only feature -func InitLCOW(experimental bool) { -} diff --git a/vendor/github.com/docker/docker/pkg/system/init_windows.go b/vendor/github.com/docker/docker/pkg/system/init_windows.go deleted file mode 100644 index 4910ff69d..000000000 --- a/vendor/github.com/docker/docker/pkg/system/init_windows.go +++ /dev/null @@ -1,12 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -// lcowSupported determines if Linux Containers on Windows are supported. -var lcowSupported = false - -// InitLCOW sets whether LCOW is supported or not -func InitLCOW(experimental bool) { - v := GetOSVersion() - if experimental && v.Build >= 16299 { - lcowSupported = true - } -} diff --git a/vendor/github.com/docker/docker/pkg/system/lcow.go b/vendor/github.com/docker/docker/pkg/system/lcow.go deleted file mode 100644 index 5c3fbfe6f..000000000 --- a/vendor/github.com/docker/docker/pkg/system/lcow.go +++ /dev/null @@ -1,69 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "fmt" - "runtime" - "strings" - - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// ValidatePlatform determines if a platform structure is valid. -// TODO This is a temporary function - can be replaced by parsing from -// https://github.com/containerd/containerd/pull/1403/files at a later date. 
-// @jhowardmsft
-func ValidatePlatform(platform *specs.Platform) error {
-	platform.Architecture = strings.ToLower(platform.Architecture)
-	platform.OS = strings.ToLower(platform.OS)
-	// Based on https://github.com/moby/moby/pull/34642#issuecomment-330375350, do
-	// not support anything except operating system.
-	if platform.Architecture != "" {
-		return fmt.Errorf("invalid platform architecture %q", platform.Architecture)
-	}
-	if platform.OS != "" {
-		if !(platform.OS == runtime.GOOS || (LCOWSupported() && platform.OS == "linux")) {
-			return fmt.Errorf("invalid platform os %q", platform.OS)
-		}
-	}
-	if len(platform.OSFeatures) != 0 {
-		return fmt.Errorf("invalid platform osfeatures %q", platform.OSFeatures)
-	}
-	if platform.OSVersion != "" {
-		return fmt.Errorf("invalid platform osversion %q", platform.OSVersion)
-	}
-	if platform.Variant != "" {
-		return fmt.Errorf("invalid platform variant %q", platform.Variant)
-	}
-	return nil
-}
-
-// ParsePlatform parses a platform string in the format os[/arch[/variant]]
-// into an OCI image-spec platform structure.
-// TODO This is a temporary function - can be replaced by parsing from
-// https://github.com/containerd/containerd/pull/1403/files at a later date.
-// @jhowardmsft
-func ParsePlatform(in string) *specs.Platform {
-	p := &specs.Platform{}
-	elements := strings.SplitN(strings.ToLower(in), "/", 3)
-	if len(elements) == 3 {
-		p.Variant = elements[2]
-	}
-	if len(elements) >= 2 {
-		p.Architecture = elements[1]
-	}
-	if len(elements) >= 1 {
-		p.OS = elements[0]
-	}
-	return p
-}
-
-// IsOSSupported determines if an operating system is supported by the host
-func IsOSSupported(os string) bool {
-	if runtime.GOOS == os {
-		return true
-	}
-	if LCOWSupported() && os == "linux" {
-		return true
-	}
-	return false
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/lcow_unix.go b/vendor/github.com/docker/docker/pkg/system/lcow_unix.go
deleted file mode 100644
index 26397fb8a..000000000
--- a/vendor/github.com/docker/docker/pkg/system/lcow_unix.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// +build !windows
-
-package system // import "github.com/docker/docker/pkg/system"
-
-// LCOWSupported returns true if Linux containers on Windows are supported.
-func LCOWSupported() bool {
-	return false
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/lcow_windows.go b/vendor/github.com/docker/docker/pkg/system/lcow_windows.go
deleted file mode 100644
index f0139df8f..000000000
--- a/vendor/github.com/docker/docker/pkg/system/lcow_windows.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package system // import "github.com/docker/docker/pkg/system"
-
-// LCOWSupported returns true if Linux containers on Windows are supported.
-func LCOWSupported() bool {
-	return lcowSupported
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_unix.go b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go
deleted file mode 100644
index 7477995f1..000000000
--- a/vendor/github.com/docker/docker/pkg/system/lstat_unix.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build !windows
-
-package system // import "github.com/docker/docker/pkg/system"
-
-import (
-	"syscall"
-)
-
-// Lstat takes a path to a file and returns
-// a system.StatT type pertaining to that file.
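ParsePlatform above is essentially a lower-casing SplitN. A standalone sketch of the same parse; the platform string is an arbitrary example:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Mirrors ParsePlatform: os[/arch[/variant]], lower-cased.
	elements := strings.SplitN(strings.ToLower("Linux/ARM64/v8"), "/", 3)
	fmt.Println(elements) // [linux arm64 v8]
}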
-// -// Throws an error if the file does not exist -func Lstat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Lstat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go deleted file mode 100644 index 359c791d9..000000000 --- a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go +++ /dev/null @@ -1,14 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import "os" - -// Lstat calls os.Lstat to get a fileinfo interface back. -// This is then copied into our own locally defined structure. -func Lstat(path string) (*StatT, error) { - fi, err := os.Lstat(path) - if err != nil { - return nil, err - } - - return fromStatT(&fi) -} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo.go b/vendor/github.com/docker/docker/pkg/system/meminfo.go deleted file mode 100644 index 6667eb84d..000000000 --- a/vendor/github.com/docker/docker/pkg/system/meminfo.go +++ /dev/null @@ -1,17 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -// MemInfo contains memory statistics of the host system. -type MemInfo struct { - // Total usable RAM (i.e. physical RAM minus a few reserved bits and the - // kernel binary code). - MemTotal int64 - - // Amount of free memory. - MemFree int64 - - // Total amount of swap space available. - SwapTotal int64 - - // Amount of swap space that is currently unused. - SwapFree int64 -} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go deleted file mode 100644 index d79e8b076..000000000 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go +++ /dev/null @@ -1,65 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "bufio" - "io" - "os" - "strconv" - "strings" - - "github.com/docker/go-units" -) - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. -func ReadMemInfo() (*MemInfo, error) { - file, err := os.Open("/proc/meminfo") - if err != nil { - return nil, err - } - defer file.Close() - return parseMemInfo(file) -} - -// parseMemInfo parses the /proc/meminfo file into -// a MemInfo object given an io.Reader to the file. -// Throws error if there are problems reading from the file -func parseMemInfo(reader io.Reader) (*MemInfo, error) { - meminfo := &MemInfo{} - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - // Expected format: ["MemTotal:", "1234", "kB"] - parts := strings.Fields(scanner.Text()) - - // Sanity checks: Skip malformed entries. - if len(parts) < 3 || parts[2] != "kB" { - continue - } - - // Convert to bytes. - size, err := strconv.Atoi(parts[1]) - if err != nil { - continue - } - bytes := int64(size) * units.KiB - - switch parts[0] { - case "MemTotal:": - meminfo.MemTotal = bytes - case "MemFree:": - meminfo.MemFree = bytes - case "SwapTotal:": - meminfo.SwapTotal = bytes - case "SwapFree:": - meminfo.SwapFree = bytes - } - - } - - // Handle errors that may have occurred during the reading of the file. 
- if err := scanner.Err(); err != nil { - return nil, err - } - - return meminfo, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go deleted file mode 100644 index 56f449426..000000000 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !linux,!windows - -package system // import "github.com/docker/docker/pkg/system" - -// ReadMemInfo is not supported on platforms other than linux and windows. -func ReadMemInfo() (*MemInfo, error) { - return nil, ErrNotSupportedPlatform -} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go deleted file mode 100644 index 6ed93f2fe..000000000 --- a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go +++ /dev/null @@ -1,45 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "unsafe" - - "golang.org/x/sys/windows" -) - -var ( - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - - procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") -) - -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx -type memorystatusex struct { - dwLength uint32 - dwMemoryLoad uint32 - ullTotalPhys uint64 - ullAvailPhys uint64 - ullTotalPageFile uint64 - ullAvailPageFile uint64 - ullTotalVirtual uint64 - ullAvailVirtual uint64 - ullAvailExtendedVirtual uint64 -} - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. -func ReadMemInfo() (*MemInfo, error) { - msi := &memorystatusex{ - dwLength: 64, - } - r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) - if r1 == 0 { - return &MemInfo{}, nil - } - return &MemInfo{ - MemTotal: int64(msi.ullTotalPhys), - MemFree: int64(msi.ullAvailPhys), - SwapTotal: int64(msi.ullTotalPageFile), - SwapFree: int64(msi.ullAvailPageFile), - }, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/docker/docker/pkg/system/mknod.go deleted file mode 100644 index b132482e0..000000000 --- a/vendor/github.com/docker/docker/pkg/system/mknod.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build !windows - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "golang.org/x/sys/unix" -) - -// Mknod creates a filesystem node (file, device special file or named pipe) named path -// with attributes specified by mode and dev. -func Mknod(path string, mode uint32, dev int) error { - return unix.Mknod(path, mode, dev) -} - -// Mkdev is used to build the value of linux devices (in /dev/) which specifies major -// and minor number of the newly created device special file. -// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. -// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, -// then the top 12 bits of the minor. 
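The bit layout described above can be checked by hand. A worked sketch for major 8, minor 1 (the conventional /dev/sda1 numbers), covering the 32-bit portion of the encoding:

package main

import "fmt"

func main() {
	major, minor := uint32(8), uint32(1)
	// Low to high: lower 8 bits of minor, 12 bits of major, upper bits of minor.
	dev := (minor & 0xff) | ((major & 0xfff) << 8) | ((minor &^ 0xff) << 12)
	fmt.Printf("%#x\n", dev) // 0x801
}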
-func Mkdev(major int64, minor int64) uint32 {
-	return uint32(unix.Mkdev(uint32(major), uint32(minor)))
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go
deleted file mode 100644
index ec89d7a15..000000000
--- a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package system // import "github.com/docker/docker/pkg/system"
-
-// Mknod is not implemented on Windows.
-func Mknod(path string, mode uint32, dev int) error {
-	return ErrNotSupportedPlatform
-}
-
-// Mkdev is not implemented on Windows.
-func Mkdev(major int64, minor int64) uint32 {
-	panic("Mkdev not implemented on Windows.")
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/path.go b/vendor/github.com/docker/docker/pkg/system/path.go
deleted file mode 100644
index a3d957afa..000000000
--- a/vendor/github.com/docker/docker/pkg/system/path.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package system // import "github.com/docker/docker/pkg/system"
-
-import (
-	"fmt"
-	"path/filepath"
-	"runtime"
-	"strings"
-
-	"github.com/containerd/continuity/pathdriver"
-)
-
-const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-
-// DefaultPathEnv is a unix-style list of directories to search for
-// executables. Each directory is separated from the next by a colon
-// ':' character.
-func DefaultPathEnv(os string) string {
-	if runtime.GOOS == "windows" {
-		if os != runtime.GOOS {
-			return defaultUnixPathEnv
-		}
-		// Deliberately empty on Windows containers on Windows as the default path will be set by
-		// the container. Docker has no context of what the default path should be.
-		return ""
-	}
-	return defaultUnixPathEnv
-
-}
-
-// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
-// is the system drive.
-// On Linux: this is a no-op.
-// On Windows: this does the following:
-// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
-// This is used, for example, when validating a user provided path in docker cp.
-// If a drive letter is supplied, it must be the system drive. The drive letter
-// is always removed. Also, it translates it to OS semantics (IOW / to \). We
-// need the path in this syntax so that it can ultimately be concatenated with
-// a Windows long-path which doesn't support drive-letters.
Examples: -// C: --> Fail -// C:\ --> \ -// a --> a -// /a --> \a -// d:\ --> Fail -func CheckSystemDriveAndRemoveDriveLetter(path string, driver pathdriver.PathDriver) (string, error) { - if runtime.GOOS != "windows" || LCOWSupported() { - return path, nil - } - - if len(path) == 2 && string(path[1]) == ":" { - return "", fmt.Errorf("No relative path specified in %q", path) - } - if !driver.IsAbs(path) || len(path) < 2 { - return filepath.FromSlash(path), nil - } - if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { - return "", fmt.Errorf("The specified path is not on the system drive (C:)") - } - return filepath.FromSlash(path[2:]), nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/process_unix.go b/vendor/github.com/docker/docker/pkg/system/process_unix.go deleted file mode 100644 index 0195a891b..000000000 --- a/vendor/github.com/docker/docker/pkg/system/process_unix.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build linux freebsd darwin - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "syscall" - - "golang.org/x/sys/unix" -) - -// IsProcessAlive returns true if process with a given pid is running. -func IsProcessAlive(pid int) bool { - err := unix.Kill(pid, syscall.Signal(0)) - if err == nil || err == unix.EPERM { - return true - } - - return false -} - -// KillProcess force-stops a process. -func KillProcess(pid int) { - unix.Kill(pid, unix.SIGKILL) -} diff --git a/vendor/github.com/docker/docker/pkg/system/process_windows.go b/vendor/github.com/docker/docker/pkg/system/process_windows.go deleted file mode 100644 index 4e70c97b1..000000000 --- a/vendor/github.com/docker/docker/pkg/system/process_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import "os" - -// IsProcessAlive returns true if process with a given pid is running. -func IsProcessAlive(pid int) bool { - _, err := os.FindProcess(pid) - - return err == nil -} - -// KillProcess force-stops a process. -func KillProcess(pid int) { - p, err := os.FindProcess(pid) - if err == nil { - p.Kill() - } -} diff --git a/vendor/github.com/docker/docker/pkg/system/rm.go b/vendor/github.com/docker/docker/pkg/system/rm.go deleted file mode 100644 index 02e4d2622..000000000 --- a/vendor/github.com/docker/docker/pkg/system/rm.go +++ /dev/null @@ -1,80 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "os" - "syscall" - "time" - - "github.com/docker/docker/pkg/mount" - "github.com/pkg/errors" -) - -// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can -// often be remedied. -// Only use `EnsureRemoveAll` if you really want to make every effort to remove -// a directory. -// -// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there -// can be a race between reading directory entries and then actually attempting -// to remove everything in the directory. -// These types of errors do not need to be returned since it's ok for the dir to -// be gone we can just retry the remove operation. 
-// -// This should not return a `os.ErrNotExist` kind of error under any circumstances -func EnsureRemoveAll(dir string) error { - notExistErr := make(map[string]bool) - - // track retries - exitOnErr := make(map[string]int) - maxRetry := 50 - - // Attempt to unmount anything beneath this dir first - mount.RecursiveUnmount(dir) - - for { - err := os.RemoveAll(dir) - if err == nil { - return err - } - - pe, ok := err.(*os.PathError) - if !ok { - return err - } - - if os.IsNotExist(err) { - if notExistErr[pe.Path] { - return err - } - notExistErr[pe.Path] = true - - // There is a race where some subdir can be removed but after the parent - // dir entries have been read. - // So the path could be from `os.Remove(subdir)` - // If the reported non-existent path is not the passed in `dir` we - // should just retry, but otherwise return with no error. - if pe.Path == dir { - return nil - } - continue - } - - if pe.Err != syscall.EBUSY { - return err - } - - if mounted, _ := mount.Mounted(pe.Path); mounted { - if e := mount.Unmount(pe.Path); e != nil { - if mounted, _ := mount.Mounted(pe.Path); mounted { - return errors.Wrapf(e, "error while removing %s", dir) - } - } - } - - if exitOnErr[pe.Path] == maxRetry { - return err - } - exitOnErr[pe.Path]++ - time.Sleep(100 * time.Millisecond) - } -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_darwin.go b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go deleted file mode 100644 index c1c0ee9f3..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_darwin.go +++ /dev/null @@ -1,13 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go b/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go deleted file mode 100644 index c1c0ee9f3..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go +++ /dev/null @@ -1,13 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go deleted file mode 100644 index 98c9eb18d..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_linux.go +++ /dev/null @@ -1,19 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: s.Mode, - uid: s.Uid, - gid: s.Gid, - rdev: s.Rdev, - mtim: s.Mtim}, nil -} - -// FromStatT converts a syscall.Stat_t type to a system.Stat_t type -// This is exposed on Linux as pkg/archive/changes uses it. 
-func FromStatT(s *syscall.Stat_t) (*StatT, error) { - return fromStatT(s) -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go deleted file mode 100644 index 756b92d1e..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go +++ /dev/null @@ -1,13 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtim}, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go deleted file mode 100644 index 756b92d1e..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go +++ /dev/null @@ -1,13 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtim}, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unix.go b/vendor/github.com/docker/docker/pkg/system/stat_unix.go deleted file mode 100644 index 3d7e2ebbe..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_unix.go +++ /dev/null @@ -1,65 +0,0 @@ -// +build !windows - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "syscall" -) - -// StatT type contains status of a file. It contains metadata -// like permission, owner, group, size, etc about a file. -type StatT struct { - mode uint32 - uid uint32 - gid uint32 - rdev uint64 - size int64 - mtim syscall.Timespec -} - -// Mode returns file's permission mode. -func (s StatT) Mode() uint32 { - return s.mode -} - -// UID returns file's user id of owner. -func (s StatT) UID() uint32 { - return s.uid -} - -// GID returns file's group id of owner. -func (s StatT) GID() uint32 { - return s.gid -} - -// Rdev returns file's device ID (if it's special file). -func (s StatT) Rdev() uint64 { - return s.rdev -} - -// Size returns file's size. -func (s StatT) Size() int64 { - return s.size -} - -// Mtim returns file's last modification time. -func (s StatT) Mtim() syscall.Timespec { - return s.mtim -} - -// IsDir reports whether s describes a directory. -func (s StatT) IsDir() bool { - return s.mode&syscall.S_IFDIR != 0 -} - -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file. -// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/docker/docker/pkg/system/stat_windows.go deleted file mode 100644 index b2456cb88..000000000 --- a/vendor/github.com/docker/docker/pkg/system/stat_windows.go +++ /dev/null @@ -1,49 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "os" - "time" -) - -// StatT type contains status of a file. It contains metadata -// like permission, size, etc about a file. -type StatT struct { - mode os.FileMode - size int64 - mtim time.Time -} - -// Size returns file's size. 
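The per-platform fromStatT variants above differ only in the field widths of syscall.Stat_t. On any Unix the same raw data is reachable through os.FileInfo.Sys(), which is roughly what a caller would do without this package (a sketch; /etc/hosts is just a convenient test path):

    package main

    import (
        "fmt"
        "os"
        "syscall"
    )

    func main() {
        fi, err := os.Stat("/etc/hosts")
        if err != nil {
            panic(err)
        }
        // On Unix the raw stat buffer hides behind Sys(); the vendored
        // StatT wraps these same fields behind typed accessors.
        st, ok := fi.Sys().(*syscall.Stat_t)
        if !ok {
            fmt.Println("not a Unix stat result")
            return
        }
        fmt.Println(st.Uid, st.Gid, st.Size)
    }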
-func (s StatT) Size() int64 { - return s.size -} - -// Mode returns file's permission mode. -func (s StatT) Mode() os.FileMode { - return os.FileMode(s.mode) -} - -// Mtim returns file's last modification time. -func (s StatT) Mtim() time.Time { - return time.Time(s.mtim) -} - -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file. -// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - fi, err := os.Stat(path) - if err != nil { - return nil, err - } - return fromStatT(&fi) -} - -// fromStatT converts a os.FileInfo type to a system.StatT type -func fromStatT(fi *os.FileInfo) (*StatT, error) { - return &StatT{ - size: (*fi).Size(), - mode: (*fi).Mode(), - mtim: (*fi).ModTime()}, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go deleted file mode 100644 index 919a412a7..000000000 --- a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build linux freebsd - -package system // import "github.com/docker/docker/pkg/system" - -import "golang.org/x/sys/unix" - -// Unmount is a platform-specific helper function to call -// the unmount syscall. -func Unmount(dest string) error { - return unix.Unmount(dest, 0) -} - -// CommandLineToArgv should not be used on Unix. -// It simply returns commandLine in the only element in the returned array. -func CommandLineToArgv(commandLine string) ([]string, error) { - return []string{commandLine}, nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go deleted file mode 100644 index ee7e0256f..000000000 --- a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go +++ /dev/null @@ -1,127 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "fmt" - "unsafe" - - "github.com/sirupsen/logrus" - "golang.org/x/sys/windows" -) - -var ( - ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") - procGetVersionExW = modkernel32.NewProc("GetVersionExW") - procGetProductInfo = modkernel32.NewProc("GetProductInfo") -) - -// OSVersion is a wrapper for Windows version information -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx -type OSVersion struct { - Version uint32 - MajorVersion uint8 - MinorVersion uint8 - Build uint16 -} - -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx -type osVersionInfoEx struct { - OSVersionInfoSize uint32 - MajorVersion uint32 - MinorVersion uint32 - BuildNumber uint32 - PlatformID uint32 - CSDVersion [128]uint16 - ServicePackMajor uint16 - ServicePackMinor uint16 - SuiteMask uint16 - ProductType byte - Reserve byte -} - -// GetOSVersion gets the operating system version on Windows. Note that -// docker.exe must be manifested to get the correct version information. -func GetOSVersion() OSVersion { - var err error - osv := OSVersion{} - osv.Version, err = windows.GetVersion() - if err != nil { - // GetVersion never fails. 
- panic(err) - } - osv.MajorVersion = uint8(osv.Version & 0xFF) - osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) - osv.Build = uint16(osv.Version >> 16) - return osv -} - -func (osv OSVersion) ToString() string { - return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build) -} - -// IsWindowsClient returns true if the SKU is client -// @engine maintainers - this function should not be removed or modified as it -// is used to enforce licensing restrictions on Windows. -func IsWindowsClient() bool { - osviex := &osVersionInfoEx{OSVersionInfoSize: 284} - r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex))) - if r1 == 0 { - logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err) - return false - } - const verNTWorkstation = 0x00000001 - return osviex.ProductType == verNTWorkstation -} - -// IsIoTCore returns true if the currently running image is based off of -// Windows 10 IoT Core. -// @engine maintainers - this function should not be removed or modified as it -// is used to enforce licensing restrictions on Windows. -func IsIoTCore() bool { - var returnedProductType uint32 - r1, _, err := procGetProductInfo.Call(6, 1, 0, 0, uintptr(unsafe.Pointer(&returnedProductType))) - if r1 == 0 { - logrus.Warnf("GetProductInfo failed - assuming this is not IoT: %v", err) - return false - } - const productIoTUAP = 0x0000007B - const productIoTUAPCommercial = 0x00000083 - return returnedProductType == productIoTUAP || returnedProductType == productIoTUAPCommercial -} - -// Unmount is a platform-specific helper function to call -// the unmount syscall. Not supported on Windows -func Unmount(dest string) error { - return nil -} - -// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array. -func CommandLineToArgv(commandLine string) ([]string, error) { - var argc int32 - - argsPtr, err := windows.UTF16PtrFromString(commandLine) - if err != nil { - return nil, err - } - - argv, err := windows.CommandLineToArgv(argsPtr, &argc) - if err != nil { - return nil, err - } - defer windows.LocalFree(windows.Handle(uintptr(unsafe.Pointer(argv)))) - - newArgs := make([]string, argc) - for i, v := range (*argv)[:argc] { - newArgs[i] = string(windows.UTF16ToString((*v)[:])) - } - - return newArgs, nil -} - -// HasWin32KSupport determines whether containers that depend on win32k can -// run on this machine. Win32k is the driver used to implement windowing. -func HasWin32KSupport() bool { - // For now, check for ntuser API support on the host. In the future, a host - // may support win32k in containers even if the host does not support ntuser - // APIs. - return ntuserApiset.Load() == nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/umask.go b/vendor/github.com/docker/docker/pkg/system/umask.go deleted file mode 100644 index 9912a2bab..000000000 --- a/vendor/github.com/docker/docker/pkg/system/umask.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !windows - -package system // import "github.com/docker/docker/pkg/system" - -import ( - "golang.org/x/sys/unix" -) - -// Umask sets current process's file mode creation mask to newmask -// and returns oldmask. 
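GetOSVersion above unpacks the dword returned by GetVersion: major version in the low byte, minor in the next byte, build number in the high word. The arithmetic can be checked on any platform; the sample value 10.0.14393 is illustrative:

    package main

    import "fmt"

    func main() {
        // 10.0.14393 packed the way GetVersion returns it:
        // build in the high word, minor in bits 8-15, major in bits 0-7.
        v := uint32(14393)<<16 | uint32(0)<<8 | uint32(10)
        major := uint8(v & 0xFF)
        minor := uint8(v >> 8 & 0xFF)
        build := uint16(v >> 16)
        fmt.Printf("%d.%d.%d\n", major, minor, build) // 10.0.14393
    }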
-func Umask(newmask int) (oldmask int, err error) { - return unix.Umask(newmask), nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/umask_windows.go b/vendor/github.com/docker/docker/pkg/system/umask_windows.go deleted file mode 100644 index fc62388c3..000000000 --- a/vendor/github.com/docker/docker/pkg/system/umask_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -// Umask is not supported on the windows platform. -func Umask(newmask int) (oldmask int, err error) { - // should not be called on cli code path - return 0, ErrNotSupportedPlatform -} diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go b/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go deleted file mode 100644 index ed1b9fad5..000000000 --- a/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go +++ /dev/null @@ -1,24 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/unix" -) - -// LUtimesNano is used to change access and modification time of the specified path. -// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm. -func LUtimesNano(path string, ts []syscall.Timespec) error { - var _path *byte - _path, err := unix.BytePtrFromString(path) - if err != nil { - return err - } - - if _, _, err := unix.Syscall(unix.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != unix.ENOSYS { - return err - } - - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go b/vendor/github.com/docker/docker/pkg/system/utimes_linux.go deleted file mode 100644 index 0afe85458..000000000 --- a/vendor/github.com/docker/docker/pkg/system/utimes_linux.go +++ /dev/null @@ -1,25 +0,0 @@ -package system // import "github.com/docker/docker/pkg/system" - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/unix" -) - -// LUtimesNano is used to change access and modification time of the specified path. -// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm. -func LUtimesNano(path string, ts []syscall.Timespec) error { - atFdCwd := unix.AT_FDCWD - - var _path *byte - _path, err := unix.BytePtrFromString(path) - if err != nil { - return err - } - if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS { - return err - } - - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go deleted file mode 100644 index 095e072e1..000000000 --- a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !linux,!freebsd - -package system // import "github.com/docker/docker/pkg/system" - -import "syscall" - -// LUtimesNano is only supported on linux and freebsd. 
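The two LUtimesNano implementations above go through raw Syscall/Syscall6 because unix.UtimesNano had no NOFOLLOW variant at the time; current golang.org/x/sys/unix exposes utimensat directly. A sketch of the equivalent call on Linux (the target path is an assumption):

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    func main() {
        // Set atime and mtime on the link itself, not its target --
        // the same effect as the vendored LUtimesNano on Linux.
        ts := []unix.Timespec{
            unix.NsecToTimespec(0), // atime: epoch
            unix.NsecToTimespec(0), // mtime: epoch
        }
        err := unix.UtimesNanoAt(unix.AT_FDCWD, "/tmp/somelink", ts, unix.AT_SYMLINK_NOFOLLOW)
        fmt.Println(err)
    }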
-func LUtimesNano(path string, ts []syscall.Timespec) error {
-	return ErrNotSupportedPlatform
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
deleted file mode 100644
index 66d4895b2..000000000
--- a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package system // import "github.com/docker/docker/pkg/system"
-
-import "golang.org/x/sys/unix"
-
-// Lgetxattr retrieves the value of the extended attribute identified by attr
-// and associated with the given path in the file system.
-// It returns a nil slice and nil error if the xattr is not set.
-func Lgetxattr(path string, attr string) ([]byte, error) {
-	dest := make([]byte, 128)
-	sz, errno := unix.Lgetxattr(path, attr, dest)
-	if errno == unix.ENODATA {
-		return nil, nil
-	}
-	if errno == unix.ERANGE {
-		dest = make([]byte, sz)
-		sz, errno = unix.Lgetxattr(path, attr, dest)
-	}
-	if errno != nil {
-		return nil, errno
-	}
-
-	return dest[:sz], nil
-}
-
-// Lsetxattr sets the value of the extended attribute identified by attr
-// and associated with the given path in the file system.
-func Lsetxattr(path string, attr string, data []byte, flags int) error {
-	return unix.Lsetxattr(path, attr, data, flags)
-}
diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go
deleted file mode 100644
index d780a90cd..000000000
--- a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build !linux
-
-package system // import "github.com/docker/docker/pkg/system"
-
-// Lgetxattr is not supported on platforms other than linux.
-func Lgetxattr(path string, attr string) ([]byte, error) {
-	return nil, ErrNotSupportedPlatform
-}
-
-// Lsetxattr is not supported on platforms other than linux.
-func Lsetxattr(path string, attr string, data []byte, flags int) error {
-	return ErrNotSupportedPlatform
-}
diff --git a/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go b/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go
deleted file mode 100644
index e83589374..000000000
--- a/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Package tailfile provides helper functions to read the last n lines of any
-// ReadSeeker.
-package tailfile // import "github.com/docker/docker/pkg/tailfile"
-
-import (
-	"bytes"
-	"errors"
-	"io"
-	"os"
-)
-
-const blockSize = 1024
-
-var eol = []byte("\n")
-
-// ErrNonPositiveLinesNumber is an error returned if the requested number of
-// lines is not positive.
-var ErrNonPositiveLinesNumber = errors.New("The number of lines to extract from the file must be positive")
-
-// TailFile returns the last n lines of the reader f (which may be nil).
-func TailFile(f io.ReadSeeker, n int) ([][]byte, error) {
-	if n <= 0 {
-		return nil, ErrNonPositiveLinesNumber
-	}
-	size, err := f.Seek(0, os.SEEK_END)
-	if err != nil {
-		return nil, err
-	}
-	block := -1
-	var data []byte
-	var cnt int
-	for {
-		var b []byte
-		step := int64(block * blockSize)
-		left := size + step // how many bytes to beginning
-		if left < 0 {
-			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
-				return nil, err
-			}
-			b = make([]byte, blockSize+left)
-			if _, err := f.Read(b); err != nil {
-				return nil, err
-			}
-			data = append(b, data...)
- break - } else { - b = make([]byte, blockSize) - if _, err := f.Seek(left, os.SEEK_SET); err != nil { - return nil, err - } - if _, err := f.Read(b); err != nil { - return nil, err - } - data = append(b, data...) - } - cnt += bytes.Count(b, eol) - if cnt > n { - break - } - block-- - } - lines := bytes.Split(data, eol) - if n < len(lines) { - return lines[len(lines)-n-1 : len(lines)-1], nil - } - return lines[:len(lines)-1], nil -} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go b/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go deleted file mode 100644 index bc7d84df4..000000000 --- a/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go +++ /dev/null @@ -1,21 +0,0 @@ -package tarsum // import "github.com/docker/docker/pkg/tarsum" - -// BuilderContext is an interface extending TarSum by adding the Remove method. -// In general there was concern about adding this method to TarSum itself -// so instead it is being added just to "BuilderContext" which will then -// only be used during the .dockerignore file processing -// - see builder/evaluator.go -type BuilderContext interface { - TarSum - Remove(string) -} - -func (bc *tarSum) Remove(filename string) { - for i, fis := range bc.sums { - if fis.Name() == filename { - bc.sums = append(bc.sums[:i], bc.sums[i+1:]...) - // Note, we don't just return because there could be - // more than one with this name - } - } -} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go b/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go deleted file mode 100644 index 01d4ed59b..000000000 --- a/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go +++ /dev/null @@ -1,133 +0,0 @@ -package tarsum // import "github.com/docker/docker/pkg/tarsum" - -import ( - "runtime" - "sort" - "strings" -) - -// FileInfoSumInterface provides an interface for accessing file checksum -// information within a tar file. This info is accessed through interface -// so the actual name and sum cannot be melded with. -type FileInfoSumInterface interface { - // File name - Name() string - // Checksum of this particular file and its headers - Sum() string - // Position of file in the tar - Pos() int64 -} - -type fileInfoSum struct { - name string - sum string - pos int64 -} - -func (fis fileInfoSum) Name() string { - return fis.name -} -func (fis fileInfoSum) Sum() string { - return fis.sum -} -func (fis fileInfoSum) Pos() int64 { - return fis.pos -} - -// FileInfoSums provides a list of FileInfoSumInterfaces. -type FileInfoSums []FileInfoSumInterface - -// GetFile returns the first FileInfoSumInterface with a matching name. -func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface { - // We do case insensitive matching on Windows as c:\APP and c:\app are - // the same. See issue #33107. - for i := range fis { - if (runtime.GOOS == "windows" && strings.EqualFold(fis[i].Name(), name)) || - (runtime.GOOS != "windows" && fis[i].Name() == name) { - return fis[i] - } - } - return nil -} - -// GetAllFile returns a FileInfoSums with all matching names. -func (fis FileInfoSums) GetAllFile(name string) FileInfoSums { - f := FileInfoSums{} - for i := range fis { - if fis[i].Name() == name { - f = append(f, fis[i]) - } - } - return f -} - -// GetDuplicatePaths returns a FileInfoSums with all duplicated paths. -func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) { - seen := make(map[string]int, len(fis)) // allocate earl. no need to grow this map. 
- for i := range fis { - f := fis[i] - if _, ok := seen[f.Name()]; ok { - dups = append(dups, f) - } else { - seen[f.Name()] = 0 - } - } - return dups -} - -// Len returns the size of the FileInfoSums. -func (fis FileInfoSums) Len() int { return len(fis) } - -// Swap swaps two FileInfoSum values if a FileInfoSums list. -func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] } - -// SortByPos sorts FileInfoSums content by position. -func (fis FileInfoSums) SortByPos() { - sort.Sort(byPos{fis}) -} - -// SortByNames sorts FileInfoSums content by name. -func (fis FileInfoSums) SortByNames() { - sort.Sort(byName{fis}) -} - -// SortBySums sorts FileInfoSums content by sums. -func (fis FileInfoSums) SortBySums() { - dups := fis.GetDuplicatePaths() - if len(dups) > 0 { - sort.Sort(bySum{fis, dups}) - } else { - sort.Sort(bySum{fis, nil}) - } -} - -// byName is a sort.Sort helper for sorting by file names. -// If names are the same, order them by their appearance in the tar archive -type byName struct{ FileInfoSums } - -func (bn byName) Less(i, j int) bool { - if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() { - return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos() - } - return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name() -} - -// bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive -type bySum struct { - FileInfoSums - dups FileInfoSums -} - -func (bs bySum) Less(i, j int) bool { - if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() { - return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos() - } - return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum() -} - -// byPos is a sort.Sort helper for sorting by the sums of all the fileinfos by their original order -type byPos struct{ FileInfoSums } - -func (bp byPos) Less(i, j int) bool { - return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos() -} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go b/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go deleted file mode 100644 index 5542e1b2c..000000000 --- a/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go +++ /dev/null @@ -1,301 +0,0 @@ -// Package tarsum provides algorithms to perform checksum calculation on -// filesystem layers. -// -// The transportation of filesystems, regarding Docker, is done with tar(1) -// archives. There are a variety of tar serialization formats [2], and a key -// concern here is ensuring a repeatable checksum given a set of inputs from a -// generic tar archive. Types of transportation include distribution to and from a -// registry endpoint, saving and loading through commands or Docker daemon APIs, -// transferring the build context from client to Docker daemon, and committing the -// filesystem of a container to become an image. -// -// As tar archives are used for transit, but not preserved in many situations, the -// focus of the algorithm is to ensure the integrity of the preserved filesystem, -// while maintaining a deterministic accountability. This includes neither -// constraining the ordering or manipulation of the files during the creation or -// unpacking of the archive, nor include additional metadata state about the file -// system attributes. 
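The package comment above can be made concrete: the essential move is hashing each entry separately and then hashing the sorted per-file sums, which makes the result independent of entry order in the archive. A deliberately simplified sketch of that idea (real tarsum also hashes selected headers and versions the format):

    package main

    import (
        "archive/tar"
        "bytes"
        "crypto/sha256"
        "encoding/hex"
        "fmt"
        "io"
        "sort"
    )

    // orderInsensitiveSum hashes each entry (name+body), sorts the
    // per-file sums, then hashes the sorted list.
    func orderInsensitiveSum(r io.Reader) (string, error) {
        tr := tar.NewReader(r)
        var sums []string
        for {
            hdr, err := tr.Next()
            if err == io.EOF {
                break
            }
            if err != nil {
                return "", err
            }
            h := sha256.New()
            h.Write([]byte(hdr.Name))
            if _, err := io.Copy(h, tr); err != nil {
                return "", err
            }
            sums = append(sums, hex.EncodeToString(h.Sum(nil)))
        }
        sort.Strings(sums)
        top := sha256.New()
        for _, s := range sums {
            top.Write([]byte(s))
        }
        return hex.EncodeToString(top.Sum(nil)), nil
    }

    func main() {
        var buf bytes.Buffer
        tw := tar.NewWriter(&buf)
        tw.WriteHeader(&tar.Header{Name: "a.txt", Size: 5, Mode: 0644})
        tw.Write([]byte("hello"))
        tw.Close()
        sum, _ := orderInsensitiveSum(&buf)
        fmt.Println(sum)
    }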
-package tarsum // import "github.com/docker/docker/pkg/tarsum" - -import ( - "archive/tar" - "bytes" - "compress/gzip" - "crypto" - "crypto/sha256" - "encoding/hex" - "errors" - "fmt" - "hash" - "io" - "path" - "strings" -) - -const ( - buf8K = 8 * 1024 - buf16K = 16 * 1024 - buf32K = 32 * 1024 -) - -// NewTarSum creates a new interface for calculating a fixed time checksum of a -// tar archive. -// -// This is used for calculating checksums of layers of an image, in some cases -// including the byte payload of the image's json metadata as well, and for -// calculating the checksums for buildcache. -func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) { - return NewTarSumHash(r, dc, v, DefaultTHash) -} - -// NewTarSumHash creates a new TarSum, providing a THash to use rather than -// the DefaultTHash. -func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) { - headerSelector, err := getTarHeaderSelector(v) - if err != nil { - return nil, err - } - ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash} - err = ts.initTarSum() - return ts, err -} - -// NewTarSumForLabel creates a new TarSum using the provided TarSum version+hash label. -func NewTarSumForLabel(r io.Reader, disableCompression bool, label string) (TarSum, error) { - parts := strings.SplitN(label, "+", 2) - if len(parts) != 2 { - return nil, errors.New("tarsum label string should be of the form: {tarsum_version}+{hash_name}") - } - - versionName, hashName := parts[0], parts[1] - - version, ok := tarSumVersionsByName[versionName] - if !ok { - return nil, fmt.Errorf("unknown TarSum version name: %q", versionName) - } - - hashConfig, ok := standardHashConfigs[hashName] - if !ok { - return nil, fmt.Errorf("unknown TarSum hash name: %q", hashName) - } - - tHash := NewTHash(hashConfig.name, hashConfig.hash.New) - - return NewTarSumHash(r, disableCompression, version, tHash) -} - -// TarSum is the generic interface for calculating fixed time -// checksums of a tar archive. -type TarSum interface { - io.Reader - GetSums() FileInfoSums - Sum([]byte) string - Version() Version - Hash() THash -} - -// tarSum struct is the structure for a Version0 checksum calculation. -type tarSum struct { - io.Reader - tarR *tar.Reader - tarW *tar.Writer - writer writeCloseFlusher - bufTar *bytes.Buffer - bufWriter *bytes.Buffer - bufData []byte - h hash.Hash - tHash THash - sums FileInfoSums - fileCounter int64 - currentFile string - finished bool - first bool - DisableCompression bool // false by default. When false, the output gzip compressed. - tarSumVersion Version // this field is not exported so it can not be mutated during use - headerSelector tarHeaderSelector // handles selecting and ordering headers for files in the archive -} - -func (ts tarSum) Hash() THash { - return ts.tHash -} - -func (ts tarSum) Version() Version { - return ts.tarSumVersion -} - -// THash provides a hash.Hash type generator and its name. -type THash interface { - Hash() hash.Hash - Name() string -} - -// NewTHash is a convenience method for creating a THash. -func NewTHash(name string, h func() hash.Hash) THash { - return simpleTHash{n: name, h: h} -} - -type tHashConfig struct { - name string - hash crypto.Hash -} - -var ( - // NOTE: DO NOT include MD5 or SHA1, which are considered insecure. 
- standardHashConfigs = map[string]tHashConfig{ - "sha256": {name: "sha256", hash: crypto.SHA256}, - "sha512": {name: "sha512", hash: crypto.SHA512}, - } -) - -// DefaultTHash is default TarSum hashing algorithm - "sha256". -var DefaultTHash = NewTHash("sha256", sha256.New) - -type simpleTHash struct { - n string - h func() hash.Hash -} - -func (sth simpleTHash) Name() string { return sth.n } -func (sth simpleTHash) Hash() hash.Hash { return sth.h() } - -func (ts *tarSum) encodeHeader(h *tar.Header) error { - for _, elem := range ts.headerSelector.selectHeaders(h) { - // Ignore these headers to be compatible with versions - // before go 1.10 - if elem[0] == "gname" || elem[0] == "uname" { - elem[1] = "" - } - if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { - return err - } - } - return nil -} - -func (ts *tarSum) initTarSum() error { - ts.bufTar = bytes.NewBuffer([]byte{}) - ts.bufWriter = bytes.NewBuffer([]byte{}) - ts.tarR = tar.NewReader(ts.Reader) - ts.tarW = tar.NewWriter(ts.bufTar) - if !ts.DisableCompression { - ts.writer = gzip.NewWriter(ts.bufWriter) - } else { - ts.writer = &nopCloseFlusher{Writer: ts.bufWriter} - } - if ts.tHash == nil { - ts.tHash = DefaultTHash - } - ts.h = ts.tHash.Hash() - ts.h.Reset() - ts.first = true - ts.sums = FileInfoSums{} - return nil -} - -func (ts *tarSum) Read(buf []byte) (int, error) { - if ts.finished { - return ts.bufWriter.Read(buf) - } - if len(ts.bufData) < len(buf) { - switch { - case len(buf) <= buf8K: - ts.bufData = make([]byte, buf8K) - case len(buf) <= buf16K: - ts.bufData = make([]byte, buf16K) - case len(buf) <= buf32K: - ts.bufData = make([]byte, buf32K) - default: - ts.bufData = make([]byte, len(buf)) - } - } - buf2 := ts.bufData[:len(buf)] - - n, err := ts.tarR.Read(buf2) - if err != nil { - if err == io.EOF { - if _, err := ts.h.Write(buf2[:n]); err != nil { - return 0, err - } - if !ts.first { - ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter}) - ts.fileCounter++ - ts.h.Reset() - } else { - ts.first = false - } - - if _, err := ts.tarW.Write(buf2[:n]); err != nil { - return 0, err - } - - currentHeader, err := ts.tarR.Next() - if err != nil { - if err == io.EOF { - if err := ts.tarW.Close(); err != nil { - return 0, err - } - if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { - return 0, err - } - if err := ts.writer.Close(); err != nil { - return 0, err - } - ts.finished = true - return ts.bufWriter.Read(buf) - } - return 0, err - } - - ts.currentFile = path.Join(".", path.Join("/", currentHeader.Name)) - if err := ts.encodeHeader(currentHeader); err != nil { - return 0, err - } - if err := ts.tarW.WriteHeader(currentHeader); err != nil { - return 0, err - } - - if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { - return 0, err - } - ts.writer.Flush() - - return ts.bufWriter.Read(buf) - } - return 0, err - } - - // Filling the hash buffer - if _, err = ts.h.Write(buf2[:n]); err != nil { - return 0, err - } - - // Filling the tar writer - if _, err = ts.tarW.Write(buf2[:n]); err != nil { - return 0, err - } - - // Filling the output writer - if _, err = io.Copy(ts.writer, ts.bufTar); err != nil { - return 0, err - } - ts.writer.Flush() - - return ts.bufWriter.Read(buf) -} - -func (ts *tarSum) Sum(extra []byte) string { - ts.sums.SortBySums() - h := ts.tHash.Hash() - if extra != nil { - h.Write(extra) - } - for _, fis := range ts.sums { - h.Write([]byte(fis.Sum())) - } - checksum := ts.Version().String() + "+" + ts.tHash.Name() + 
":" + hex.EncodeToString(h.Sum(nil)) - return checksum -} - -func (ts *tarSum) GetSums() FileInfoSums { - return ts.sums -} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/versioning.go b/vendor/github.com/docker/docker/pkg/tarsum/versioning.go deleted file mode 100644 index aa1f17186..000000000 --- a/vendor/github.com/docker/docker/pkg/tarsum/versioning.go +++ /dev/null @@ -1,158 +0,0 @@ -package tarsum // import "github.com/docker/docker/pkg/tarsum" - -import ( - "archive/tar" - "errors" - "io" - "sort" - "strconv" - "strings" -) - -// Version is used for versioning of the TarSum algorithm -// based on the prefix of the hash used -// i.e. "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b" -type Version int - -// Prefix of "tarsum" -const ( - Version0 Version = iota - Version1 - // VersionDev this constant will be either the latest or an unsettled next-version of the TarSum calculation - VersionDev -) - -// WriteV1Header writes a tar header to a writer in V1 tarsum format. -func WriteV1Header(h *tar.Header, w io.Writer) { - for _, elem := range v1TarHeaderSelect(h) { - w.Write([]byte(elem[0] + elem[1])) - } -} - -// VersionLabelForChecksum returns the label for the given tarsum -// checksum, i.e., everything before the first `+` character in -// the string or an empty string if no label separator is found. -func VersionLabelForChecksum(checksum string) string { - // Checksums are in the form: {versionLabel}+{hashID}:{hex} - sepIndex := strings.Index(checksum, "+") - if sepIndex < 0 { - return "" - } - return checksum[:sepIndex] -} - -// GetVersions gets a list of all known tarsum versions. -func GetVersions() []Version { - v := []Version{} - for k := range tarSumVersions { - v = append(v, k) - } - return v -} - -var ( - tarSumVersions = map[Version]string{ - Version0: "tarsum", - Version1: "tarsum.v1", - VersionDev: "tarsum.dev", - } - tarSumVersionsByName = map[string]Version{ - "tarsum": Version0, - "tarsum.v1": Version1, - "tarsum.dev": VersionDev, - } -) - -func (tsv Version) String() string { - return tarSumVersions[tsv] -} - -// GetVersionFromTarsum returns the Version from the provided string. -func GetVersionFromTarsum(tarsum string) (Version, error) { - tsv := tarsum - if strings.Contains(tarsum, "+") { - tsv = strings.SplitN(tarsum, "+", 2)[0] - } - for v, s := range tarSumVersions { - if s == tsv { - return v, nil - } - } - return -1, ErrNotVersion -} - -// Errors that may be returned by functions in this package -var ( - ErrNotVersion = errors.New("string does not include a TarSum Version") - ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented") -) - -// tarHeaderSelector is the interface which different versions -// of tarsum should use for selecting and ordering tar headers -// for each item in the archive. 
-type tarHeaderSelector interface { - selectHeaders(h *tar.Header) (orderedHeaders [][2]string) -} - -type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string) - -func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) { - return f(h) -} - -func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { - return [][2]string{ - {"name", h.Name}, - {"mode", strconv.FormatInt(h.Mode, 10)}, - {"uid", strconv.Itoa(h.Uid)}, - {"gid", strconv.Itoa(h.Gid)}, - {"size", strconv.FormatInt(h.Size, 10)}, - {"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)}, - {"typeflag", string([]byte{h.Typeflag})}, - {"linkname", h.Linkname}, - {"uname", h.Uname}, - {"gname", h.Gname}, - {"devmajor", strconv.FormatInt(h.Devmajor, 10)}, - {"devminor", strconv.FormatInt(h.Devminor, 10)}, - } -} - -func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { - // Get extended attributes. - xAttrKeys := make([]string, len(h.Xattrs)) - for k := range h.Xattrs { - xAttrKeys = append(xAttrKeys, k) - } - sort.Strings(xAttrKeys) - - // Make the slice with enough capacity to hold the 11 basic headers - // we want from the v0 selector plus however many xattrs we have. - orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys)) - - // Copy all headers from v0 excluding the 'mtime' header (the 5th element). - v0headers := v0TarHeaderSelect(h) - orderedHeaders = append(orderedHeaders, v0headers[0:5]...) - orderedHeaders = append(orderedHeaders, v0headers[6:]...) - - // Finally, append the sorted xattrs. - for _, k := range xAttrKeys { - orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]}) - } - - return -} - -var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{ - Version0: v0TarHeaderSelect, - Version1: v1TarHeaderSelect, - VersionDev: v1TarHeaderSelect, -} - -func getTarHeaderSelector(v Version) (tarHeaderSelector, error) { - headerSelector, ok := registeredHeaderSelectors[v] - if !ok { - return nil, ErrVersionNotImplemented - } - - return headerSelector, nil -} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go b/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go deleted file mode 100644 index c4c45a35e..000000000 --- a/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go +++ /dev/null @@ -1,22 +0,0 @@ -package tarsum // import "github.com/docker/docker/pkg/tarsum" - -import ( - "io" -) - -type writeCloseFlusher interface { - io.WriteCloser - Flush() error -} - -type nopCloseFlusher struct { - io.Writer -} - -func (n *nopCloseFlusher) Close() error { - return nil -} - -func (n *nopCloseFlusher) Flush() error { - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go b/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go deleted file mode 100644 index d5c840cf1..000000000 --- a/vendor/github.com/docker/docker/pkg/truncindex/truncindex.go +++ /dev/null @@ -1,139 +0,0 @@ -// Package truncindex provides a general 'index tree', used by Docker -// in order to be able to reference containers by only a few unambiguous -// characters of their id. -package truncindex // import "github.com/docker/docker/pkg/truncindex" - -import ( - "errors" - "fmt" - "strings" - "sync" - - "github.com/tchap/go-patricia/patricia" -) - -var ( - // ErrEmptyPrefix is an error returned if the prefix was empty. 
- ErrEmptyPrefix = errors.New("Prefix can't be empty") - - // ErrIllegalChar is returned when a space is in the ID - ErrIllegalChar = errors.New("illegal character: ' '") - - // ErrNotExist is returned when ID or its prefix not found in index. - ErrNotExist = errors.New("ID does not exist") -) - -// ErrAmbiguousPrefix is returned if the prefix was ambiguous -// (multiple ids for the prefix). -type ErrAmbiguousPrefix struct { - prefix string -} - -func (e ErrAmbiguousPrefix) Error() string { - return fmt.Sprintf("Multiple IDs found with provided prefix: %s", e.prefix) -} - -// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes. -// This is used to retrieve image and container IDs by more convenient shorthand prefixes. -type TruncIndex struct { - sync.RWMutex - trie *patricia.Trie - ids map[string]struct{} -} - -// NewTruncIndex creates a new TruncIndex and initializes with a list of IDs. -func NewTruncIndex(ids []string) (idx *TruncIndex) { - idx = &TruncIndex{ - ids: make(map[string]struct{}), - - // Change patricia max prefix per node length, - // because our len(ID) always 64 - trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)), - } - for _, id := range ids { - idx.addID(id) - } - return -} - -func (idx *TruncIndex) addID(id string) error { - if strings.Contains(id, " ") { - return ErrIllegalChar - } - if id == "" { - return ErrEmptyPrefix - } - if _, exists := idx.ids[id]; exists { - return fmt.Errorf("id already exists: '%s'", id) - } - idx.ids[id] = struct{}{} - if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted { - return fmt.Errorf("failed to insert id: %s", id) - } - return nil -} - -// Add adds a new ID to the TruncIndex. -func (idx *TruncIndex) Add(id string) error { - idx.Lock() - defer idx.Unlock() - return idx.addID(id) -} - -// Delete removes an ID from the TruncIndex. If there are multiple IDs -// with the given prefix, an error is thrown. -func (idx *TruncIndex) Delete(id string) error { - idx.Lock() - defer idx.Unlock() - if _, exists := idx.ids[id]; !exists || id == "" { - return fmt.Errorf("no such id: '%s'", id) - } - delete(idx.ids, id) - if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted { - return fmt.Errorf("no such id: '%s'", id) - } - return nil -} - -// Get retrieves an ID from the TruncIndex. If there are multiple IDs -// with the given prefix, an error is thrown. -func (idx *TruncIndex) Get(s string) (string, error) { - if s == "" { - return "", ErrEmptyPrefix - } - var ( - id string - ) - subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error { - if id != "" { - // we haven't found the ID if there are two or more IDs - id = "" - return ErrAmbiguousPrefix{prefix: string(prefix)} - } - id = string(prefix) - return nil - } - - idx.RLock() - defer idx.RUnlock() - if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil { - return "", err - } - if id != "" { - return id, nil - } - return "", ErrNotExist -} - -// Iterate iterates over all stored IDs and passes each of them to the given -// handler. Take care that the handler method does not call any public -// method on truncindex as the internal locking is not reentrant/recursive -// and will result in deadlock. 
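Get above resolves a prefix to exactly one ID and fails on ambiguity. Usage looks roughly like this, assuming the upstream truncindex package is importable (the 64-character IDs are made up):

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/truncindex"
    )

    func main() {
        idx := truncindex.NewTruncIndex([]string{
            "aabbcc0000000000000000000000000000000000000000000000000000000000",
            "aabbdd0000000000000000000000000000000000000000000000000000000000",
        })
        id, err := idx.Get("aabbcc")
        fmt.Println(id, err) // full first ID, <nil>
        _, err = idx.Get("aabb")
        fmt.Println(err)     // ambiguous prefix error: two IDs match
    }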
-func (idx *TruncIndex) Iterate(handler func(id string)) { - idx.Lock() - defer idx.Unlock() - idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error { - handler(string(prefix)) - return nil - }) -} diff --git a/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go b/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go deleted file mode 100644 index 9cf348c72..000000000 --- a/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go +++ /dev/null @@ -1,52 +0,0 @@ -// Package urlutil provides helper function to check urls kind. -// It supports http urls, git urls and transport url (tcp://, …) -package urlutil // import "github.com/docker/docker/pkg/urlutil" - -import ( - "regexp" - "strings" -) - -var ( - validPrefixes = map[string][]string{ - "url": {"http://", "https://"}, - - // The github.com/ prefix is a special case used to treat context-paths - // starting with `github.com` as a git URL if the given path does not - // exist locally. The "github.com/" prefix is kept for backward compatibility, - // and is a legacy feature. - // - // Going forward, no additional prefixes should be added, and users should - // be encouraged to use explicit URLs (https://github.com/user/repo.git) instead. - "git": {"git://", "github.com/", "git@"}, - "transport": {"tcp://", "tcp+tls://", "udp://", "unix://", "unixgram://"}, - } - urlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$") -) - -// IsURL returns true if the provided str is an HTTP(S) URL. -func IsURL(str string) bool { - return checkURL(str, "url") -} - -// IsGitURL returns true if the provided str is a git repository URL. -func IsGitURL(str string) bool { - if IsURL(str) && urlPathWithFragmentSuffix.MatchString(str) { - return true - } - return checkURL(str, "git") -} - -// IsTransportURL returns true if the provided str is a transport (tcp, tcp+tls, udp, unix) URL. -func IsTransportURL(str string) bool { - return checkURL(str, "transport") -} - -func checkURL(str, kind string) bool { - for _, prefix := range validPrefixes[kind] { - if strings.HasPrefix(str, prefix) { - return true - } - } - return false -} diff --git a/vendor/github.com/docker/docker/pkg/useragent/useragent.go b/vendor/github.com/docker/docker/pkg/useragent/useragent.go deleted file mode 100644 index 22db82129..000000000 --- a/vendor/github.com/docker/docker/pkg/useragent/useragent.go +++ /dev/null @@ -1,55 +0,0 @@ -// Package useragent provides helper functions to pack -// version information into a single User-Agent header. -package useragent // import "github.com/docker/docker/pkg/useragent" - -import ( - "strings" -) - -// VersionInfo is used to model UserAgent versions. -type VersionInfo struct { - Name string - Version string -} - -func (vi *VersionInfo) isValid() bool { - const stopChars = " \t\r\n/" - name := vi.Name - vers := vi.Version - if len(name) == 0 || strings.ContainsAny(name, stopChars) { - return false - } - if len(vers) == 0 || strings.ContainsAny(vers, stopChars) { - return false - } - return true -} - -// AppendVersions converts versions to a string and appends the string to the string base. -// -// Each VersionInfo will be converted to a string in the format of -// "product/version", where the "product" is get from the name field, while -// version is get from the version field. Several pieces of version information -// will be concatenated and separated by space. -// -// Example: -// AppendVersions("base", VersionInfo{"foo", "1.0"}, VersionInfo{"bar", "2.0"}) -// results in "base foo/1.0 bar/2.0". 
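A runnable counterpart to the AppendVersions example in the comment above, assuming the upstream useragent package is importable:

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/useragent"
    )

    func main() {
        ua := useragent.AppendVersions("docker/17.03",
            useragent.VersionInfo{Name: "go", Version: "go1.9"},
            useragent.VersionInfo{Name: "kernel", Version: "4.9"},
        )
        fmt.Println(ua) // docker/17.03 go/go1.9 kernel/4.9
    }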
-func AppendVersions(base string, versions ...VersionInfo) string {
-	if len(versions) == 0 {
-		return base
-	}
-
-	verstrs := make([]string, 0, 1+len(versions))
-	if len(base) > 0 {
-		verstrs = append(verstrs, base)
-	}
-
-	for _, v := range versions {
-		if !v.isValid() {
-			continue
-		}
-		verstrs = append(verstrs, v.Name+"/"+v.Version)
-	}
-	return strings.Join(verstrs, " ")
-}
diff --git a/vendor/github.com/docker/docker/plugin/backend_linux.go b/vendor/github.com/docker/docker/plugin/backend_linux.go
deleted file mode 100644
index 044e14b0c..000000000
--- a/vendor/github.com/docker/docker/plugin/backend_linux.go
+++ /dev/null
@@ -1,876 +0,0 @@
-package plugin // import "github.com/docker/docker/plugin"
-
-import (
-	"archive/tar"
-	"compress/gzip"
-	"context"
-	"encoding/json"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"os"
-	"path"
-	"path/filepath"
-	"runtime"
-	"strings"
-
-	"github.com/docker/distribution/manifest/schema2"
-	"github.com/docker/distribution/reference"
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/filters"
-	"github.com/docker/docker/distribution"
-	progressutils "github.com/docker/docker/distribution/utils"
-	"github.com/docker/docker/distribution/xfer"
-	"github.com/docker/docker/dockerversion"
-	"github.com/docker/docker/errdefs"
-	"github.com/docker/docker/image"
-	"github.com/docker/docker/layer"
-	"github.com/docker/docker/pkg/authorization"
-	"github.com/docker/docker/pkg/chrootarchive"
-	"github.com/docker/docker/pkg/mount"
-	"github.com/docker/docker/pkg/pools"
-	"github.com/docker/docker/pkg/progress"
-	"github.com/docker/docker/pkg/system"
-	"github.com/docker/docker/plugin/v2"
-	refstore "github.com/docker/docker/reference"
-	"github.com/opencontainers/go-digest"
-	specs "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-)
-
-var acceptedPluginFilterTags = map[string]bool{
-	"enabled":    true,
-	"capability": true,
-}
-
-// Disable deactivates a plugin. This means resources (volumes, networks) can no longer use it.
-func (pm *Manager) Disable(refOrID string, config *types.PluginDisableConfig) error {
-	p, err := pm.config.Store.GetV2Plugin(refOrID)
-	if err != nil {
-		return err
-	}
-	pm.mu.RLock()
-	c := pm.cMap[p]
-	pm.mu.RUnlock()
-
-	if !config.ForceDisable && p.GetRefCount() > 0 {
-		return errors.WithStack(inUseError(p.Name()))
-	}
-
-	for _, typ := range p.GetTypes() {
-		if typ.Capability == authorization.AuthZApiImplements {
-			pm.config.AuthzMiddleware.RemovePlugin(p.Name())
-		}
-	}
-
-	if err := pm.disable(p, c); err != nil {
-		return err
-	}
-	pm.publisher.Publish(EventDisable{Plugin: p.PluginObj})
-	pm.config.LogPluginEvent(p.GetID(), refOrID, "disable")
-	return nil
-}
-
-// Enable activates a plugin, which means it is ready to be used by containers.
-func (pm *Manager) Enable(refOrID string, config *types.PluginEnableConfig) error { - p, err := pm.config.Store.GetV2Plugin(refOrID) - if err != nil { - return err - } - - c := &controller{timeoutInSecs: config.Timeout} - if err := pm.enable(p, c, false); err != nil { - return err - } - pm.publisher.Publish(EventEnable{Plugin: p.PluginObj}) - pm.config.LogPluginEvent(p.GetID(), refOrID, "enable") - return nil -} - -// Inspect examines a plugin config -func (pm *Manager) Inspect(refOrID string) (tp *types.Plugin, err error) { - p, err := pm.config.Store.GetV2Plugin(refOrID) - if err != nil { - return nil, err - } - - return &p.PluginObj, nil -} - -func (pm *Manager) pull(ctx context.Context, ref reference.Named, config *distribution.ImagePullConfig, outStream io.Writer) error { - if outStream != nil { - // Include a buffer so that slow client connections don't affect - // transfer performance. - progressChan := make(chan progress.Progress, 100) - - writesDone := make(chan struct{}) - - defer func() { - close(progressChan) - <-writesDone - }() - - var cancelFunc context.CancelFunc - ctx, cancelFunc = context.WithCancel(ctx) - - go func() { - progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) - close(writesDone) - }() - - config.ProgressOutput = progress.ChanOutput(progressChan) - } else { - config.ProgressOutput = progress.DiscardOutput() - } - return distribution.Pull(ctx, ref, config) -} - -type tempConfigStore struct { - config []byte - configDigest digest.Digest -} - -func (s *tempConfigStore) Put(c []byte) (digest.Digest, error) { - dgst := digest.FromBytes(c) - - s.config = c - s.configDigest = dgst - - return dgst, nil -} - -func (s *tempConfigStore) Get(d digest.Digest) ([]byte, error) { - if d != s.configDigest { - return nil, errNotFound("digest not found") - } - return s.config, nil -} - -func (s *tempConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) { - return configToRootFS(c) -} - -func (s *tempConfigStore) PlatformFromConfig(c []byte) (*specs.Platform, error) { - // TODO: LCOW/Plugins. This will need revisiting. 
For now use the runtime OS - return &specs.Platform{OS: runtime.GOOS}, nil -} - -func computePrivileges(c types.PluginConfig) types.PluginPrivileges { - var privileges types.PluginPrivileges - if c.Network.Type != "null" && c.Network.Type != "bridge" && c.Network.Type != "" { - privileges = append(privileges, types.PluginPrivilege{ - Name: "network", - Description: "permissions to access a network", - Value: []string{c.Network.Type}, - }) - } - if c.IpcHost { - privileges = append(privileges, types.PluginPrivilege{ - Name: "host ipc namespace", - Description: "allow access to host ipc namespace", - Value: []string{"true"}, - }) - } - if c.PidHost { - privileges = append(privileges, types.PluginPrivilege{ - Name: "host pid namespace", - Description: "allow access to host pid namespace", - Value: []string{"true"}, - }) - } - for _, mount := range c.Mounts { - if mount.Source != nil { - privileges = append(privileges, types.PluginPrivilege{ - Name: "mount", - Description: "host path to mount", - Value: []string{*mount.Source}, - }) - } - } - for _, device := range c.Linux.Devices { - if device.Path != nil { - privileges = append(privileges, types.PluginPrivilege{ - Name: "device", - Description: "host device to access", - Value: []string{*device.Path}, - }) - } - } - if c.Linux.AllowAllDevices { - privileges = append(privileges, types.PluginPrivilege{ - Name: "allow-all-devices", - Description: "allow 'rwm' access to all devices", - Value: []string{"true"}, - }) - } - if len(c.Linux.Capabilities) > 0 { - privileges = append(privileges, types.PluginPrivilege{ - Name: "capabilities", - Description: "list of additional capabilities required", - Value: c.Linux.Capabilities, - }) - } - - return privileges -} - -// Privileges pulls a plugin config and computes the privileges required to install it. -func (pm *Manager) Privileges(ctx context.Context, ref reference.Named, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) { - // create image store instance - cs := &tempConfigStore{} - - // DownloadManager not defined because only pulling configuration. 
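computePrivileges above maps config fields to a privilege list one branch at a time. Two of those branches, reproduced standalone against the public API types (the host network and CAP_SYS_ADMIN values are illustrative):

    package main

    import (
        "fmt"

        "github.com/docker/docker/api/types"
    )

    func main() {
        cfg := types.PluginConfig{}
        cfg.Network.Type = "host"
        cfg.Linux.Capabilities = []string{"CAP_SYS_ADMIN"}

        // Mirrors the network and capabilities branches of computePrivileges.
        var privs types.PluginPrivileges
        if t := cfg.Network.Type; t != "null" && t != "bridge" && t != "" {
            privs = append(privs, types.PluginPrivilege{
                Name:        "network",
                Description: "permissions to access a network",
                Value:       []string{t},
            })
        }
        if len(cfg.Linux.Capabilities) > 0 {
            privs = append(privs, types.PluginPrivilege{
                Name:        "capabilities",
                Description: "list of additional capabilities required",
                Value:       cfg.Linux.Capabilities,
            })
        }
        for _, p := range privs {
            fmt.Println(p.Name, p.Value)
        }
    }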
- pluginPullConfig := &distribution.ImagePullConfig{ - Config: distribution.Config{ - MetaHeaders: metaHeader, - AuthConfig: authConfig, - RegistryService: pm.config.RegistryService, - ImageEventLogger: func(string, string, string) {}, - ImageStore: cs, - }, - Schema2Types: distribution.PluginTypes, - } - - if err := pm.pull(ctx, ref, pluginPullConfig, nil); err != nil { - return nil, err - } - - if cs.config == nil { - return nil, errors.New("no configuration pulled") - } - var config types.PluginConfig - if err := json.Unmarshal(cs.config, &config); err != nil { - return nil, errdefs.System(err) - } - - return computePrivileges(config), nil -} - -// Upgrade upgrades a plugin -func (pm *Manager) Upgrade(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) (err error) { - p, err := pm.config.Store.GetV2Plugin(name) - if err != nil { - return err - } - - if p.IsEnabled() { - return errors.Wrap(enabledError(p.Name()), "plugin must be disabled before upgrading") - } - - pm.muGC.RLock() - defer pm.muGC.RUnlock() - - // revalidate because Pull is public - if _, err := reference.ParseNormalizedNamed(name); err != nil { - return errors.Wrapf(errdefs.InvalidParameter(err), "failed to parse %q", name) - } - - tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs") - if err != nil { - return errors.Wrap(errdefs.System(err), "error preparing upgrade") - } - defer os.RemoveAll(tmpRootFSDir) - - dm := &downloadManager{ - tmpDir: tmpRootFSDir, - blobStore: pm.blobStore, - } - - pluginPullConfig := &distribution.ImagePullConfig{ - Config: distribution.Config{ - MetaHeaders: metaHeader, - AuthConfig: authConfig, - RegistryService: pm.config.RegistryService, - ImageEventLogger: pm.config.LogPluginEvent, - ImageStore: dm, - }, - DownloadManager: dm, // todo: reevaluate if possible to substitute distribution/xfer dependencies instead - Schema2Types: distribution.PluginTypes, - } - - err = pm.pull(ctx, ref, pluginPullConfig, outStream) - if err != nil { - go pm.GC() - return err - } - - if err := pm.upgradePlugin(p, dm.configDigest, dm.blobs, tmpRootFSDir, &privileges); err != nil { - return err - } - p.PluginObj.PluginReference = ref.String() - return nil -} - -// Pull pulls a plugin, check if the correct privileges are provided and install the plugin. 
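Pull and Upgrade both revalidate names through the distribution reference helpers seen above; the normalization chain looks like this in isolation (vieux/sshfs is simply a familiar plugin name used for illustration):

    package main

    import (
        "fmt"

        "github.com/docker/distribution/reference"
    )

    func main() {
        named, err := reference.ParseNormalizedNamed("vieux/sshfs")
        if err != nil {
            panic(err)
        }
        // TagNameOnly adds :latest when no tag was given; FamiliarString
        // prints the short, CLI-style form.
        withTag := reference.TagNameOnly(named)
        fmt.Println(named.String())                    // docker.io/vieux/sshfs
        fmt.Println(reference.FamiliarString(withTag)) // vieux/sshfs:latest
    }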
-func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer, opts ...CreateOpt) (err error) { - pm.muGC.RLock() - defer pm.muGC.RUnlock() - - // revalidate because Pull is public - nameref, err := reference.ParseNormalizedNamed(name) - if err != nil { - return errors.Wrapf(errdefs.InvalidParameter(err), "failed to parse %q", name) - } - name = reference.FamiliarString(reference.TagNameOnly(nameref)) - - if err := pm.config.Store.validateName(name); err != nil { - return errdefs.InvalidParameter(err) - } - - tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs") - if err != nil { - return errors.Wrap(errdefs.System(err), "error preparing pull") - } - defer os.RemoveAll(tmpRootFSDir) - - dm := &downloadManager{ - tmpDir: tmpRootFSDir, - blobStore: pm.blobStore, - } - - pluginPullConfig := &distribution.ImagePullConfig{ - Config: distribution.Config{ - MetaHeaders: metaHeader, - AuthConfig: authConfig, - RegistryService: pm.config.RegistryService, - ImageEventLogger: pm.config.LogPluginEvent, - ImageStore: dm, - }, - DownloadManager: dm, // todo: reevaluate if possible to substitute distribution/xfer dependencies instead - Schema2Types: distribution.PluginTypes, - } - - err = pm.pull(ctx, ref, pluginPullConfig, outStream) - if err != nil { - go pm.GC() - return err - } - - refOpt := func(p *v2.Plugin) { - p.PluginObj.PluginReference = ref.String() - } - optsList := make([]CreateOpt, 0, len(opts)+1) - optsList = append(optsList, opts...) - optsList = append(optsList, refOpt) - - p, err := pm.createPlugin(name, dm.configDigest, dm.blobs, tmpRootFSDir, &privileges, optsList...) - if err != nil { - return err - } - - pm.publisher.Publish(EventCreate{Plugin: p.PluginObj}) - return nil -} - -// List displays the list of plugins and associated metadata. -func (pm *Manager) List(pluginFilters filters.Args) ([]types.Plugin, error) { - if err := pluginFilters.Validate(acceptedPluginFilterTags); err != nil { - return nil, err - } - - enabledOnly := false - disabledOnly := false - if pluginFilters.Contains("enabled") { - if pluginFilters.ExactMatch("enabled", "true") { - enabledOnly = true - } else if pluginFilters.ExactMatch("enabled", "false") { - disabledOnly = true - } else { - return nil, invalidFilter{"enabled", pluginFilters.Get("enabled")} - } - } - - plugins := pm.config.Store.GetAll() - out := make([]types.Plugin, 0, len(plugins)) - -next: - for _, p := range plugins { - if enabledOnly && !p.PluginObj.Enabled { - continue - } - if disabledOnly && p.PluginObj.Enabled { - continue - } - if pluginFilters.Contains("capability") { - for _, f := range p.GetTypes() { - if !pluginFilters.Match("capability", f.Capability) { - continue next - } - } - } - out = append(out, p.PluginObj) - } - return out, nil -} - -// Push pushes a plugin to the store. -func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header, authConfig *types.AuthConfig, outStream io.Writer) error { - p, err := pm.config.Store.GetV2Plugin(name) - if err != nil { - return err - } - - ref, err := reference.ParseNormalizedNamed(p.Name()) - if err != nil { - return errors.Wrapf(err, "plugin has invalid name %v for push", p.Name()) - } - - var po progress.Output - if outStream != nil { - // Include a buffer so that slow client connections don't affect - // transfer performance. 
- progressChan := make(chan progress.Progress, 100) - - writesDone := make(chan struct{}) - - defer func() { - close(progressChan) - <-writesDone - }() - - var cancelFunc context.CancelFunc - ctx, cancelFunc = context.WithCancel(ctx) - - go func() { - progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan) - close(writesDone) - }() - - po = progress.ChanOutput(progressChan) - } else { - po = progress.DiscardOutput() - } - - // TODO: replace these with manager - is := &pluginConfigStore{ - pm: pm, - plugin: p, - } - lss := make(map[string]distribution.PushLayerProvider) - lss[runtime.GOOS] = &pluginLayerProvider{ - pm: pm, - plugin: p, - } - rs := &pluginReference{ - name: ref, - pluginID: p.Config, - } - - uploadManager := xfer.NewLayerUploadManager(3) - - imagePushConfig := &distribution.ImagePushConfig{ - Config: distribution.Config{ - MetaHeaders: metaHeader, - AuthConfig: authConfig, - ProgressOutput: po, - RegistryService: pm.config.RegistryService, - ReferenceStore: rs, - ImageEventLogger: pm.config.LogPluginEvent, - ImageStore: is, - RequireSchema2: true, - }, - ConfigMediaType: schema2.MediaTypePluginConfig, - LayerStores: lss, - UploadManager: uploadManager, - } - - return distribution.Push(ctx, ref, imagePushConfig) -} - -type pluginReference struct { - name reference.Named - pluginID digest.Digest -} - -func (r *pluginReference) References(id digest.Digest) []reference.Named { - if r.pluginID != id { - return nil - } - return []reference.Named{r.name} -} - -func (r *pluginReference) ReferencesByName(ref reference.Named) []refstore.Association { - return []refstore.Association{ - { - Ref: r.name, - ID: r.pluginID, - }, - } -} - -func (r *pluginReference) Get(ref reference.Named) (digest.Digest, error) { - if r.name.String() != ref.String() { - return digest.Digest(""), refstore.ErrDoesNotExist - } - return r.pluginID, nil -} - -func (r *pluginReference) AddTag(ref reference.Named, id digest.Digest, force bool) error { - // Read only, ignore - return nil -} -func (r *pluginReference) AddDigest(ref reference.Canonical, id digest.Digest, force bool) error { - // Read only, ignore - return nil -} -func (r *pluginReference) Delete(ref reference.Named) (bool, error) { - // Read only, ignore - return false, nil -} - -type pluginConfigStore struct { - pm *Manager - plugin *v2.Plugin -} - -func (s *pluginConfigStore) Put([]byte) (digest.Digest, error) { - return digest.Digest(""), errors.New("cannot store config on push") -} - -func (s *pluginConfigStore) Get(d digest.Digest) ([]byte, error) { - if s.plugin.Config != d { - return nil, errors.New("plugin not found") - } - rwc, err := s.pm.blobStore.Get(d) - if err != nil { - return nil, err - } - defer rwc.Close() - return ioutil.ReadAll(rwc) -} - -func (s *pluginConfigStore) RootFSFromConfig(c []byte) (*image.RootFS, error) { - return configToRootFS(c) -} - -func (s *pluginConfigStore) PlatformFromConfig(c []byte) (*specs.Platform, error) { - // TODO: LCOW/Plugins. This will need revisiting. 
For now use the runtime OS - return &specs.Platform{OS: runtime.GOOS}, nil -} - -type pluginLayerProvider struct { - pm *Manager - plugin *v2.Plugin -} - -func (p *pluginLayerProvider) Get(id layer.ChainID) (distribution.PushLayer, error) { - rootFS := rootFSFromPlugin(p.plugin.PluginObj.Config.Rootfs) - var i int - for i = 1; i <= len(rootFS.DiffIDs); i++ { - if layer.CreateChainID(rootFS.DiffIDs[:i]) == id { - break - } - } - if i > len(rootFS.DiffIDs) { - return nil, errors.New("layer not found") - } - return &pluginLayer{ - pm: p.pm, - diffIDs: rootFS.DiffIDs[:i], - blobs: p.plugin.Blobsums[:i], - }, nil -} - -type pluginLayer struct { - pm *Manager - diffIDs []layer.DiffID - blobs []digest.Digest -} - -func (l *pluginLayer) ChainID() layer.ChainID { - return layer.CreateChainID(l.diffIDs) -} - -func (l *pluginLayer) DiffID() layer.DiffID { - return l.diffIDs[len(l.diffIDs)-1] -} - -func (l *pluginLayer) Parent() distribution.PushLayer { - if len(l.diffIDs) == 1 { - return nil - } - return &pluginLayer{ - pm: l.pm, - diffIDs: l.diffIDs[:len(l.diffIDs)-1], - blobs: l.blobs[:len(l.diffIDs)-1], - } -} - -func (l *pluginLayer) Open() (io.ReadCloser, error) { - return l.pm.blobStore.Get(l.blobs[len(l.diffIDs)-1]) -} - -func (l *pluginLayer) Size() (int64, error) { - return l.pm.blobStore.Size(l.blobs[len(l.diffIDs)-1]) -} - -func (l *pluginLayer) MediaType() string { - return schema2.MediaTypeLayer -} - -func (l *pluginLayer) Release() { - // Nothing needs to be release, no references held -} - -// Remove deletes plugin's root directory. -func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error { - p, err := pm.config.Store.GetV2Plugin(name) - pm.mu.RLock() - c := pm.cMap[p] - pm.mu.RUnlock() - - if err != nil { - return err - } - - if !config.ForceRemove { - if p.GetRefCount() > 0 { - return inUseError(p.Name()) - } - if p.IsEnabled() { - return enabledError(p.Name()) - } - } - - if p.IsEnabled() { - if err := pm.disable(p, c); err != nil { - logrus.Errorf("failed to disable plugin '%s': %s", p.Name(), err) - } - } - - defer func() { - go pm.GC() - }() - - id := p.GetID() - pluginDir := filepath.Join(pm.config.Root, id) - - if err := mount.RecursiveUnmount(pluginDir); err != nil { - return errors.Wrap(err, "error unmounting plugin data") - } - - if err := atomicRemoveAll(pluginDir); err != nil { - return err - } - - pm.config.Store.Remove(p) - pm.config.LogPluginEvent(id, name, "remove") - pm.publisher.Publish(EventRemove{Plugin: p.PluginObj}) - return nil -} - -// Set sets plugin args -func (pm *Manager) Set(name string, args []string) error { - p, err := pm.config.Store.GetV2Plugin(name) - if err != nil { - return err - } - if err := p.Set(args); err != nil { - return err - } - return pm.save(p) -} - -// CreateFromContext creates a plugin from the given pluginDir which contains -// both the rootfs and the config.json and a repoName with optional tag. 
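The pluginLayer helpers above map a prefix of the rootfs diff IDs to a chain ID via layer.CreateChainID. A rough sketch of the usual recurrence, chain(n) = digest(chain(n-1) + " " + diff(n)), with made-up digests rather than the vendored implementation:

package main

import (
	"crypto/sha256"
	"fmt"
)

// chainID illustrates the recurrence for layer chain IDs:
// chain(D1) = D1; chain(D1..Dn) = sha256(chain(D1..Dn-1) + " " + Dn).
func chainID(diffIDs []string) string {
	if len(diffIDs) == 0 {
		return ""
	}
	id := diffIDs[0]
	for _, d := range diffIDs[1:] {
		sum := sha256.Sum256([]byte(id + " " + d))
		id = fmt.Sprintf("sha256:%x", sum)
	}
	return id
}

func main() {
	// Illustrative placeholder digests, not real layer content.
	fmt.Println(chainID([]string{"sha256:aaa", "sha256:bbb"}))
}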
-func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) (err error) { - pm.muGC.RLock() - defer pm.muGC.RUnlock() - - ref, err := reference.ParseNormalizedNamed(options.RepoName) - if err != nil { - return errors.Wrapf(err, "failed to parse reference %v", options.RepoName) - } - if _, ok := ref.(reference.Canonical); ok { - return errors.Errorf("canonical references are not permitted") - } - name := reference.FamiliarString(reference.TagNameOnly(ref)) - - if err := pm.config.Store.validateName(name); err != nil { // fast check, real check is in createPlugin() - return err - } - - tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs") - if err != nil { - return errors.Wrap(err, "failed to create temp directory") - } - defer os.RemoveAll(tmpRootFSDir) - - var configJSON []byte - rootFS := splitConfigRootFSFromTar(tarCtx, &configJSON) - - rootFSBlob, err := pm.blobStore.New() - if err != nil { - return err - } - defer rootFSBlob.Close() - gzw := gzip.NewWriter(rootFSBlob) - layerDigester := digest.Canonical.Digester() - rootFSReader := io.TeeReader(rootFS, io.MultiWriter(gzw, layerDigester.Hash())) - - if err := chrootarchive.Untar(rootFSReader, tmpRootFSDir, nil); err != nil { - return err - } - if err := rootFS.Close(); err != nil { - return err - } - - if configJSON == nil { - return errors.New("config not found") - } - - if err := gzw.Close(); err != nil { - return errors.Wrap(err, "error closing gzip writer") - } - - var config types.PluginConfig - if err := json.Unmarshal(configJSON, &config); err != nil { - return errors.Wrap(err, "failed to parse config") - } - - if err := pm.validateConfig(config); err != nil { - return err - } - - pm.mu.Lock() - defer pm.mu.Unlock() - - rootFSBlobsum, err := rootFSBlob.Commit() - if err != nil { - return err - } - defer func() { - if err != nil { - go pm.GC() - } - }() - - config.Rootfs = &types.PluginConfigRootfs{ - Type: "layers", - DiffIds: []string{layerDigester.Digest().String()}, - } - - config.DockerVersion = dockerversion.Version - - configBlob, err := pm.blobStore.New() - if err != nil { - return err - } - defer configBlob.Close() - if err := json.NewEncoder(configBlob).Encode(config); err != nil { - return errors.Wrap(err, "error encoding json config") - } - configBlobsum, err := configBlob.Commit() - if err != nil { - return err - } - - p, err := pm.createPlugin(name, configBlobsum, []digest.Digest{rootFSBlobsum}, tmpRootFSDir, nil) - if err != nil { - return err - } - p.PluginObj.PluginReference = name - - pm.publisher.Publish(EventCreate{Plugin: p.PluginObj}) - pm.config.LogPluginEvent(p.PluginObj.ID, name, "create") - - return nil -} - -func (pm *Manager) validateConfig(config types.PluginConfig) error { - return nil // TODO: -} - -func splitConfigRootFSFromTar(in io.ReadCloser, config *[]byte) io.ReadCloser { - pr, pw := io.Pipe() - go func() { - tarReader := tar.NewReader(in) - tarWriter := tar.NewWriter(pw) - defer in.Close() - - hasRootFS := false - - for { - hdr, err := tarReader.Next() - if err == io.EOF { - if !hasRootFS { - pw.CloseWithError(errors.Wrap(err, "no rootfs found")) - return - } - // Signals end of archive. 
- tarWriter.Close() - pw.Close() - return - } - if err != nil { - pw.CloseWithError(errors.Wrap(err, "failed to read from tar")) - return - } - - content := io.Reader(tarReader) - name := path.Clean(hdr.Name) - if path.IsAbs(name) { - name = name[1:] - } - if name == configFileName { - dt, err := ioutil.ReadAll(content) - if err != nil { - pw.CloseWithError(errors.Wrapf(err, "failed to read %s", configFileName)) - return - } - *config = dt - } - if parts := strings.Split(name, "/"); len(parts) != 0 && parts[0] == rootFSFileName { - hdr.Name = path.Clean(path.Join(parts[1:]...)) - if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(strings.ToLower(hdr.Linkname), rootFSFileName+"/") { - hdr.Linkname = hdr.Linkname[len(rootFSFileName)+1:] - } - if err := tarWriter.WriteHeader(hdr); err != nil { - pw.CloseWithError(errors.Wrap(err, "error writing tar header")) - return - } - if _, err := pools.Copy(tarWriter, content); err != nil { - pw.CloseWithError(errors.Wrap(err, "error copying tar data")) - return - } - hasRootFS = true - } else { - io.Copy(ioutil.Discard, content) - } - } - }() - return pr -} - -func atomicRemoveAll(dir string) error { - renamed := dir + "-removing" - - err := os.Rename(dir, renamed) - switch { - case os.IsNotExist(err), err == nil: - // even if `dir` doesn't exist, we can still try and remove `renamed` - case os.IsExist(err): - // Some previous remove failed, check if the origin dir exists - if e := system.EnsureRemoveAll(renamed); e != nil { - return errors.Wrap(err, "rename target already exists and could not be removed") - } - if _, err := os.Stat(dir); os.IsNotExist(err) { - // origin doesn't exist, nothing left to do - return nil - } - - // attempt to rename again - if err := os.Rename(dir, renamed); err != nil { - return errors.Wrap(err, "failed to rename dir for atomic removal") - } - default: - return errors.Wrap(err, "failed to rename dir for atomic removal") - } - - if err := system.EnsureRemoveAll(renamed); err != nil { - os.Rename(renamed, dir) - return err - } - return nil -} diff --git a/vendor/github.com/docker/docker/plugin/backend_unsupported.go b/vendor/github.com/docker/docker/plugin/backend_unsupported.go deleted file mode 100644 index c0666e858..000000000 --- a/vendor/github.com/docker/docker/plugin/backend_unsupported.go +++ /dev/null @@ -1,72 +0,0 @@ -// +build !linux - -package plugin // import "github.com/docker/docker/plugin" - -import ( - "context" - "errors" - "io" - "net/http" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" -) - -var errNotSupported = errors.New("plugins are not supported on this platform") - -// Disable deactivates a plugin, which implies that it cannot be used by containers. -func (pm *Manager) Disable(name string, config *types.PluginDisableConfig) error { - return errNotSupported -} - -// Enable activates a plugin, which implies that it is ready to be used by containers. -func (pm *Manager) Enable(name string, config *types.PluginEnableConfig) error { - return errNotSupported -} - -// Inspect examines a plugin config -func (pm *Manager) Inspect(refOrID string) (tp *types.Plugin, err error) { - return nil, errNotSupported -} -
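The atomicRemoveAll helper deleted above leans on rename being atomic within a single filesystem: a crash mid-removal leaves a "-removing" tombstone instead of a half-deleted directory under the live name. A stripped-down sketch of the idea, without the collision handling and rollback the real helper performs:

package main

import (
	"fmt"
	"os"
)

// removeAtomically renames the directory to a tombstone first, so an
// interrupted removal can be finished by a later run and the live name is
// never left pointing at partially deleted state.
func removeAtomically(dir string) error {
	tomb := dir + "-removing"
	if err := os.Rename(dir, tomb); err != nil && !os.IsNotExist(err) {
		return err
	}
	return os.RemoveAll(tomb)
}

func main() {
	fmt.Println(removeAtomically("/tmp/example-plugin-dir")) // hypothetical path
}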
-// Privileges pulls a plugin config and computes the privileges required to install it. -func (pm *Manager) Privileges(ctx context.Context, ref reference.Named, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) { - return nil, errNotSupported -} - -// Pull pulls a plugin, checks that the correct privileges are provided, and installs the plugin. -func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, out io.Writer, opts ...CreateOpt) error { - return errNotSupported -} - -// Upgrade pulls a plugin, checks that the correct privileges are provided, and installs the plugin. -func (pm *Manager) Upgrade(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) error { - return errNotSupported -} - -// List displays the list of plugins and associated metadata. -func (pm *Manager) List(pluginFilters filters.Args) ([]types.Plugin, error) { - return nil, errNotSupported -} - -// Push pushes a plugin to the store. -func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header, authConfig *types.AuthConfig, out io.Writer) error { - return errNotSupported -} - -// Remove deletes plugin's root directory. -func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error { - return errNotSupported -} - -// Set sets plugin args -func (pm *Manager) Set(name string, args []string) error { - return errNotSupported -} - -// CreateFromContext creates a plugin from the given pluginDir which contains -// both the rootfs and the config.json and a repoName with optional tag. -func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) error { - return errNotSupported -} diff --git a/vendor/github.com/docker/docker/plugin/blobstore.go b/vendor/github.com/docker/docker/plugin/blobstore.go deleted file mode 100644 index a24e7bdf4..000000000 --- a/vendor/github.com/docker/docker/plugin/blobstore.go +++ /dev/null @@ -1,190 +0,0 @@ -package plugin // import "github.com/docker/docker/plugin" - -import ( - "context" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - - "github.com/docker/docker/distribution/xfer" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/progress" - "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -type blobstore interface { - New() (WriteCommitCloser, error) - Get(dgst digest.Digest) (io.ReadCloser, error) - Size(dgst digest.Digest) (int64, error) -} - -type basicBlobStore struct { - path string -} - -func newBasicBlobStore(p string) (*basicBlobStore, error) { - tmpdir := filepath.Join(p, "tmp") - if err := os.MkdirAll(tmpdir, 0700); err != nil { - return nil, errors.Wrapf(err, "failed to mkdir %v", p) - } - return &basicBlobStore{path: p}, nil -} - -func (b *basicBlobStore) New() (WriteCommitCloser, error) { - f, err := ioutil.TempFile(filepath.Join(b.path, "tmp"), ".insertion") - if err != nil { - return nil, errors.Wrap(err, "failed to create temp file") - } - return newInsertion(f), nil -} - -func (b *basicBlobStore) Get(dgst digest.Digest) (io.ReadCloser, error) { - return os.Open(filepath.Join(b.path, string(dgst.Algorithm()), dgst.Hex())) -} - -func (b *basicBlobStore) Size(dgst
digest.Digest) (int64, error) { - stat, err := os.Stat(filepath.Join(b.path, string(dgst.Algorithm()), dgst.Hex())) - if err != nil { - return 0, err - } - return stat.Size(), nil -} - -func (b *basicBlobStore) gc(whitelist map[digest.Digest]struct{}) { - for _, alg := range []string{string(digest.Canonical)} { - items, err := ioutil.ReadDir(filepath.Join(b.path, alg)) - if err != nil { - continue - } - for _, fi := range items { - if _, exists := whitelist[digest.Digest(alg+":"+fi.Name())]; !exists { - p := filepath.Join(b.path, alg, fi.Name()) - err := os.RemoveAll(p) - logrus.Debugf("cleaned up blob %v: %v", p, err) - } - } - } - -} - -// WriteCommitCloser defines object that can be committed to blobstore. -type WriteCommitCloser interface { - io.WriteCloser - Commit() (digest.Digest, error) -} - -type insertion struct { - io.Writer - f *os.File - digester digest.Digester - closed bool -} - -func newInsertion(tempFile *os.File) *insertion { - digester := digest.Canonical.Digester() - return &insertion{f: tempFile, digester: digester, Writer: io.MultiWriter(tempFile, digester.Hash())} -} - -func (i *insertion) Commit() (digest.Digest, error) { - p := i.f.Name() - d := filepath.Join(filepath.Join(p, "../../")) - i.f.Sync() - defer os.RemoveAll(p) - if err := i.f.Close(); err != nil { - return "", err - } - i.closed = true - dgst := i.digester.Digest() - if err := os.MkdirAll(filepath.Join(d, string(dgst.Algorithm())), 0700); err != nil { - return "", errors.Wrapf(err, "failed to mkdir %v", d) - } - if err := os.Rename(p, filepath.Join(d, string(dgst.Algorithm()), dgst.Hex())); err != nil { - return "", errors.Wrapf(err, "failed to rename %v", p) - } - return dgst, nil -} - -func (i *insertion) Close() error { - if i.closed { - return nil - } - defer os.RemoveAll(i.f.Name()) - return i.f.Close() -} - -type downloadManager struct { - blobStore blobstore - tmpDir string - blobs []digest.Digest - configDigest digest.Digest -} - -func (dm *downloadManager) Download(ctx context.Context, initialRootFS image.RootFS, os string, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) { - for _, l := range layers { - b, err := dm.blobStore.New() - if err != nil { - return initialRootFS, nil, err - } - defer b.Close() - rc, _, err := l.Download(ctx, progressOutput) - if err != nil { - return initialRootFS, nil, errors.Wrap(err, "failed to download") - } - defer rc.Close() - r := io.TeeReader(rc, b) - inflatedLayerData, err := archive.DecompressStream(r) - if err != nil { - return initialRootFS, nil, err - } - defer inflatedLayerData.Close() - digester := digest.Canonical.Digester() - if _, err := chrootarchive.ApplyLayer(dm.tmpDir, io.TeeReader(inflatedLayerData, digester.Hash())); err != nil { - return initialRootFS, nil, err - } - initialRootFS.Append(layer.DiffID(digester.Digest())) - d, err := b.Commit() - if err != nil { - return initialRootFS, nil, err - } - dm.blobs = append(dm.blobs, d) - } - return initialRootFS, nil, nil -} - -func (dm *downloadManager) Put(dt []byte) (digest.Digest, error) { - b, err := dm.blobStore.New() - if err != nil { - return "", err - } - defer b.Close() - n, err := b.Write(dt) - if err != nil { - return "", err - } - if n != len(dt) { - return "", io.ErrShortWrite - } - d, err := b.Commit() - dm.configDigest = d - return d, err -} - -func (dm *downloadManager) Get(d digest.Digest) ([]byte, error) { - return nil, fmt.Errorf("digest not found") -} -func (dm *downloadManager) RootFSFromConfig(c []byte) (*image.RootFS, error) { - 
return configToRootFS(c) -} -func (dm *downloadManager) PlatformFromConfig(c []byte) (*specs.Platform, error) { - // TODO: LCOW/Plugins. This will need revisiting. For now use the runtime OS - return &specs.Platform{OS: runtime.GOOS}, nil -} diff --git a/vendor/github.com/docker/docker/plugin/defs.go b/vendor/github.com/docker/docker/plugin/defs.go deleted file mode 100644 index 31f7c6bcc..000000000 --- a/vendor/github.com/docker/docker/plugin/defs.go +++ /dev/null @@ -1,50 +0,0 @@ -package plugin // import "github.com/docker/docker/plugin" - -import ( - "sync" - - "github.com/docker/docker/pkg/plugins" - "github.com/docker/docker/plugin/v2" - "github.com/opencontainers/runtime-spec/specs-go" -) - -// Store manages the plugin inventory in memory and on-disk -type Store struct { - sync.RWMutex - plugins map[string]*v2.Plugin - specOpts map[string][]SpecOpt - /* handlers are necessary for transition path of legacy plugins - * to the new model. Legacy plugins use Handle() for registering an - * activation callback.*/ - handlers map[string][]func(string, *plugins.Client) -} - -// NewStore creates a Store. -func NewStore() *Store { - return &Store{ - plugins: make(map[string]*v2.Plugin), - specOpts: make(map[string][]SpecOpt), - handlers: make(map[string][]func(string, *plugins.Client)), - } -} - -// SpecOpt is used for subsystems that need to modify the runtime spec of a plugin -type SpecOpt func(*specs.Spec) - -// CreateOpt is used to configure specific plugin details when created -type CreateOpt func(p *v2.Plugin) - -// WithSwarmService is a CreateOpt that flags the passed-in plugin as a plugin -// managed by swarm -func WithSwarmService(id string) CreateOpt { - return func(p *v2.Plugin) { - p.SwarmServiceID = id - } -} - -// WithSpecMounts is a SpecOpt which appends the provided mounts to the runtime spec -func WithSpecMounts(mounts []specs.Mount) SpecOpt { - return func(s *specs.Spec) { - s.Mounts = append(s.Mounts, mounts...)
- } -} diff --git a/vendor/github.com/docker/docker/plugin/errors.go b/vendor/github.com/docker/docker/plugin/errors.go deleted file mode 100644 index 44d99b39b..000000000 --- a/vendor/github.com/docker/docker/plugin/errors.go +++ /dev/null @@ -1,66 +0,0 @@ -package plugin // import "github.com/docker/docker/plugin" - -import "fmt" - -type errNotFound string - -func (name errNotFound) Error() string { - return fmt.Sprintf("plugin %q not found", string(name)) -} - -func (errNotFound) NotFound() {} - -type errAmbiguous string - -func (name errAmbiguous) Error() string { - return fmt.Sprintf("multiple plugins found for %q", string(name)) -} - -func (name errAmbiguous) InvalidParameter() {} - -type errDisabled string - -func (name errDisabled) Error() string { - return fmt.Sprintf("plugin %s found but disabled", string(name)) -} - -func (name errDisabled) Conflict() {} - -type invalidFilter struct { - filter string - value []string -} - -func (e invalidFilter) Error() string { - msg := "Invalid filter '" + e.filter - if len(e.value) > 0 { - msg += fmt.Sprintf("=%s", e.value) - } - return msg + "'" -} - -func (invalidFilter) InvalidParameter() {} - -type inUseError string - -func (e inUseError) Error() string { - return "plugin " + string(e) + " is in use" -} - -func (inUseError) Conflict() {} - -type enabledError string - -func (e enabledError) Error() string { - return "plugin " + string(e) + " is enabled" -} - -func (enabledError) Conflict() {} - -type alreadyExistsError string - -func (e alreadyExistsError) Error() string { - return "plugin " + string(e) + " already exists" -} - -func (alreadyExistsError) Conflict() {} diff --git a/vendor/github.com/docker/docker/plugin/events.go b/vendor/github.com/docker/docker/plugin/events.go deleted file mode 100644 index d204340aa..000000000 --- a/vendor/github.com/docker/docker/plugin/events.go +++ /dev/null @@ -1,111 +0,0 @@ -package plugin // import "github.com/docker/docker/plugin" - -import ( - "fmt" - "reflect" - - "github.com/docker/docker/api/types" -) - -// Event is emitted for actions performed on the plugin manager -type Event interface { - matches(Event) bool -} - -// EventCreate is an event which is emitted when a plugin is created -// This is either by pull or create from context. -// -// Use the `Interfaces` field to match only plugins that implement a specific -// interface. -// These are matched against using "or" logic. -// If no interfaces are listed, all are matched. -type EventCreate struct { - Interfaces map[string]bool - Plugin types.Plugin -} - -func (e EventCreate) matches(observed Event) bool { - oe, ok := observed.(EventCreate) - if !ok { - return false - } - if len(e.Interfaces) == 0 { - return true - } - - var ifaceMatch bool - for _, in := range oe.Plugin.Config.Interface.Types { - if e.Interfaces[in.Capability] { - ifaceMatch = true - break - } - } - return ifaceMatch -} - -// EventRemove is an event which is emitted when a plugin is removed -// It matches on the passed-in plugin's ID only. -type EventRemove struct { - Plugin types.Plugin -} - -func (e EventRemove) matches(observed Event) bool { - oe, ok := observed.(EventRemove) - if !ok { - return false - } - return e.Plugin.ID == oe.Plugin.ID -} -
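The typed errors deleted above encode their classification as marker methods (NotFound, Conflict, InvalidParameter) that the daemon's errdefs layer detects by interface. A standard-library sketch of the same idea, using errors.As where the vendored code relies on pkg/errors; the names here are hypothetical:

package main

import (
	"errors"
	"fmt"
)

// conflictErr mirrors the shape of the deleted inUseError/enabledError types:
// the Conflict marker method, not the concrete type, carries the meaning.
type conflictErr string

func (e conflictErr) Error() string { return "plugin " + string(e) + " is in use" }
func (conflictErr) Conflict()       {}

// isConflict detects the behavior instead of asserting a concrete type.
func isConflict(err error) bool {
	var c interface{ Conflict() }
	return errors.As(err, &c)
}

func main() {
	fmt.Println(isConflict(conflictErr("sample"))) // true
}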
-// EventDisable is an event that is emitted when a plugin is disabled -// It matches on the passed-in plugin's ID only. -type EventDisable struct { - Plugin types.Plugin -} - -func (e EventDisable) matches(observed Event) bool { - oe, ok := observed.(EventDisable) - if !ok { - return false - } - return e.Plugin.ID == oe.Plugin.ID -} - -// EventEnable is an event that is emitted when a plugin is enabled -// It matches on the passed-in plugin's ID only. -type EventEnable struct { - Plugin types.Plugin -} - -func (e EventEnable) matches(observed Event) bool { - oe, ok := observed.(EventEnable) - if !ok { - return false - } - return e.Plugin.ID == oe.Plugin.ID -} - -// SubscribeEvents provides an event channel to listen for structured events from -// the plugin manager's actions (CRUD operations). -// The caller must call the returned `cancel()` function once done with the channel -// or this will leak resources. -func (pm *Manager) SubscribeEvents(buffer int, watchEvents ...Event) (eventCh <-chan interface{}, cancel func()) { - topic := func(i interface{}) bool { - observed, ok := i.(Event) - if !ok { - panic(fmt.Sprintf("unexpected type passed to event channel: %v", reflect.TypeOf(i))) - } - for _, e := range watchEvents { - if e.matches(observed) { - return true - } - } - // If no specific events are specified always assume a matched event - // If some events were specified and none matched above, then the event - // doesn't match - return watchEvents == nil - } - ch := pm.publisher.SubscribeTopicWithBuffer(topic, buffer) - cancelFunc := func() { pm.publisher.Evict(ch) } - return ch, cancelFunc -} diff --git a/vendor/github.com/docker/docker/plugin/executor/containerd/containerd.go b/vendor/github.com/docker/docker/plugin/executor/containerd/containerd.go deleted file mode 100644 index 8f1c8a4a1..000000000 --- a/vendor/github.com/docker/docker/plugin/executor/containerd/containerd.go +++ /dev/null @@ -1,175 +0,0 @@ -package containerd // import "github.com/docker/docker/plugin/executor/containerd" - -import ( - "context" - "io" - "path/filepath" - "sync" - "time" - - "github.com/containerd/containerd/cio" - "github.com/containerd/containerd/runtime/linux/runctypes" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/libcontainerd" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// pluginNamespace is the name used for the plugins namespace -const pluginNamespace = "plugins.moby" - -// ExitHandler represents an object that is called when the exit event is received from containerd -type ExitHandler interface { - HandleExitEvent(id string) error -} - -// Client is used by the executor to perform operations. -// TODO(@cpuguy83): This should really just be based off the containerd client interface.
-// However right now this whole package is tied to github.com/docker/docker/libcontainerd -type Client interface { - Create(ctx context.Context, containerID string, spec *specs.Spec, runtimeOptions interface{}) error - Restore(ctx context.Context, containerID string, attachStdio libcontainerd.StdioCallback) (alive bool, pid int, err error) - Status(ctx context.Context, containerID string) (libcontainerd.Status, error) - Delete(ctx context.Context, containerID string) error - DeleteTask(ctx context.Context, containerID string) (uint32, time.Time, error) - Start(ctx context.Context, containerID, checkpointDir string, withStdin bool, attachStdio libcontainerd.StdioCallback) (pid int, err error) - SignalProcess(ctx context.Context, containerID, processID string, signal int) error -} - -// New creates a new containerd plugin executor -func New(rootDir string, remote libcontainerd.Remote, exitHandler ExitHandler) (*Executor, error) { - e := &Executor{ - rootDir: rootDir, - exitHandler: exitHandler, - } - client, err := remote.NewClient(pluginNamespace, e) - if err != nil { - return nil, errors.Wrap(err, "error creating containerd exec client") - } - e.client = client - return e, nil -} - -// Executor is the containerd client implementation of a plugin executor -type Executor struct { - rootDir string - client Client - exitHandler ExitHandler -} - -// deleteTaskAndContainer deletes plugin task and then plugin container from containerd -func deleteTaskAndContainer(ctx context.Context, cli Client, id string) { - _, _, err := cli.DeleteTask(ctx, id) - if err != nil && !errdefs.IsNotFound(err) { - logrus.WithError(err).WithField("id", id).Error("failed to delete plugin task from containerd") - } - - err = cli.Delete(ctx, id) - if err != nil && !errdefs.IsNotFound(err) { - logrus.WithError(err).WithField("id", id).Error("failed to delete plugin container from containerd") - } -} - -// Create creates a new container -func (e *Executor) Create(id string, spec specs.Spec, stdout, stderr io.WriteCloser) error { - opts := runctypes.RuncOptions{ - RuntimeRoot: filepath.Join(e.rootDir, "runtime-root"), - } - ctx := context.Background() - err := e.client.Create(ctx, id, &spec, &opts) - if err != nil { - status, err2 := e.client.Status(ctx, id) - if err2 != nil { - if !errdefs.IsNotFound(err2) { - logrus.WithError(err2).WithField("id", id).Warn("Received an error while attempting to read plugin status") - } - } else { - if status != libcontainerd.StatusRunning && status != libcontainerd.StatusUnknown { - if err2 := e.client.Delete(ctx, id); err2 != nil && !errdefs.IsNotFound(err2) { - logrus.WithError(err2).WithField("plugin", id).Error("Error cleaning up containerd container") - } - err = e.client.Create(ctx, id, &spec, &opts) - } - } - - if err != nil { - return errors.Wrap(err, "error creating containerd container") - } - } - - _, err = e.client.Start(ctx, id, "", false, attachStreamsFunc(stdout, stderr)) - if err != nil { - deleteTaskAndContainer(ctx, e.client, id) - } - return err -} - -// Restore restores a container -func (e *Executor) Restore(id string, stdout, stderr io.WriteCloser) (bool, error) { - alive, _, err := e.client.Restore(context.Background(), id, attachStreamsFunc(stdout, stderr)) - if err != nil && !errdefs.IsNotFound(err) { - return false, err - } - if !alive { - deleteTaskAndContainer(context.Background(), e.client, id) - } - return alive, nil -} - -// IsRunning returns if the container with the given id is running -func (e *Executor) IsRunning(id string) (bool, error) { - status, err 
:= e.client.Status(context.Background(), id) - return status == libcontainerd.StatusRunning, err -} - -// Signal sends the specified signal to the container -func (e *Executor) Signal(id string, signal int) error { - return e.client.SignalProcess(context.Background(), id, libcontainerd.InitProcessName, signal) -} - -// ProcessEvent handles events from containerd -// All events are ignored except the exit event, which is sent of to the stored handler -func (e *Executor) ProcessEvent(id string, et libcontainerd.EventType, ei libcontainerd.EventInfo) error { - switch et { - case libcontainerd.EventExit: - deleteTaskAndContainer(context.Background(), e.client, id) - return e.exitHandler.HandleExitEvent(ei.ContainerID) - } - return nil -} - -type rio struct { - cio.IO - - wg sync.WaitGroup -} - -func (c *rio) Wait() { - c.wg.Wait() - c.IO.Wait() -} - -func attachStreamsFunc(stdout, stderr io.WriteCloser) libcontainerd.StdioCallback { - return func(iop *cio.DirectIO) (cio.IO, error) { - if iop.Stdin != nil { - iop.Stdin.Close() - // closing stdin shouldn't be needed here, it should never be open - panic("plugin stdin shouldn't have been created!") - } - - rio := &rio{IO: iop} - rio.wg.Add(2) - go func() { - io.Copy(stdout, iop.Stdout) - stdout.Close() - rio.wg.Done() - }() - go func() { - io.Copy(stderr, iop.Stderr) - stderr.Close() - rio.wg.Done() - }() - return rio, nil - } -} diff --git a/vendor/github.com/docker/docker/plugin/manager.go b/vendor/github.com/docker/docker/plugin/manager.go deleted file mode 100644 index c6f896129..000000000 --- a/vendor/github.com/docker/docker/plugin/manager.go +++ /dev/null @@ -1,384 +0,0 @@ -package plugin // import "github.com/docker/docker/plugin" - -import ( - "encoding/json" - "io" - "io/ioutil" - "os" - "path/filepath" - "reflect" - "regexp" - "sort" - "strings" - "sync" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/authorization" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/pkg/pubsub" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/plugin/v2" - "github.com/docker/docker/registry" - "github.com/opencontainers/go-digest" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const configFileName = "config.json" -const rootFSFileName = "rootfs" - -var validFullID = regexp.MustCompile(`^([a-f0-9]{64})$`) - -// Executor is the interface that the plugin manager uses to interact with for starting/stopping plugins -type Executor interface { - Create(id string, spec specs.Spec, stdout, stderr io.WriteCloser) error - IsRunning(id string) (bool, error) - Restore(id string, stdout, stderr io.WriteCloser) (alive bool, err error) - Signal(id string, signal int) error -} - -func (pm *Manager) restorePlugin(p *v2.Plugin, c *controller) error { - if p.IsEnabled() { - return pm.restore(p, c) - } - return nil -} - -type eventLogger func(id, name, action string) - -// ManagerConfig defines configuration needed to start new manager. 
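Before the manager itself: attachStreamsFunc above copies each plugin stdio stream on its own goroutine and wraps cio.IO so Wait returns only after both copies finish. The same shape in a runnable, dependency-free form:

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
	"sync"
)

// fanOut starts one goroutine per stream and blocks until both copies
// reach EOF, mirroring the rio/WaitGroup pattern above.
func fanOut(stdout, stderr io.Reader) {
	var wg sync.WaitGroup
	wg.Add(2)
	go func() { defer wg.Done(); io.Copy(os.Stdout, stdout) }()
	go func() { defer wg.Done(); io.Copy(os.Stderr, stderr) }()
	wg.Wait()
}

func main() {
	fanOut(strings.NewReader("out\n"), strings.NewReader("err\n"))
	fmt.Println("done")
}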
-type ManagerConfig struct { - Store *Store // remove - RegistryService registry.Service - LiveRestoreEnabled bool // TODO: remove - LogPluginEvent eventLogger - Root string - ExecRoot string - CreateExecutor ExecutorCreator - AuthzMiddleware *authorization.Middleware -} - -// ExecutorCreator is used in the manager config to pass in an `Executor` -type ExecutorCreator func(*Manager) (Executor, error) - -// Manager controls the plugin subsystem. -type Manager struct { - config ManagerConfig - mu sync.RWMutex // protects cMap - muGC sync.RWMutex // protects blobstore deletions - cMap map[*v2.Plugin]*controller - blobStore *basicBlobStore - publisher *pubsub.Publisher - executor Executor -} - -// controller represents the manager's control on a plugin. -type controller struct { - restart bool - exitChan chan bool - timeoutInSecs int -} - -// pluginRegistryService ensures that all resolved repositories -// are of the plugin class. -type pluginRegistryService struct { - registry.Service -} - -func (s pluginRegistryService) ResolveRepository(name reference.Named) (repoInfo *registry.RepositoryInfo, err error) { - repoInfo, err = s.Service.ResolveRepository(name) - if repoInfo != nil { - repoInfo.Class = "plugin" - } - return -} - -// NewManager returns a new plugin manager. -func NewManager(config ManagerConfig) (*Manager, error) { - if config.RegistryService != nil { - config.RegistryService = pluginRegistryService{config.RegistryService} - } - manager := &Manager{ - config: config, - } - for _, dirName := range []string{manager.config.Root, manager.config.ExecRoot, manager.tmpDir()} { - if err := os.MkdirAll(dirName, 0700); err != nil { - return nil, errors.Wrapf(err, "failed to mkdir %v", dirName) - } - } - var err error - manager.executor, err = config.CreateExecutor(manager) - if err != nil { - return nil, err - } - - manager.blobStore, err = newBasicBlobStore(filepath.Join(manager.config.Root, "storage/blobs")) - if err != nil { - return nil, err - } - - manager.cMap = make(map[*v2.Plugin]*controller) - if err := manager.reload(); err != nil { - return nil, errors.Wrap(err, "failed to restore plugins") - } - - manager.publisher = pubsub.NewPublisher(0, 0) - return manager, nil -} - -func (pm *Manager) tmpDir() string { - return filepath.Join(pm.config.Root, "tmp") -} - -// HandleExitEvent is called when the executor receives the exit event -// In the future we may change this, but for now all we care about is the exit event. 
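For orientation, a hedged sketch of how a caller might wire up this manager; the paths are illustrative and the nil executor is a placeholder, so treat it as compile-oriented pseudo-wiring rather than a working daemon:

package main

import (
	"github.com/docker/docker/plugin"
)

func newPluginManager() (*plugin.Manager, error) {
	return plugin.NewManager(plugin.ManagerConfig{
		Store:          plugin.NewStore(),
		Root:           "/var/lib/docker/plugins", // illustrative path
		ExecRoot:       "/run/docker/plugins",     // illustrative path
		LogPluginEvent: func(id, name, action string) {},
		CreateExecutor: func(m *plugin.Manager) (plugin.Executor, error) {
			return nil, nil // a real caller returns e.g. the containerd executor
		},
	})
}

func main() {
	if _, err := newPluginManager(); err != nil {
		panic(err)
	}
}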
-func (pm *Manager) HandleExitEvent(id string) error { - p, err := pm.config.Store.GetV2Plugin(id) - if err != nil { - return err - } - - if err := os.RemoveAll(filepath.Join(pm.config.ExecRoot, id)); err != nil && !os.IsNotExist(err) { - logrus.WithError(err).WithField("id", id).Error("Could not remove plugin bundle dir") - } - - pm.mu.RLock() - c := pm.cMap[p] - if c.exitChan != nil { - close(c.exitChan) - c.exitChan = nil // ignore duplicate events (containerd issue #2299) - } - restart := c.restart - pm.mu.RUnlock() - - if restart { - pm.enable(p, c, true) - } else { - if err := mount.RecursiveUnmount(filepath.Join(pm.config.Root, id)); err != nil { - return errors.Wrap(err, "error cleaning up plugin mounts") - } - } - return nil -} - -func handleLoadError(err error, id string) { - if err == nil { - return - } - logger := logrus.WithError(err).WithField("id", id) - if os.IsNotExist(errors.Cause(err)) { - // Likely some error while removing on an older version of docker - logger.Warn("missing plugin config, skipping: this may be caused due to a failed remove and requires manual cleanup.") - return - } - logger.Error("error loading plugin, skipping") -} - -func (pm *Manager) reload() error { // todo: restore - dir, err := ioutil.ReadDir(pm.config.Root) - if err != nil { - return errors.Wrapf(err, "failed to read %v", pm.config.Root) - } - plugins := make(map[string]*v2.Plugin) - for _, v := range dir { - if validFullID.MatchString(v.Name()) { - p, err := pm.loadPlugin(v.Name()) - if err != nil { - handleLoadError(err, v.Name()) - continue - } - plugins[p.GetID()] = p - } else { - if validFullID.MatchString(strings.TrimSuffix(v.Name(), "-removing")) { - // There was likely some error while removing this plugin, let's try to remove again here - if err := system.EnsureRemoveAll(v.Name()); err != nil { - logrus.WithError(err).WithField("id", v.Name()).Warn("error while attempting to clean up previously removed plugin") - } - } - } - } - - pm.config.Store.SetAll(plugins) - - var wg sync.WaitGroup - wg.Add(len(plugins)) - for _, p := range plugins { - c := &controller{exitChan: make(chan bool)} - pm.mu.Lock() - pm.cMap[p] = c - pm.mu.Unlock() - - go func(p *v2.Plugin) { - defer wg.Done() - if err := pm.restorePlugin(p, c); err != nil { - logrus.WithError(err).WithField("id", p.GetID()).Error("Failed to restore plugin") - return - } - - if p.Rootfs != "" { - p.Rootfs = filepath.Join(pm.config.Root, p.PluginObj.ID, "rootfs") - } - - // We should only enable rootfs propagation for certain plugin types that need it. 
- for _, typ := range p.PluginObj.Config.Interface.Types { - if (typ.Capability == "volumedriver" || typ.Capability == "graphdriver") && typ.Prefix == "docker" && strings.HasPrefix(typ.Version, "1.") { - if p.PluginObj.Config.PropagatedMount != "" { - propRoot := filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount") - - // check if we need to migrate an older propagated mount from before - // these mounts were stored outside the plugin rootfs - if _, err := os.Stat(propRoot); os.IsNotExist(err) { - rootfsProp := filepath.Join(p.Rootfs, p.PluginObj.Config.PropagatedMount) - if _, err := os.Stat(rootfsProp); err == nil { - if err := os.Rename(rootfsProp, propRoot); err != nil { - logrus.WithError(err).WithField("dir", propRoot).Error("error migrating propagated mount storage") - } - } - } - - if err := os.MkdirAll(propRoot, 0755); err != nil { - logrus.Errorf("failed to create PropagatedMount directory at %s: %v", propRoot, err) - } - } - } - } - - pm.save(p) - requiresManualRestore := !pm.config.LiveRestoreEnabled && p.IsEnabled() - - if requiresManualRestore { - // if liveRestore is not enabled, the plugin will be stopped now so we should enable it - if err := pm.enable(p, c, true); err != nil { - logrus.WithError(err).WithField("id", p.GetID()).Error("failed to enable plugin") - } - } - }(p) - } - wg.Wait() - return nil -} - -// Get looks up the requested plugin in the store. -func (pm *Manager) Get(idOrName string) (*v2.Plugin, error) { - return pm.config.Store.GetV2Plugin(idOrName) -} - -func (pm *Manager) loadPlugin(id string) (*v2.Plugin, error) { - p := filepath.Join(pm.config.Root, id, configFileName) - dt, err := ioutil.ReadFile(p) - if err != nil { - return nil, errors.Wrapf(err, "error reading %v", p) - } - var plugin v2.Plugin - if err := json.Unmarshal(dt, &plugin); err != nil { - return nil, errors.Wrapf(err, "error decoding %v", p) - } - return &plugin, nil -} - -func (pm *Manager) save(p *v2.Plugin) error { - pluginJSON, err := json.Marshal(p) - if err != nil { - return errors.Wrap(err, "failed to marshal plugin json") - } - if err := ioutils.AtomicWriteFile(filepath.Join(pm.config.Root, p.GetID(), configFileName), pluginJSON, 0600); err != nil { - return errors.Wrap(err, "failed to write atomically plugin json") - } - return nil -} - -// GC cleans up unreferenced blobs. 
This is recommended to run in a goroutine -func (pm *Manager) GC() { - pm.muGC.Lock() - defer pm.muGC.Unlock() - - whitelist := make(map[digest.Digest]struct{}) - for _, p := range pm.config.Store.GetAll() { - whitelist[p.Config] = struct{}{} - for _, b := range p.Blobsums { - whitelist[b] = struct{}{} - } - } - - pm.blobStore.gc(whitelist) -} - -type logHook struct{ id string } - -func (logHook) Levels() []logrus.Level { - return logrus.AllLevels -} - -func (l logHook) Fire(entry *logrus.Entry) error { - entry.Data = logrus.Fields{"plugin": l.id} - return nil -} - -func makeLoggerStreams(id string) (stdout, stderr io.WriteCloser) { - logger := logrus.New() - logger.Hooks.Add(logHook{id}) - return logger.WriterLevel(logrus.InfoLevel), logger.WriterLevel(logrus.ErrorLevel) -} - -func validatePrivileges(requiredPrivileges, privileges types.PluginPrivileges) error { - if !isEqual(requiredPrivileges, privileges, isEqualPrivilege) { - return errors.New("incorrect privileges") - } - - return nil -} - -func isEqual(arrOne, arrOther types.PluginPrivileges, compare func(x, y types.PluginPrivilege) bool) bool { - if len(arrOne) != len(arrOther) { - return false - } - - sort.Sort(arrOne) - sort.Sort(arrOther) - - for i := 1; i < arrOne.Len(); i++ { - if !compare(arrOne[i], arrOther[i]) { - return false - } - } - - return true -} - -func isEqualPrivilege(a, b types.PluginPrivilege) bool { - if a.Name != b.Name { - return false - } - - return reflect.DeepEqual(a.Value, b.Value) -} - -func configToRootFS(c []byte) (*image.RootFS, error) { - var pluginConfig types.PluginConfig - if err := json.Unmarshal(c, &pluginConfig); err != nil { - return nil, err - } - // validation for empty rootfs is in distribution code - if pluginConfig.Rootfs == nil { - return nil, nil - } - - return rootFSFromPlugin(pluginConfig.Rootfs), nil -} - -func rootFSFromPlugin(pluginfs *types.PluginConfigRootfs) *image.RootFS { - rootFS := image.RootFS{ - Type: pluginfs.Type, - DiffIDs: make([]layer.DiffID, len(pluginfs.DiffIds)), - } - for i := range pluginfs.DiffIds { - rootFS.DiffIDs[i] = layer.DiffID(pluginfs.DiffIds[i]) - } - - return &rootFS -} diff --git a/vendor/github.com/docker/docker/plugin/manager_linux.go b/vendor/github.com/docker/docker/plugin/manager_linux.go deleted file mode 100644 index 3c6f9c553..000000000 --- a/vendor/github.com/docker/docker/plugin/manager_linux.go +++ /dev/null @@ -1,335 +0,0 @@ -package plugin // import "github.com/docker/docker/plugin" - -import ( - "encoding/json" - "net" - "os" - "path/filepath" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/daemon/initlayer" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/containerfs" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/pkg/plugins" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/plugin/v2" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error { - p.Rootfs = filepath.Join(pm.config.Root, p.PluginObj.ID, "rootfs") - if p.IsEnabled() && !force { - return errors.Wrap(enabledError(p.Name()), "plugin already enabled") - } - spec, err := p.InitSpec(pm.config.ExecRoot) - if err != nil { - return err - } - - c.restart = true - c.exitChan = make(chan bool) - - pm.mu.Lock() - pm.cMap[p] = c - pm.mu.Unlock() - - var propRoot string - if p.PluginObj.Config.PropagatedMount != "" { 
- propRoot = filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount") - - if err := os.MkdirAll(propRoot, 0755); err != nil { - logrus.Errorf("failed to create PropagatedMount directory at %s: %v", propRoot, err) - } - - if err := mount.MakeRShared(propRoot); err != nil { - return errors.Wrap(err, "error setting up propagated mount dir") - } - } - - rootFS := containerfs.NewLocalContainerFS(filepath.Join(pm.config.Root, p.PluginObj.ID, rootFSFileName)) - if err := initlayer.Setup(rootFS, idtools.IDPair{UID: 0, GID: 0}); err != nil { - return errors.WithStack(err) - } - - stdout, stderr := makeLoggerStreams(p.GetID()) - if err := pm.executor.Create(p.GetID(), *spec, stdout, stderr); err != nil { - if p.PluginObj.Config.PropagatedMount != "" { - if err := mount.Unmount(propRoot); err != nil { - logrus.Warnf("Could not unmount %s: %v", propRoot, err) - } - } - return errors.WithStack(err) - } - return pm.pluginPostStart(p, c) -} - -func (pm *Manager) pluginPostStart(p *v2.Plugin, c *controller) error { - sockAddr := filepath.Join(pm.config.ExecRoot, p.GetID(), p.GetSocket()) - p.SetTimeout(time.Duration(c.timeoutInSecs) * time.Second) - addr := &net.UnixAddr{Net: "unix", Name: sockAddr} - p.SetAddr(addr) - - if p.Protocol() == plugins.ProtocolSchemeHTTPV1 { - client, err := plugins.NewClientWithTimeout(addr.Network()+"://"+addr.String(), nil, p.Timeout()) - if err != nil { - c.restart = false - shutdownPlugin(p, c.exitChan, pm.executor) - return errors.WithStack(err) - } - - p.SetPClient(client) - } - - // Initial sleep before net Dial to allow plugin to listen on socket. - time.Sleep(500 * time.Millisecond) - maxRetries := 3 - var retries int - for { - // net dial into the unix socket to see if someone's listening. - conn, err := net.Dial("unix", sockAddr) - if err == nil { - conn.Close() - break - } - - time.Sleep(3 * time.Second) - retries++ - - if retries > maxRetries { - logrus.Debugf("error net dialing plugin: %v", err) - c.restart = false - // While restoring plugins, we need to explicitly set the state to disabled - pm.config.Store.SetState(p, false) - shutdownPlugin(p, c.exitChan, pm.executor) - return err - } - - } - pm.config.Store.SetState(p, true) - pm.config.Store.CallHandler(p) - - return pm.save(p) -} - -func (pm *Manager) restore(p *v2.Plugin, c *controller) error { - stdout, stderr := makeLoggerStreams(p.GetID()) - alive, err := pm.executor.Restore(p.GetID(), stdout, stderr) - if err != nil { - return err - } - - if pm.config.LiveRestoreEnabled { - if !alive { - return pm.enable(p, c, true) - } - - c.exitChan = make(chan bool) - c.restart = true - pm.mu.Lock() - pm.cMap[p] = c - pm.mu.Unlock() - return pm.pluginPostStart(p, c) - } - - if alive { - // TODO(@cpuguy83): Should we always just re-attach to the running plugin instead of doing this? 
- c.restart = false - shutdownPlugin(p, c.exitChan, pm.executor) - } - - return nil -} - -func shutdownPlugin(p *v2.Plugin, ec chan bool, executor Executor) { - pluginID := p.GetID() - - err := executor.Signal(pluginID, int(unix.SIGTERM)) - if err != nil { - logrus.Errorf("Sending SIGTERM to plugin failed with error: %v", err) - } else { - select { - case <-ec: - logrus.Debug("Clean shutdown of plugin") - case <-time.After(time.Second * 10): - logrus.Debug("Force shutdown plugin") - if err := executor.Signal(pluginID, int(unix.SIGKILL)); err != nil { - logrus.Errorf("Sending SIGKILL to plugin failed with error: %v", err) - } - select { - case <-ec: - logrus.Debug("SIGKILL plugin shutdown") - case <-time.After(time.Second * 10): - logrus.Debug("Force shutdown plugin FAILED") - } - } - } -} - -func (pm *Manager) disable(p *v2.Plugin, c *controller) error { - if !p.IsEnabled() { - return errors.Wrap(errDisabled(p.Name()), "plugin is already disabled") - } - - c.restart = false - shutdownPlugin(p, c.exitChan, pm.executor) - pm.config.Store.SetState(p, false) - return pm.save(p) -} - -// Shutdown stops all plugins and called during daemon shutdown. -func (pm *Manager) Shutdown() { - plugins := pm.config.Store.GetAll() - for _, p := range plugins { - pm.mu.RLock() - c := pm.cMap[p] - pm.mu.RUnlock() - - if pm.config.LiveRestoreEnabled && p.IsEnabled() { - logrus.Debug("Plugin active when liveRestore is set, skipping shutdown") - continue - } - if pm.executor != nil && p.IsEnabled() { - c.restart = false - shutdownPlugin(p, c.exitChan, pm.executor) - } - } - if err := mount.RecursiveUnmount(pm.config.Root); err != nil { - logrus.WithError(err).Warn("error cleaning up plugin mounts") - } -} - -func (pm *Manager) upgradePlugin(p *v2.Plugin, configDigest digest.Digest, blobsums []digest.Digest, tmpRootFSDir string, privileges *types.PluginPrivileges) (err error) { - config, err := pm.setupNewPlugin(configDigest, blobsums, privileges) - if err != nil { - return err - } - - pdir := filepath.Join(pm.config.Root, p.PluginObj.ID) - orig := filepath.Join(pdir, "rootfs") - - // Make sure nothing is mounted - // This could happen if the plugin was disabled with `-f` with active mounts. - // If there is anything in `orig` is still mounted, this should error out. 
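shutdownPlugin above is the classic escalation ladder: SIGTERM, a bounded wait on the exit channel, then SIGKILL with a second bounded wait. A generic, runnable sketch of that control flow (the signal callback is hypothetical):

package main

import (
	"fmt"
	"time"
)

// stopWithEscalation sends a polite stop signal, waits a bounded time for
// the exit notification, then escalates, mirroring shutdownPlugin above.
func stopWithEscalation(signal func(sig string) error, exited <-chan struct{}, patience time.Duration) {
	if err := signal("SIGTERM"); err != nil {
		fmt.Println("signal failed:", err)
		return
	}
	select {
	case <-exited:
		fmt.Println("clean shutdown")
	case <-time.After(patience):
		fmt.Println("forcing shutdown")
		_ = signal("SIGKILL")
		select {
		case <-exited:
		case <-time.After(patience):
			fmt.Println("force shutdown failed")
		}
	}
}

func main() {
	exited := make(chan struct{})
	go func() { time.Sleep(50 * time.Millisecond); close(exited) }()
	stopWithEscalation(func(string) error { return nil }, exited, time.Second)
}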
- if err := mount.RecursiveUnmount(orig); err != nil { - return errdefs.System(err) - } - - backup := orig + "-old" - if err := os.Rename(orig, backup); err != nil { - return errors.Wrap(errdefs.System(err), "error backing up plugin data before upgrade") - } - - defer func() { - if err != nil { - if rmErr := os.RemoveAll(orig); rmErr != nil && !os.IsNotExist(rmErr) { - logrus.WithError(rmErr).WithField("dir", backup).Error("error cleaning up after failed upgrade") - return - } - if mvErr := os.Rename(backup, orig); mvErr != nil { - err = errors.Wrap(mvErr, "error restoring old plugin root on upgrade failure") - } - if rmErr := os.RemoveAll(tmpRootFSDir); rmErr != nil && !os.IsNotExist(rmErr) { - logrus.WithError(rmErr).WithField("plugin", p.Name()).Errorf("error cleaning up plugin upgrade dir: %s", tmpRootFSDir) - } - } else { - if rmErr := os.RemoveAll(backup); rmErr != nil && !os.IsNotExist(rmErr) { - logrus.WithError(rmErr).WithField("dir", backup).Error("error cleaning up old plugin root after successful upgrade") - } - - p.Config = configDigest - p.Blobsums = blobsums - } - }() - - if err := os.Rename(tmpRootFSDir, orig); err != nil { - return errors.Wrap(errdefs.System(err), "error upgrading") - } - - p.PluginObj.Config = config - err = pm.save(p) - return errors.Wrap(err, "error saving upgraded plugin config") -} - -func (pm *Manager) setupNewPlugin(configDigest digest.Digest, blobsums []digest.Digest, privileges *types.PluginPrivileges) (types.PluginConfig, error) { - configRC, err := pm.blobStore.Get(configDigest) - if err != nil { - return types.PluginConfig{}, err - } - defer configRC.Close() - - var config types.PluginConfig - dec := json.NewDecoder(configRC) - if err := dec.Decode(&config); err != nil { - return types.PluginConfig{}, errors.Wrapf(err, "failed to parse config") - } - if dec.More() { - return types.PluginConfig{}, errors.New("invalid config json") - } - - requiredPrivileges := computePrivileges(config) - if err != nil { - return types.PluginConfig{}, err - } - if privileges != nil { - if err := validatePrivileges(requiredPrivileges, *privileges); err != nil { - return types.PluginConfig{}, err - } - } - - return config, nil -} - -// createPlugin creates a new plugin. take lock before calling. -func (pm *Manager) createPlugin(name string, configDigest digest.Digest, blobsums []digest.Digest, rootFSDir string, privileges *types.PluginPrivileges, opts ...CreateOpt) (p *v2.Plugin, err error) { - if err := pm.config.Store.validateName(name); err != nil { // todo: this check is wrong. 
remove store - return nil, errdefs.InvalidParameter(err) - } - - config, err := pm.setupNewPlugin(configDigest, blobsums, privileges) - if err != nil { - return nil, err - } - - p = &v2.Plugin{ - PluginObj: types.Plugin{ - Name: name, - ID: stringid.GenerateRandomID(), - Config: config, - }, - Config: configDigest, - Blobsums: blobsums, - } - p.InitEmptySettings() - for _, o := range opts { - o(p) - } - - pdir := filepath.Join(pm.config.Root, p.PluginObj.ID) - if err := os.MkdirAll(pdir, 0700); err != nil { - return nil, errors.Wrapf(err, "failed to mkdir %v", pdir) - } - - defer func() { - if err != nil { - os.RemoveAll(pdir) - } - }() - - if err := os.Rename(rootFSDir, filepath.Join(pdir, rootFSFileName)); err != nil { - return nil, errors.Wrap(err, "failed to rename rootfs") - } - - if err := pm.save(p); err != nil { - return nil, err - } - - pm.config.Store.Add(p) // todo: remove - - return p, nil -} diff --git a/vendor/github.com/docker/docker/plugin/manager_windows.go b/vendor/github.com/docker/docker/plugin/manager_windows.go deleted file mode 100644 index 90cc52c99..000000000 --- a/vendor/github.com/docker/docker/plugin/manager_windows.go +++ /dev/null @@ -1,28 +0,0 @@ -package plugin // import "github.com/docker/docker/plugin" - -import ( - "fmt" - - "github.com/docker/docker/plugin/v2" - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error { - return fmt.Errorf("Not implemented") -} - -func (pm *Manager) initSpec(p *v2.Plugin) (*specs.Spec, error) { - return nil, fmt.Errorf("Not implemented") -} - -func (pm *Manager) disable(p *v2.Plugin, c *controller) error { - return fmt.Errorf("Not implemented") -} - -func (pm *Manager) restore(p *v2.Plugin, c *controller) error { - return fmt.Errorf("Not implemented") -} - -// Shutdown plugins -func (pm *Manager) Shutdown() { -} diff --git a/vendor/github.com/docker/docker/plugin/store.go b/vendor/github.com/docker/docker/plugin/store.go deleted file mode 100644 index 8e96c11da..000000000 --- a/vendor/github.com/docker/docker/plugin/store.go +++ /dev/null @@ -1,291 +0,0 @@ -package plugin // import "github.com/docker/docker/plugin" - -import ( - "fmt" - "strings" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/plugingetter" - "github.com/docker/docker/pkg/plugins" - "github.com/docker/docker/plugin/v2" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -/* allowV1PluginsFallback determines daemon's support for V1 plugins. - * When the time comes to remove support for V1 plugins, flipping - * this bool is all that will be needed. - */ -const allowV1PluginsFallback = true - -/* defaultAPIVersion is the version of the plugin API for volume, network, - IPAM and authz. This is a very stable API. When we update this API, then - pluginType should include a version. e.g. "networkdriver/2.0". -*/ -const defaultAPIVersion = "1.0" - -// GetV2Plugin retrieves a plugin by name, id or partial ID. -func (ps *Store) GetV2Plugin(refOrID string) (*v2.Plugin, error) { - ps.RLock() - defer ps.RUnlock() - - id, err := ps.resolvePluginID(refOrID) - if err != nil { - return nil, err - } - - p, idOk := ps.plugins[id] - if !idOk { - return nil, errors.WithStack(errNotFound(id)) - } - - return p, nil -} - -// validateName returns error if name is already reserved. 
always call with lock and full name -func (ps *Store) validateName(name string) error { - for _, p := range ps.plugins { - if p.Name() == name { - return alreadyExistsError(name) - } - } - return nil -} - -// GetAll retrieves all plugins. -func (ps *Store) GetAll() map[string]*v2.Plugin { - ps.RLock() - defer ps.RUnlock() - return ps.plugins -} - -// SetAll initialized plugins during daemon restore. -func (ps *Store) SetAll(plugins map[string]*v2.Plugin) { - ps.Lock() - defer ps.Unlock() - - for _, p := range plugins { - ps.setSpecOpts(p) - } - ps.plugins = plugins -} - -func (ps *Store) getAllByCap(capability string) []plugingetter.CompatPlugin { - ps.RLock() - defer ps.RUnlock() - - result := make([]plugingetter.CompatPlugin, 0, 1) - for _, p := range ps.plugins { - if p.IsEnabled() { - if _, err := p.FilterByCap(capability); err == nil { - result = append(result, p) - } - } - } - return result -} - -// SetState sets the active state of the plugin and updates plugindb. -func (ps *Store) SetState(p *v2.Plugin, state bool) { - ps.Lock() - defer ps.Unlock() - - p.PluginObj.Enabled = state -} - -func (ps *Store) setSpecOpts(p *v2.Plugin) { - var specOpts []SpecOpt - for _, typ := range p.GetTypes() { - opts, ok := ps.specOpts[typ.String()] - if ok { - specOpts = append(specOpts, opts...) - } - } - - p.SetSpecOptModifier(func(s *specs.Spec) { - for _, o := range specOpts { - o(s) - } - }) -} - -// Add adds a plugin to memory and plugindb. -// An error will be returned if there is a collision. -func (ps *Store) Add(p *v2.Plugin) error { - ps.Lock() - defer ps.Unlock() - - if v, exist := ps.plugins[p.GetID()]; exist { - return fmt.Errorf("plugin %q has the same ID %s as %q", p.Name(), p.GetID(), v.Name()) - } - - ps.setSpecOpts(p) - - ps.plugins[p.GetID()] = p - return nil -} - -// Remove removes a plugin from memory and plugindb. -func (ps *Store) Remove(p *v2.Plugin) { - ps.Lock() - delete(ps.plugins, p.GetID()) - ps.Unlock() -} - -// Get returns an enabled plugin matching the given name and capability. -func (ps *Store) Get(name, capability string, mode int) (plugingetter.CompatPlugin, error) { - // Lookup using new model. - if ps != nil { - p, err := ps.GetV2Plugin(name) - if err == nil { - if p.IsEnabled() { - fp, err := p.FilterByCap(capability) - if err != nil { - return nil, err - } - p.AddRefCount(mode) - return fp, nil - } - - // Plugin was found but it is disabled, so we should not fall back to legacy plugins - // but we should error out right away - return nil, errDisabled(name) - } - if _, ok := errors.Cause(err).(errNotFound); !ok { - return nil, err - } - } - - if !allowV1PluginsFallback { - return nil, errNotFound(name) - } - - p, err := plugins.Get(name, capability) - if err == nil { - return p, nil - } - if errors.Cause(err) == plugins.ErrNotFound { - return nil, errNotFound(name) - } - return nil, errors.Wrap(errdefs.System(err), "legacy plugin") -} - -// GetAllManagedPluginsByCap returns a list of managed plugins matching the given capability. -func (ps *Store) GetAllManagedPluginsByCap(capability string) []plugingetter.CompatPlugin { - return ps.getAllByCap(capability) -} - -// GetAllByCap returns a list of enabled plugins matching the given capability. -func (ps *Store) GetAllByCap(capability string) ([]plugingetter.CompatPlugin, error) { - result := make([]plugingetter.CompatPlugin, 0, 1) - - /* Daemon start always calls plugin.Init thereby initializing a store. - * So store on experimental builds can never be nil, even while - * handling legacy plugins. 
However, there are legacy plugin unit - * tests where the volume subsystem directly talks with the plugin, - * bypassing the daemon. For such tests, this check is necessary. - */ - if ps != nil { - ps.RLock() - result = ps.getAllByCap(capability) - ps.RUnlock() - } - - // Lookup with legacy model - if allowV1PluginsFallback { - pl, err := plugins.GetAll(capability) - if err != nil { - return nil, errors.Wrap(errdefs.System(err), "legacy plugin") - } - for _, p := range pl { - result = append(result, p) - } - } - return result, nil -} - -func pluginType(cap string) string { - return fmt.Sprintf("docker.%s/%s", strings.ToLower(cap), defaultAPIVersion) -} - -// Handle sets a callback for a given capability. It is only used by network -// and ipam drivers during plugin registration. The callback registers the -// driver with the subsystem (network, ipam). -func (ps *Store) Handle(capability string, callback func(string, *plugins.Client)) { - typ := pluginType(capability) - - // Register callback with new plugin model. - ps.Lock() - handlers, ok := ps.handlers[typ] - if !ok { - handlers = []func(string, *plugins.Client){} - } - handlers = append(handlers, callback) - ps.handlers[typ] = handlers - ps.Unlock() - - // Register callback with legacy plugin model. - if allowV1PluginsFallback { - plugins.Handle(capability, callback) - } -} - -// RegisterRuntimeOpt stores a list of SpecOpts for the provided capability. -// These options are applied to the runtime spec before a plugin is started for the specified capability. -func (ps *Store) RegisterRuntimeOpt(cap string, opts ...SpecOpt) { - ps.Lock() - defer ps.Unlock() - typ := pluginType(cap) - ps.specOpts[typ] = append(ps.specOpts[typ], opts...) -} - -// CallHandler calls the registered callback. It is invoked during plugin enable. 
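A hedged usage sketch connecting RegisterRuntimeOpt above with the SpecOpt helpers from defs.go; the capability and mount values are illustrative:

package main

import (
	"github.com/docker/docker/plugin"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// registerMounts queues a SpecOpt that is applied to the runtime spec of
// every plugin with the given capability before it starts (see setSpecOpts).
func registerMounts(ps *plugin.Store) {
	ps.RegisterRuntimeOpt("volumedriver", plugin.WithSpecMounts([]specs.Mount{{
		Destination: "/run/docker", // hypothetical bind mount
		Source:      "/run/docker",
		Type:        "bind",
		Options:     []string{"rbind"},
	}}))
}

func main() {
	registerMounts(plugin.NewStore())
}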
-func (ps *Store) CallHandler(p *v2.Plugin) { - for _, typ := range p.GetTypes() { - for _, handler := range ps.handlers[typ.String()] { - handler(p.Name(), p.Client()) - } - } -} - -func (ps *Store) resolvePluginID(idOrName string) (string, error) { - ps.RLock() // todo: fix - defer ps.RUnlock() - - if validFullID.MatchString(idOrName) { - return idOrName, nil - } - - ref, err := reference.ParseNormalizedNamed(idOrName) - if err != nil { - return "", errors.WithStack(errNotFound(idOrName)) - } - if _, ok := ref.(reference.Canonical); ok { - logrus.Warnf("canonical references cannot be resolved: %v", reference.FamiliarString(ref)) - return "", errors.WithStack(errNotFound(idOrName)) - } - - ref = reference.TagNameOnly(ref) - - for _, p := range ps.plugins { - if p.PluginObj.Name == reference.FamiliarString(ref) { - return p.PluginObj.ID, nil - } - } - - var found *v2.Plugin - for id, p := range ps.plugins { // this can be optimized - if strings.HasPrefix(id, idOrName) { - if found != nil { - return "", errors.WithStack(errAmbiguous(idOrName)) - } - found = p - } - } - if found == nil { - return "", errors.WithStack(errNotFound(idOrName)) - } - return found.PluginObj.ID, nil -} diff --git a/vendor/github.com/docker/docker/plugin/v2/plugin.go b/vendor/github.com/docker/docker/plugin/v2/plugin.go deleted file mode 100644 index 6852511c5..000000000 --- a/vendor/github.com/docker/docker/plugin/v2/plugin.go +++ /dev/null @@ -1,311 +0,0 @@ -package v2 // import "github.com/docker/docker/plugin/v2" - -import ( - "fmt" - "net" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/plugingetter" - "github.com/docker/docker/pkg/plugins" - "github.com/opencontainers/go-digest" - "github.com/opencontainers/runtime-spec/specs-go" -) - -// Plugin represents an individual plugin. -type Plugin struct { - mu sync.RWMutex - PluginObj types.Plugin `json:"plugin"` // todo: embed struct - pClient *plugins.Client - refCount int - Rootfs string // TODO: make private - - Config digest.Digest - Blobsums []digest.Digest - - modifyRuntimeSpec func(*specs.Spec) - - SwarmServiceID string - timeout time.Duration - addr net.Addr -} - -const defaultPluginRuntimeDestination = "/run/docker/plugins" - -// ErrInadequateCapability indicates that the plugin did not have the requested capability. -type ErrInadequateCapability struct { - cap string -} - -func (e ErrInadequateCapability) Error() string { - return fmt.Sprintf("plugin does not provide %q capability", e.cap) -} - -// ScopedPath returns the path scoped to the plugin rootfs -func (p *Plugin) ScopedPath(s string) string { - if p.PluginObj.Config.PropagatedMount != "" && strings.HasPrefix(s, p.PluginObj.Config.PropagatedMount) { - // re-scope to the propagated mount path on the host - return filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount", strings.TrimPrefix(s, p.PluginObj.Config.PropagatedMount)) - } - return filepath.Join(p.Rootfs, s) -} - -// Client returns the plugin client. -// Deprecated: use p.Addr() and manually create the client -func (p *Plugin) Client() *plugins.Client { - p.mu.RLock() - defer p.mu.RUnlock() - - return p.pClient -} - -// SetPClient sets the plugin client. -// Deprecated: Hardcoded plugin client is deprecated -func (p *Plugin) SetPClient(client *plugins.Client) { - p.mu.Lock() - defer p.mu.Unlock() - - p.pClient = client -} - -// IsV1 returns true for V1 plugins and false otherwise. -func (p *Plugin) IsV1() bool { - return false -} - -// Name returns the plugin name.
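// Illustrative sketch (editor's addition, not part of the deleted vendored
// code): ScopedPath above re-roots plugin paths, sending anything under the
// configured PropagatedMount to a sibling "propagated-mount" directory next
// to the rootfs. A minimal standalone re-implementation of that rule, with
// made-up example paths:
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// scopedPath mirrors the two branches of Plugin.ScopedPath.
func scopedPath(rootfs, propagatedMount, s string) string {
	if propagatedMount != "" && strings.HasPrefix(s, propagatedMount) {
		// re-scope to the propagated mount path on the host
		return filepath.Join(filepath.Dir(rootfs), "propagated-mount", strings.TrimPrefix(s, propagatedMount))
	}
	return filepath.Join(rootfs, s)
}

func main() {
	rootfs := "/var/lib/docker/plugins/abc123/rootfs" // hypothetical layout
	fmt.Println(scopedPath(rootfs, "/data", "/data/vol1")) // .../abc123/propagated-mount/vol1
	fmt.Println(scopedPath(rootfs, "/data", "/etc/hosts")) // .../abc123/rootfs/etc/hosts
}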
-func (p *Plugin) Name() string { - return p.PluginObj.Name -} - -// FilterByCap queries the plugin for a given capability. -func (p *Plugin) FilterByCap(capability string) (*Plugin, error) { - capability = strings.ToLower(capability) - for _, typ := range p.PluginObj.Config.Interface.Types { - if typ.Capability == capability && typ.Prefix == "docker" { - return p, nil - } - } - return nil, ErrInadequateCapability{capability} -} - -// InitEmptySettings initializes empty settings for a plugin. -func (p *Plugin) InitEmptySettings() { - p.PluginObj.Settings.Mounts = make([]types.PluginMount, len(p.PluginObj.Config.Mounts)) - copy(p.PluginObj.Settings.Mounts, p.PluginObj.Config.Mounts) - p.PluginObj.Settings.Devices = make([]types.PluginDevice, len(p.PluginObj.Config.Linux.Devices)) - copy(p.PluginObj.Settings.Devices, p.PluginObj.Config.Linux.Devices) - p.PluginObj.Settings.Env = make([]string, 0, len(p.PluginObj.Config.Env)) - for _, env := range p.PluginObj.Config.Env { - if env.Value != nil { - p.PluginObj.Settings.Env = append(p.PluginObj.Settings.Env, fmt.Sprintf("%s=%s", env.Name, *env.Value)) - } - } - p.PluginObj.Settings.Args = make([]string, len(p.PluginObj.Config.Args.Value)) - copy(p.PluginObj.Settings.Args, p.PluginObj.Config.Args.Value) -} - -// Set is used to pass arguments to the plugin. -func (p *Plugin) Set(args []string) error { - p.mu.Lock() - defer p.mu.Unlock() - - if p.PluginObj.Enabled { - return fmt.Errorf("cannot set on an active plugin, disable plugin before setting") - } - - sets, err := newSettables(args) - if err != nil { - return err - } - - // TODO(vieux): lots of code duplication here, needs to be refactored. - -next: - for _, s := range sets { - // range over all the envs in the config - for _, env := range p.PluginObj.Config.Env { - // found the env in the config - if env.Name == s.name { - // is it settable? - if ok, err := s.isSettable(allowedSettableFieldsEnv, env.Settable); err != nil { - return err - } else if !ok { - return fmt.Errorf("%q is not settable", s.prettyName()) - } - // it is, so let's update the settings in memory - updateSettingsEnv(&p.PluginObj.Settings.Env, &s) - continue next - } - } - - // range over all the mounts in the config - for _, mount := range p.PluginObj.Config.Mounts { - // found the mount in the config - if mount.Name == s.name { - // is it settable? - if ok, err := s.isSettable(allowedSettableFieldsMounts, mount.Settable); err != nil { - return err - } else if !ok { - return fmt.Errorf("%q is not settable", s.prettyName()) - } - - // it is, so let's update the settings in memory - if mount.Source == nil { - return fmt.Errorf("Plugin config has no mount source") - } - *mount.Source = s.value - continue next - } - } - - // range over all the devices in the config - for _, device := range p.PluginObj.Config.Linux.Devices { - // found the device in the config - if device.Name == s.name { - // is it settable? - if ok, err := s.isSettable(allowedSettableFieldsDevices, device.Settable); err != nil { - return err - } else if !ok { - return fmt.Errorf("%q is not settable", s.prettyName()) - } - - // it is, so let's update the settings in memory - if device.Path == nil { - return fmt.Errorf("Plugin config has no device path") - } - *device.Path = s.value - continue next - } - } - - // found the name in the config - if p.PluginObj.Config.Args.Name == s.name { - // is it settable?
- if ok, err := s.isSettable(allowedSettableFieldsArgs, p.PluginObj.Config.Args.Settable); err != nil { - return err - } else if !ok { - return fmt.Errorf("%q is not settable", s.prettyName()) - } - - // it is, so let's update the settings in memory - p.PluginObj.Settings.Args = strings.Split(s.value, " ") - continue next - } - - return fmt.Errorf("setting %q not found in the plugin configuration", s.name) - } - - return nil -} - -// IsEnabled returns the active state of the plugin. -func (p *Plugin) IsEnabled() bool { - p.mu.RLock() - defer p.mu.RUnlock() - - return p.PluginObj.Enabled -} - -// GetID returns the plugin's ID. -func (p *Plugin) GetID() string { - p.mu.RLock() - defer p.mu.RUnlock() - - return p.PluginObj.ID -} - -// GetSocket returns the plugin socket. -func (p *Plugin) GetSocket() string { - p.mu.RLock() - defer p.mu.RUnlock() - - return p.PluginObj.Config.Interface.Socket -} - -// GetTypes returns the interface types of a plugin. -func (p *Plugin) GetTypes() []types.PluginInterfaceType { - p.mu.RLock() - defer p.mu.RUnlock() - - return p.PluginObj.Config.Interface.Types -} - -// GetRefCount returns the reference count. -func (p *Plugin) GetRefCount() int { - p.mu.RLock() - defer p.mu.RUnlock() - - return p.refCount -} - -// AddRefCount adds to reference count. -func (p *Plugin) AddRefCount(count int) { - p.mu.Lock() - defer p.mu.Unlock() - - p.refCount += count -} - -// Acquire increments the plugin's reference count -// This should be followed up by `Release()` when the plugin is no longer in use. -func (p *Plugin) Acquire() { - p.AddRefCount(plugingetter.Acquire) -} - -// Release decrements the plugin's reference count -// This should only be called when the plugin is no longer in use, e.g. when it -// was acquired via `Acquire()` or getter.Get("name", "type", plugingetter.Acquire) -func (p *Plugin) Release() { - p.AddRefCount(plugingetter.Release) -} - -// SetSpecOptModifier sets the function to use to modify the generated -// runtime spec. -func (p *Plugin) SetSpecOptModifier(f func(*specs.Spec)) { - p.mu.Lock() - p.modifyRuntimeSpec = f - p.mu.Unlock() -} - -// Timeout gets the currently configured connection timeout. -// This should be used when dialing the plugin. -func (p *Plugin) Timeout() time.Duration { - p.mu.RLock() - t := p.timeout - p.mu.RUnlock() - return t -} - -// SetTimeout sets the timeout to use for dialing. -func (p *Plugin) SetTimeout(t time.Duration) { - p.mu.Lock() - p.timeout = t - p.mu.Unlock() -} - -// Addr returns the net.Addr to use to connect to the plugin socket -func (p *Plugin) Addr() net.Addr { - p.mu.RLock() - addr := p.addr - p.mu.RUnlock() - return addr -} - -// SetAddr sets the plugin address which can be used for dialing the plugin. -func (p *Plugin) SetAddr(addr net.Addr) { - p.mu.Lock() - p.addr = addr - p.mu.Unlock() -} - -// Protocol is the protocol that should be used for interacting with the plugin.
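// Illustrative sketch (editor's addition, not part of the deleted vendored
// code): the refcount helpers above are meant to be used in Acquire/Release
// pairs, so the daemon can tell when a plugin is still in use. A standalone
// mirror of that discipline (type and method names here are hypothetical
// stand-ins, not the vendored API):
package main

import (
	"fmt"
	"sync"
)

type refCounted struct {
	mu       sync.Mutex
	refCount int
}

// Acquire and Release mirror AddRefCount(+1) / AddRefCount(-1) above.
func (r *refCounted) Acquire() { r.mu.Lock(); r.refCount++; r.mu.Unlock() }
func (r *refCounted) Release() { r.mu.Lock(); r.refCount--; r.mu.Unlock() }

func (r *refCounted) InUse() bool {
	r.mu.Lock()
	defer r.mu.Unlock()
	return r.refCount > 0
}

func main() {
	var p refCounted
	p.Acquire()
	defer p.Release()
	fmt.Println(p.InUse()) // true while the reference is held
}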
-func (p *Plugin) Protocol() string { - if p.PluginObj.Config.Interface.ProtocolScheme != "" { - return p.PluginObj.Config.Interface.ProtocolScheme - } - return plugins.ProtocolSchemeHTTPV1 -} diff --git a/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go b/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go deleted file mode 100644 index 58c432fcd..000000000 --- a/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go +++ /dev/null @@ -1,141 +0,0 @@ -package v2 // import "github.com/docker/docker/plugin/v2" - -import ( - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/oci" - "github.com/docker/docker/pkg/system" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" -) - -// InitSpec creates an OCI spec from the plugin's config. -func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { - s := oci.DefaultSpec() - - s.Root = &specs.Root{ - Path: p.Rootfs, - Readonly: false, // TODO: all plugins should be readonly? settable in config? - } - - userMounts := make(map[string]struct{}, len(p.PluginObj.Settings.Mounts)) - for _, m := range p.PluginObj.Settings.Mounts { - userMounts[m.Destination] = struct{}{} - } - - execRoot = filepath.Join(execRoot, p.PluginObj.ID) - if err := os.MkdirAll(execRoot, 0700); err != nil { - return nil, errors.WithStack(err) - } - - if p.PluginObj.Config.PropagatedMount != "" { - pRoot := filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount") - s.Mounts = append(s.Mounts, specs.Mount{ - Source: pRoot, - Destination: p.PluginObj.Config.PropagatedMount, - Type: "bind", - Options: []string{"rbind", "rw", "rshared"}, - }) - s.Linux.RootfsPropagation = "rshared" - } - - mounts := append(p.PluginObj.Config.Mounts, types.PluginMount{ - Source: &execRoot, - Destination: defaultPluginRuntimeDestination, - Type: "bind", - Options: []string{"rbind", "rshared"}, - }) - - if p.PluginObj.Config.Network.Type != "" { - // TODO: if net == bridge, use libnetwork controller to create a new plugin-specific bridge, bind mount /etc/hosts and /etc/resolv.conf look at the docker code (allocateNetwork, initialize) - if p.PluginObj.Config.Network.Type == "host" { - oci.RemoveNamespace(&s, specs.LinuxNamespaceType("network")) - } - etcHosts := "/etc/hosts" - resolvConf := "/etc/resolv.conf" - mounts = append(mounts, - types.PluginMount{ - Source: &etcHosts, - Destination: etcHosts, - Type: "bind", - Options: []string{"rbind", "ro"}, - }, - types.PluginMount{ - Source: &resolvConf, - Destination: resolvConf, - Type: "bind", - Options: []string{"rbind", "ro"}, - }) - } - if p.PluginObj.Config.PidHost { - oci.RemoveNamespace(&s, specs.LinuxNamespaceType("pid")) - } - - if p.PluginObj.Config.IpcHost { - oci.RemoveNamespace(&s, specs.LinuxNamespaceType("ipc")) - } - - for _, mnt := range mounts { - m := specs.Mount{ - Destination: mnt.Destination, - Type: mnt.Type, - Options: mnt.Options, - } - if mnt.Source == nil { - return nil, errors.New("mount source is not specified") - } - m.Source = *mnt.Source - s.Mounts = append(s.Mounts, m) - } - - for i, m := range s.Mounts { - if strings.HasPrefix(m.Destination, "/dev/") { - if _, ok := userMounts[m.Destination]; ok { - s.Mounts = append(s.Mounts[:i], s.Mounts[i+1:]...) 
- } - } - } - - if p.PluginObj.Config.Linux.AllowAllDevices { - s.Linux.Resources.Devices = []specs.LinuxDeviceCgroup{{Allow: true, Access: "rwm"}} - } - for _, dev := range p.PluginObj.Settings.Devices { - path := *dev.Path - d, dPermissions, err := oci.DevicesFromPath(path, path, "rwm") - if err != nil { - return nil, errors.WithStack(err) - } - s.Linux.Devices = append(s.Linux.Devices, d...) - s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, dPermissions...) - } - - envs := make([]string, 1, len(p.PluginObj.Settings.Env)+1) - envs[0] = "PATH=" + system.DefaultPathEnv(runtime.GOOS) - envs = append(envs, p.PluginObj.Settings.Env...) - - args := append(p.PluginObj.Config.Entrypoint, p.PluginObj.Settings.Args...) - cwd := p.PluginObj.Config.WorkDir - if len(cwd) == 0 { - cwd = "/" - } - s.Process.Terminal = false - s.Process.Args = args - s.Process.Cwd = cwd - s.Process.Env = envs - - caps := s.Process.Capabilities - caps.Bounding = append(caps.Bounding, p.PluginObj.Config.Linux.Capabilities...) - caps.Permitted = append(caps.Permitted, p.PluginObj.Config.Linux.Capabilities...) - caps.Inheritable = append(caps.Inheritable, p.PluginObj.Config.Linux.Capabilities...) - caps.Effective = append(caps.Effective, p.PluginObj.Config.Linux.Capabilities...) - - if p.modifyRuntimeSpec != nil { - p.modifyRuntimeSpec(&s) - } - - return &s, nil -} diff --git a/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go b/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go deleted file mode 100644 index 5242fe124..000000000 --- a/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !linux - -package v2 // import "github.com/docker/docker/plugin/v2" - -import ( - "errors" - - "github.com/opencontainers/runtime-spec/specs-go" -) - -// InitSpec creates an OCI spec from the plugin's config. -func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { - return nil, errors.New("not supported") -} diff --git a/vendor/github.com/docker/docker/plugin/v2/settable.go b/vendor/github.com/docker/docker/plugin/v2/settable.go deleted file mode 100644 index efda56470..000000000 --- a/vendor/github.com/docker/docker/plugin/v2/settable.go +++ /dev/null @@ -1,102 +0,0 @@ -package v2 // import "github.com/docker/docker/plugin/v2" - -import ( - "errors" - "fmt" - "strings" -) - -type settable struct { - name string - field string - value string -} - -var ( - allowedSettableFieldsEnv = []string{"value"} - allowedSettableFieldsArgs = []string{"value"} - allowedSettableFieldsDevices = []string{"path"} - allowedSettableFieldsMounts = []string{"source"} - - errMultipleFields = errors.New("multiple fields are settable, one must be specified") - errInvalidFormat = errors.New("invalid format, must be <name>[.<field>][=<value>]") -) - -func newSettables(args []string) ([]settable, error) { - sets := make([]settable, 0, len(args)) - for _, arg := range args { - set, err := newSettable(arg) - if err != nil { - return nil, err - } - sets = append(sets, set) - } - return sets, nil -} - -func newSettable(arg string) (settable, error) { - var set settable - if i := strings.Index(arg, "="); i == 0 { - return set, errInvalidFormat - } else if i < 0 { - set.name = arg - } else { - set.name = arg[:i] - set.value = arg[i+1:] - } - - if i := strings.LastIndex(set.name, "."); i > 0 { - set.field = set.name[i+1:] - set.name = arg[:i] - } - - return set, nil -} - -// prettyName returns name.field if there is a field, otherwise name.
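// Illustrative sketch (editor's addition, not part of the deleted vendored
// code): newSettable above parses plugin "set" arguments of the form
// <name>[.<field>][=<value>]. A standalone re-implementation of the same
// split rules, with made-up example arguments:
package main

import (
	"errors"
	"fmt"
	"strings"
)

func parseSettable(arg string) (name, field, value string, err error) {
	// Split off the value at the first "=", rejecting an empty name.
	if i := strings.Index(arg, "="); i == 0 {
		return "", "", "", errors.New("invalid format, must be <name>[.<field>][=<value>]")
	} else if i < 0 {
		name = arg
	} else {
		name, value = arg[:i], arg[i+1:]
	}
	// Split off an optional field at the last "." inside the name.
	if i := strings.LastIndex(name, "."); i > 0 {
		name, field = name[:i], name[i+1:]
	}
	return name, field, value, nil
}

func main() {
	n, f, v, _ := parseSettable("mount.source=/tmp")
	fmt.Printf("%q %q %q\n", n, f, v) // "mount" "source" "/tmp"
	n, f, v, _ = parseSettable("DEBUG=1")
	fmt.Printf("%q %q %q\n", n, f, v) // "DEBUG" "" "1"
}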
-func (set *settable) prettyName() string { - if set.field != "" { - return fmt.Sprintf("%s.%s", set.name, set.field) - } - return set.name -} - -func (set *settable) isSettable(allowedSettableFields []string, settable []string) (bool, error) { - if set.field == "" { - if len(settable) == 1 { - // if field is not specified and there is only one settable, default to it. - set.field = settable[0] - } else if len(settable) > 1 { - return false, errMultipleFields - } - } - - isAllowed := false - for _, allowedSettableField := range allowedSettableFields { - if set.field == allowedSettableField { - isAllowed = true - break - } - } - - if isAllowed { - for _, settableField := range settable { - if set.field == settableField { - return true, nil - } - } - } - - return false, nil -} - -func updateSettingsEnv(env *[]string, set *settable) { - for i, e := range *env { - if parts := strings.SplitN(e, "=", 2); parts[0] == set.name { - (*env)[i] = fmt.Sprintf("%s=%s", set.name, set.value) - return - } - } - - *env = append(*env, fmt.Sprintf("%s=%s", set.name, set.value)) -} diff --git a/vendor/github.com/docker/docker/profiles/apparmor/apparmor.go b/vendor/github.com/docker/docker/profiles/apparmor/apparmor.go deleted file mode 100644 index b021668c8..000000000 --- a/vendor/github.com/docker/docker/profiles/apparmor/apparmor.go +++ /dev/null @@ -1,114 +0,0 @@ -// +build linux - -package apparmor // import "github.com/docker/docker/profiles/apparmor" - -import ( - "bufio" - "io" - "io/ioutil" - "os" - "path" - "strings" - "text/template" - - "github.com/docker/docker/pkg/aaparser" -) - -var ( - // profileDirectory is the file store for apparmor profiles and macros. - profileDirectory = "/etc/apparmor.d" -) - -// profileData holds information about the given profile for generation. -type profileData struct { - // Name is profile name. - Name string - // Imports defines the apparmor functions to import, before defining the profile. - Imports []string - // InnerImports defines the apparmor functions to import in the profile. - InnerImports []string - // Version is the {major, minor, patch} version of apparmor_parser as a single number. - Version int -} - -// generateDefault creates an apparmor profile from ProfileData. -func (p *profileData) generateDefault(out io.Writer) error { - compiled, err := template.New("apparmor_profile").Parse(baseTemplate) - if err != nil { - return err - } - - if macroExists("tunables/global") { - p.Imports = append(p.Imports, "#include <tunables/global>") - } else { - p.Imports = append(p.Imports, "@{PROC}=/proc/") - } - - if macroExists("abstractions/base") { - p.InnerImports = append(p.InnerImports, "#include <abstractions/base>") - } - - ver, err := aaparser.GetVersion() - if err != nil { - return err - } - p.Version = ver - - return compiled.Execute(out, p) -} - -// macroExists checks if the passed macro exists. -func macroExists(m string) bool { - _, err := os.Stat(path.Join(profileDirectory, m)) - return err == nil -} - -// InstallDefault generates a default profile in a temp directory determined by -// os.TempDir(), then loads the profile into the kernel using 'apparmor_parser'. -func InstallDefault(name string) error { - p := profileData{ - Name: name, - } - - // Install to a temporary directory.
- f, err := ioutil.TempFile("", name) - if err != nil { - return err - } - profilePath := f.Name() - - defer f.Close() - defer os.Remove(profilePath) - - if err := p.generateDefault(f); err != nil { - return err - } - - return aaparser.LoadProfile(profilePath) -} - -// IsLoaded checks if a profile with the given name has been loaded into the -// kernel. -func IsLoaded(name string) (bool, error) { - file, err := os.Open("/sys/kernel/security/apparmor/profiles") - if err != nil { - return false, err - } - defer file.Close() - - r := bufio.NewReader(file) - for { - p, err := r.ReadString('\n') - if err == io.EOF { - break - } - if err != nil { - return false, err - } - if strings.HasPrefix(p, name+" ") { - return true, nil - } - } - - return false, nil -} diff --git a/vendor/github.com/docker/docker/profiles/apparmor/template.go b/vendor/github.com/docker/docker/profiles/apparmor/template.go deleted file mode 100644 index c00a3f70e..000000000 --- a/vendor/github.com/docker/docker/profiles/apparmor/template.go +++ /dev/null @@ -1,44 +0,0 @@ -// +build linux - -package apparmor // import "github.com/docker/docker/profiles/apparmor" - -// baseTemplate defines the default apparmor profile for containers. -const baseTemplate = ` -{{range $value := .Imports}} -{{$value}} -{{end}} - -profile {{.Name}} flags=(attach_disconnected,mediate_deleted) { -{{range $value := .InnerImports}} - {{$value}} -{{end}} - - network, - capability, - file, - umount, - - deny @{PROC}/* w, # deny write for all files directly in /proc (not in a subdir) - # deny write to files not in /proc//** or /proc/sys/** - deny @{PROC}/{[^1-9],[^1-9][^0-9],[^1-9s][^0-9y][^0-9s],[^1-9][^0-9][^0-9][^0-9]*}/** w, - deny @{PROC}/sys/[^k]** w, # deny /proc/sys except /proc/sys/k* (effectively /proc/sys/kernel) - deny @{PROC}/sys/kernel/{?,??,[^s][^h][^m]**} w, # deny everything except shm* in /proc/sys/kernel/ - deny @{PROC}/sysrq-trigger rwklx, - deny @{PROC}/kcore rwklx, - - deny mount, - - deny /sys/[^f]*/** wklx, - deny /sys/f[^s]*/** wklx, - deny /sys/fs/[^c]*/** wklx, - deny /sys/fs/c[^g]*/** wklx, - deny /sys/fs/cg[^r]*/** wklx, - deny /sys/firmware/** rwklx, - deny /sys/kernel/security/** rwklx, - -{{if ge .Version 208095}} - # suppress ptrace denials when using 'docker ps' or using 'ps' inside a container - ptrace (trace,read) peer={{.Name}}, -{{end}} -} -` diff --git a/vendor/github.com/docker/docker/profiles/seccomp/generate.go b/vendor/github.com/docker/docker/profiles/seccomp/generate.go deleted file mode 100644 index 32f22bb37..000000000 --- a/vendor/github.com/docker/docker/profiles/seccomp/generate.go +++ /dev/null @@ -1,32 +0,0 @@ -// +build ignore - -package main - -import ( - "encoding/json" - "io/ioutil" - "os" - "path/filepath" - - "github.com/docker/docker/profiles/seccomp" -) - -// saves the default seccomp profile as a json file so people can use it as a -// base for their own custom profiles -func main() { - wd, err := os.Getwd() - if err != nil { - panic(err) - } - f := filepath.Join(wd, "default.json") - - // write the default profile to the file - b, err := json.MarshalIndent(seccomp.DefaultProfile(), "", "\t") - if err != nil { - panic(err) - } - - if err := ioutil.WriteFile(f, b, 0644); err != nil { - panic(err) - } -} diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go deleted file mode 100644 index 4438670a5..000000000 --- a/vendor/github.com/docker/docker/profiles/seccomp/seccomp.go +++ /dev/null @@ -1,160 +0,0 @@ -// +build 
linux - -package seccomp // import "github.com/docker/docker/profiles/seccomp" - -import ( - "encoding/json" - "errors" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/opencontainers/runtime-spec/specs-go" - libseccomp "github.com/seccomp/libseccomp-golang" -) - -//go:generate go run -tags 'seccomp' generate.go - -// GetDefaultProfile returns the default seccomp profile. -func GetDefaultProfile(rs *specs.Spec) (*specs.LinuxSeccomp, error) { - return setupSeccomp(DefaultProfile(), rs) -} - -// LoadProfile takes a json string and decodes the seccomp profile. -func LoadProfile(body string, rs *specs.Spec) (*specs.LinuxSeccomp, error) { - var config types.Seccomp - if err := json.Unmarshal([]byte(body), &config); err != nil { - return nil, fmt.Errorf("Decoding seccomp profile failed: %v", err) - } - return setupSeccomp(&config, rs) -} - -var nativeToSeccomp = map[string]types.Arch{ - "amd64": types.ArchX86_64, - "arm64": types.ArchAARCH64, - "mips64": types.ArchMIPS64, - "mips64n32": types.ArchMIPS64N32, - "mipsel64": types.ArchMIPSEL64, - "mipsel64n32": types.ArchMIPSEL64N32, - "s390x": types.ArchS390X, -} - -// inSlice tests whether a string is contained in a slice of strings or not. -// Comparison is case sensitive -func inSlice(slice []string, s string) bool { - for _, ss := range slice { - if s == ss { - return true - } - } - return false -} - -func setupSeccomp(config *types.Seccomp, rs *specs.Spec) (*specs.LinuxSeccomp, error) { - if config == nil { - return nil, nil - } - - // No default action specified, no syscalls listed, assume seccomp disabled - if config.DefaultAction == "" && len(config.Syscalls) == 0 { - return nil, nil - } - - newConfig := &specs.LinuxSeccomp{} - - var arch string - var native, err = libseccomp.GetNativeArch() - if err == nil { - arch = native.String() - } - - if len(config.Architectures) != 0 && len(config.ArchMap) != 0 { - return nil, errors.New("'architectures' and 'archMap' were specified in the seccomp profile, use either 'architectures' or 'archMap'") - } - - // if config.Architectures == 0 then libseccomp will figure out the architecture to use - if len(config.Architectures) != 0 { - for _, a := range config.Architectures { - newConfig.Architectures = append(newConfig.Architectures, specs.Arch(a)) - } - } - - if len(config.ArchMap) != 0 { - for _, a := range config.ArchMap { - seccompArch, ok := nativeToSeccomp[arch] - if ok { - if a.Arch == seccompArch { - newConfig.Architectures = append(newConfig.Architectures, specs.Arch(a.Arch)) - for _, sa := range a.SubArches { - newConfig.Architectures = append(newConfig.Architectures, specs.Arch(sa)) - } - break - } - } - } - } - - newConfig.DefaultAction = specs.LinuxSeccompAction(config.DefaultAction) - -Loop: - // Loop through all syscall blocks and convert them to libcontainer format after filtering them - for _, call := range config.Syscalls { - if len(call.Excludes.Arches) > 0 { - if inSlice(call.Excludes.Arches, arch) { - continue Loop - } - } - if len(call.Excludes.Caps) > 0 { - for _, c := range call.Excludes.Caps { - if inSlice(rs.Process.Capabilities.Bounding, c) { - continue Loop - } - } - } - if len(call.Includes.Arches) > 0 { - if !inSlice(call.Includes.Arches, arch) { - continue Loop - } - } - if len(call.Includes.Caps) > 0 { - for _, c := range call.Includes.Caps { - if !inSlice(rs.Process.Capabilities.Bounding, c) { - continue Loop - } - } - } - - if call.Name != "" && len(call.Names) != 0 { - return nil, errors.New("'name' and 'names' were specified in the seccomp profile, use 
either 'name' or 'names'") - } - - if call.Name != "" { - newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(call.Name, call.Action, call.Args)) - } - - for _, n := range call.Names { - newConfig.Syscalls = append(newConfig.Syscalls, createSpecsSyscall(n, call.Action, call.Args)) - } - } - - return newConfig, nil -} - -func createSpecsSyscall(name string, action types.Action, args []*types.Arg) specs.LinuxSyscall { - newCall := specs.LinuxSyscall{ - Names: []string{name}, - Action: specs.LinuxSeccompAction(action), - } - - // Loop through all the arguments of the syscall and convert them - for _, arg := range args { - newArg := specs.LinuxSeccompArg{ - Index: arg.Index, - Value: arg.Value, - ValueTwo: arg.ValueTwo, - Op: specs.LinuxSeccompOperator(arg.Op), - } - - newCall.Args = append(newCall.Args, newArg) - } - return newCall -} diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go deleted file mode 100644 index be29aa4f7..000000000 --- a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_default.go +++ /dev/null @@ -1,640 +0,0 @@ -// +build linux,seccomp - -package seccomp // import "github.com/docker/docker/profiles/seccomp" - -import ( - "github.com/docker/docker/api/types" - "golang.org/x/sys/unix" -) - -func arches() []types.Architecture { - return []types.Architecture{ - { - Arch: types.ArchX86_64, - SubArches: []types.Arch{types.ArchX86, types.ArchX32}, - }, - { - Arch: types.ArchAARCH64, - SubArches: []types.Arch{types.ArchARM}, - }, - { - Arch: types.ArchMIPS64, - SubArches: []types.Arch{types.ArchMIPS, types.ArchMIPS64N32}, - }, - { - Arch: types.ArchMIPS64N32, - SubArches: []types.Arch{types.ArchMIPS, types.ArchMIPS64}, - }, - { - Arch: types.ArchMIPSEL64, - SubArches: []types.Arch{types.ArchMIPSEL, types.ArchMIPSEL64N32}, - }, - { - Arch: types.ArchMIPSEL64N32, - SubArches: []types.Arch{types.ArchMIPSEL, types.ArchMIPSEL64}, - }, - { - Arch: types.ArchS390X, - SubArches: []types.Arch{types.ArchS390}, - }, - } -} - -// DefaultProfile defines the whitelist for the default seccomp profile. 
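// Illustrative sketch (editor's addition, not part of the deleted vendored
// code): a minimal custom profile in the JSON shape consumed by LoadProfile
// above. The field names ("defaultAction", "syscalls", "names", "action")
// are inferred from the types.Seccomp usage in this file; treat them as an
// assumption. Decoded here into local mirror structs rather than the
// vendored types:
package main

import (
	"encoding/json"
	"fmt"
)

type syscallRule struct {
	Name   string   `json:"name,omitempty"`
	Names  []string `json:"names,omitempty"`
	Action string   `json:"action"`
}

type profile struct {
	DefaultAction string        `json:"defaultAction"`
	Syscalls      []syscallRule `json:"syscalls"`
}

// body is a made-up example: deny everything by default, allow three syscalls.
const body = `{
  "defaultAction": "SCMP_ACT_ERRNO",
  "syscalls": [
    {"names": ["read", "write", "exit_group"], "action": "SCMP_ACT_ALLOW"}
  ]
}`

func main() {
	var p profile
	if err := json.Unmarshal([]byte(body), &p); err != nil {
		panic(err)
	}
	fmt.Printf("default=%s rules=%d\n", p.DefaultAction, len(p.Syscalls))
}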
-func DefaultProfile() *types.Seccomp { - syscalls := []*types.Syscall{ - { - Names: []string{ - "accept", - "accept4", - "access", - "adjtimex", - "alarm", - "bind", - "brk", - "capget", - "capset", - "chdir", - "chmod", - "chown", - "chown32", - "clock_getres", - "clock_gettime", - "clock_nanosleep", - "close", - "connect", - "copy_file_range", - "creat", - "dup", - "dup2", - "dup3", - "epoll_create", - "epoll_create1", - "epoll_ctl", - "epoll_ctl_old", - "epoll_pwait", - "epoll_wait", - "epoll_wait_old", - "eventfd", - "eventfd2", - "execve", - "execveat", - "exit", - "exit_group", - "faccessat", - "fadvise64", - "fadvise64_64", - "fallocate", - "fanotify_mark", - "fchdir", - "fchmod", - "fchmodat", - "fchown", - "fchown32", - "fchownat", - "fcntl", - "fcntl64", - "fdatasync", - "fgetxattr", - "flistxattr", - "flock", - "fork", - "fremovexattr", - "fsetxattr", - "fstat", - "fstat64", - "fstatat64", - "fstatfs", - "fstatfs64", - "fsync", - "ftruncate", - "ftruncate64", - "futex", - "futimesat", - "getcpu", - "getcwd", - "getdents", - "getdents64", - "getegid", - "getegid32", - "geteuid", - "geteuid32", - "getgid", - "getgid32", - "getgroups", - "getgroups32", - "getitimer", - "getpeername", - "getpgid", - "getpgrp", - "getpid", - "getppid", - "getpriority", - "getrandom", - "getresgid", - "getresgid32", - "getresuid", - "getresuid32", - "getrlimit", - "get_robust_list", - "getrusage", - "getsid", - "getsockname", - "getsockopt", - "get_thread_area", - "gettid", - "gettimeofday", - "getuid", - "getuid32", - "getxattr", - "inotify_add_watch", - "inotify_init", - "inotify_init1", - "inotify_rm_watch", - "io_cancel", - "ioctl", - "io_destroy", - "io_getevents", - "ioprio_get", - "ioprio_set", - "io_setup", - "io_submit", - "ipc", - "kill", - "lchown", - "lchown32", - "lgetxattr", - "link", - "linkat", - "listen", - "listxattr", - "llistxattr", - "_llseek", - "lremovexattr", - "lseek", - "lsetxattr", - "lstat", - "lstat64", - "madvise", - "memfd_create", - "mincore", - "mkdir", - "mkdirat", - "mknod", - "mknodat", - "mlock", - "mlock2", - "mlockall", - "mmap", - "mmap2", - "mprotect", - "mq_getsetattr", - "mq_notify", - "mq_open", - "mq_timedreceive", - "mq_timedsend", - "mq_unlink", - "mremap", - "msgctl", - "msgget", - "msgrcv", - "msgsnd", - "msync", - "munlock", - "munlockall", - "munmap", - "nanosleep", - "newfstatat", - "_newselect", - "open", - "openat", - "pause", - "pipe", - "pipe2", - "poll", - "ppoll", - "prctl", - "pread64", - "preadv", - "preadv2", - "prlimit64", - "pselect6", - "pwrite64", - "pwritev", - "pwritev2", - "read", - "readahead", - "readlink", - "readlinkat", - "readv", - "recv", - "recvfrom", - "recvmmsg", - "recvmsg", - "remap_file_pages", - "removexattr", - "rename", - "renameat", - "renameat2", - "restart_syscall", - "rmdir", - "rt_sigaction", - "rt_sigpending", - "rt_sigprocmask", - "rt_sigqueueinfo", - "rt_sigreturn", - "rt_sigsuspend", - "rt_sigtimedwait", - "rt_tgsigqueueinfo", - "sched_getaffinity", - "sched_getattr", - "sched_getparam", - "sched_get_priority_max", - "sched_get_priority_min", - "sched_getscheduler", - "sched_rr_get_interval", - "sched_setaffinity", - "sched_setattr", - "sched_setparam", - "sched_setscheduler", - "sched_yield", - "seccomp", - "select", - "semctl", - "semget", - "semop", - "semtimedop", - "send", - "sendfile", - "sendfile64", - "sendmmsg", - "sendmsg", - "sendto", - "setfsgid", - "setfsgid32", - "setfsuid", - "setfsuid32", - "setgid", - "setgid32", - "setgroups", - "setgroups32", - "setitimer", - "setpgid", - "setpriority", - 
"setregid", - "setregid32", - "setresgid", - "setresgid32", - "setresuid", - "setresuid32", - "setreuid", - "setreuid32", - "setrlimit", - "set_robust_list", - "setsid", - "setsockopt", - "set_thread_area", - "set_tid_address", - "setuid", - "setuid32", - "setxattr", - "shmat", - "shmctl", - "shmdt", - "shmget", - "shutdown", - "sigaltstack", - "signalfd", - "signalfd4", - "sigreturn", - "socket", - "socketcall", - "socketpair", - "splice", - "stat", - "stat64", - "statfs", - "statfs64", - "statx", - "symlink", - "symlinkat", - "sync", - "sync_file_range", - "syncfs", - "sysinfo", - "syslog", - "tee", - "tgkill", - "time", - "timer_create", - "timer_delete", - "timerfd_create", - "timerfd_gettime", - "timerfd_settime", - "timer_getoverrun", - "timer_gettime", - "timer_settime", - "times", - "tkill", - "truncate", - "truncate64", - "ugetrlimit", - "umask", - "uname", - "unlink", - "unlinkat", - "utime", - "utimensat", - "utimes", - "vfork", - "vmsplice", - "wait4", - "waitid", - "waitpid", - "write", - "writev", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Names: []string{"personality"}, - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 0, - Value: 0x0, - Op: types.OpEqualTo, - }, - }, - }, - { - Names: []string{"personality"}, - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 0, - Value: 0x0008, - Op: types.OpEqualTo, - }, - }, - }, - { - Names: []string{"personality"}, - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 0, - Value: 0x20000, - Op: types.OpEqualTo, - }, - }, - }, - { - Names: []string{"personality"}, - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 0, - Value: 0x20008, - Op: types.OpEqualTo, - }, - }, - }, - { - Names: []string{"personality"}, - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 0, - Value: 0xffffffff, - Op: types.OpEqualTo, - }, - }, - }, - { - Names: []string{ - "sync_file_range2", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Arches: []string{"ppc64le"}, - }, - }, - { - Names: []string{ - "arm_fadvise64_64", - "arm_sync_file_range", - "sync_file_range2", - "breakpoint", - "cacheflush", - "set_tls", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Arches: []string{"arm", "arm64"}, - }, - }, - { - Names: []string{ - "arch_prctl", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Arches: []string{"amd64", "x32"}, - }, - }, - { - Names: []string{ - "modify_ldt", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Arches: []string{"amd64", "x32", "x86"}, - }, - }, - { - Names: []string{ - "s390_pci_mmio_read", - "s390_pci_mmio_write", - "s390_runtime_instr", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Arches: []string{"s390", "s390x"}, - }, - }, - { - Names: []string{ - "open_by_handle_at", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Caps: []string{"CAP_DAC_READ_SEARCH"}, - }, - }, - { - Names: []string{ - "bpf", - "clone", - "fanotify_init", - "lookup_dcookie", - "mount", - "name_to_handle_at", - "perf_event_open", - "quotactl", - "setdomainname", - "sethostname", - "setns", - "umount", - "umount2", - "unshare", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Caps: []string{"CAP_SYS_ADMIN"}, - }, - }, - { - Names: []string{ - "clone", - }, - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 0, - Value: unix.CLONE_NEWNS | 
unix.CLONE_NEWUTS | unix.CLONE_NEWIPC | unix.CLONE_NEWUSER | unix.CLONE_NEWPID | unix.CLONE_NEWNET, - ValueTwo: 0, - Op: types.OpMaskedEqual, - }, - }, - Excludes: types.Filter{ - Caps: []string{"CAP_SYS_ADMIN"}, - Arches: []string{"s390", "s390x"}, - }, - }, - { - Names: []string{ - "clone", - }, - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 1, - Value: unix.CLONE_NEWNS | unix.CLONE_NEWUTS | unix.CLONE_NEWIPC | unix.CLONE_NEWUSER | unix.CLONE_NEWPID | unix.CLONE_NEWNET, - ValueTwo: 0, - Op: types.OpMaskedEqual, - }, - }, - Comment: "s390 parameter ordering for clone is different", - Includes: types.Filter{ - Arches: []string{"s390", "s390x"}, - }, - Excludes: types.Filter{ - Caps: []string{"CAP_SYS_ADMIN"}, - }, - }, - { - Names: []string{ - "reboot", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Caps: []string{"CAP_SYS_BOOT"}, - }, - }, - { - Names: []string{ - "chroot", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Caps: []string{"CAP_SYS_CHROOT"}, - }, - }, - { - Names: []string{ - "delete_module", - "init_module", - "finit_module", - "query_module", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Caps: []string{"CAP_SYS_MODULE"}, - }, - }, - { - Names: []string{ - "acct", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Caps: []string{"CAP_SYS_PACCT"}, - }, - }, - { - Names: []string{ - "kcmp", - "process_vm_readv", - "process_vm_writev", - "ptrace", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Caps: []string{"CAP_SYS_PTRACE"}, - }, - }, - { - Names: []string{ - "iopl", - "ioperm", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Caps: []string{"CAP_SYS_RAWIO"}, - }, - }, - { - Names: []string{ - "settimeofday", - "stime", - "clock_settime", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Caps: []string{"CAP_SYS_TIME"}, - }, - }, - { - Names: []string{ - "vhangup", - }, - Action: types.ActAllow, - Args: []*types.Arg{}, - Includes: types.Filter{ - Caps: []string{"CAP_SYS_TTY_CONFIG"}, - }, - }, - } - - return &types.Seccomp{ - DefaultAction: types.ActErrno, - ArchMap: arches(), - Syscalls: syscalls, - } -} diff --git a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go b/vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go deleted file mode 100644 index 67e06401f..000000000 --- a/vendor/github.com/docker/docker/profiles/seccomp/seccomp_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build linux,!seccomp - -package seccomp // import "github.com/docker/docker/profiles/seccomp" - -import ( - "github.com/docker/docker/api/types" -) - -// DefaultProfile returns a nil pointer on unsupported systems. 
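// Illustrative sketch (editor's addition, not part of the deleted vendored
// code): the OpMaskedEqual clone rules above allow clone(2) only when
// (flags & mask) == 0, i.e. when none of the namespace-creating CLONE_NEW*
// bits are set. The same check in plain Go (constants from
// golang.org/x/sys/unix; assumes a Linux build):
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func cloneAllowed(flags uintptr) bool {
	const mask = unix.CLONE_NEWNS | unix.CLONE_NEWUTS | unix.CLONE_NEWIPC |
		unix.CLONE_NEWUSER | unix.CLONE_NEWPID | unix.CLONE_NEWNET
	// OpMaskedEqual with ValueTwo == 0: the masked bits must all be zero.
	return flags&mask == 0
}

func main() {
	fmt.Println(cloneAllowed(unix.CLONE_VM))    // true: thread-style clone
	fmt.Println(cloneAllowed(unix.CLONE_NEWNS)) // false: new mount namespace
}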
-func DefaultProfile() *types.Seccomp { - return nil -} diff --git a/vendor/github.com/docker/docker/reference/errors.go b/vendor/github.com/docker/docker/reference/errors.go deleted file mode 100644 index 2d294c672..000000000 --- a/vendor/github.com/docker/docker/reference/errors.go +++ /dev/null @@ -1,25 +0,0 @@ -package reference // import "github.com/docker/docker/reference" - -type notFoundError string - -func (e notFoundError) Error() string { - return string(e) -} - -func (notFoundError) NotFound() {} - -type invalidTagError string - -func (e invalidTagError) Error() string { - return string(e) -} - -func (invalidTagError) InvalidParameter() {} - -type conflictingTagError string - -func (e conflictingTagError) Error() string { - return string(e) -} - -func (conflictingTagError) Conflict() {} diff --git a/vendor/github.com/docker/docker/reference/store.go b/vendor/github.com/docker/docker/reference/store.go deleted file mode 100644 index b01051bf5..000000000 --- a/vendor/github.com/docker/docker/reference/store.go +++ /dev/null @@ -1,343 +0,0 @@ -package reference // import "github.com/docker/docker/reference" - -import ( - "encoding/json" - "fmt" - "os" - "path/filepath" - "sort" - "sync" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/pkg/ioutils" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -var ( - // ErrDoesNotExist is returned if a reference is not found in the - // store. - ErrDoesNotExist notFoundError = "reference does not exist" -) - -// An Association is a tuple associating a reference with an image ID. -type Association struct { - Ref reference.Named - ID digest.Digest -} - -// Store provides the set of methods which can operate on a reference store. -type Store interface { - References(id digest.Digest) []reference.Named - ReferencesByName(ref reference.Named) []Association - AddTag(ref reference.Named, id digest.Digest, force bool) error - AddDigest(ref reference.Canonical, id digest.Digest, force bool) error - Delete(ref reference.Named) (bool, error) - Get(ref reference.Named) (digest.Digest, error) -} - -type store struct { - mu sync.RWMutex - // jsonPath is the path to the file where the serialized tag data is - // stored. - jsonPath string - // Repositories is a map of repositories, indexed by name. - Repositories map[string]repository - // referencesByIDCache is a cache of references indexed by ID, to speed - // up References. - referencesByIDCache map[digest.Digest]map[string]reference.Named -} - -// Repository maps tags to digests. The key is a stringified Reference, -// including the repository name. -type repository map[string]digest.Digest - -type lexicalRefs []reference.Named - -func (a lexicalRefs) Len() int { return len(a) } -func (a lexicalRefs) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a lexicalRefs) Less(i, j int) bool { - return a[i].String() < a[j].String() -} - -type lexicalAssociations []Association - -func (a lexicalAssociations) Len() int { return len(a) } -func (a lexicalAssociations) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a lexicalAssociations) Less(i, j int) bool { - return a[i].Ref.String() < a[j].Ref.String() -} - -// NewReferenceStore creates a new reference store, tied to a file path where -// the set of references are serialized in JSON format. 
-func NewReferenceStore(jsonPath string) (Store, error) { - abspath, err := filepath.Abs(jsonPath) - if err != nil { - return nil, err - } - - store := &store{ - jsonPath: abspath, - Repositories: make(map[string]repository), - referencesByIDCache: make(map[digest.Digest]map[string]reference.Named), - } - // Load the json file if it exists, otherwise create it. - if err := store.reload(); os.IsNotExist(err) { - if err := store.save(); err != nil { - return nil, err - } - } else if err != nil { - return nil, err - } - return store, nil -} - -// AddTag adds a tag reference to the store. If force is set to true, existing -// references can be overwritten. This only works for tags, not digests. -func (store *store) AddTag(ref reference.Named, id digest.Digest, force bool) error { - if _, isCanonical := ref.(reference.Canonical); isCanonical { - return errors.WithStack(invalidTagError("refusing to create a tag with a digest reference")) - } - return store.addReference(reference.TagNameOnly(ref), id, force) -} - -// AddDigest adds a digest reference to the store. -func (store *store) AddDigest(ref reference.Canonical, id digest.Digest, force bool) error { - return store.addReference(ref, id, force) -} - -func favorDigest(originalRef reference.Named) (reference.Named, error) { - ref := originalRef - // If the reference includes a digest and a tag, we must store only the - // digest. - canonical, isCanonical := originalRef.(reference.Canonical) - _, isNamedTagged := originalRef.(reference.NamedTagged) - - if isCanonical && isNamedTagged { - trimmed, err := reference.WithDigest(reference.TrimNamed(canonical), canonical.Digest()) - if err != nil { - // should never happen - return originalRef, err - } - ref = trimmed - } - return ref, nil -} - -func (store *store) addReference(ref reference.Named, id digest.Digest, force bool) error { - ref, err := favorDigest(ref) - if err != nil { - return err - } - - refName := reference.FamiliarName(ref) - refStr := reference.FamiliarString(ref) - - if refName == string(digest.Canonical) { - return errors.WithStack(invalidTagError("refusing to create an ambiguous tag using digest algorithm as name")) - } - - store.mu.Lock() - defer store.mu.Unlock() - - repository, exists := store.Repositories[refName] - if !exists || repository == nil { - repository = make(map[string]digest.Digest) - store.Repositories[refName] = repository - } - - oldID, exists := repository[refStr] - - if exists { - // force only works for tags - if digested, isDigest := ref.(reference.Canonical); isDigest { - return errors.WithStack(conflictingTagError("Cannot overwrite digest " + digested.Digest().String())) - } - - if !force { - return errors.WithStack( - conflictingTagError( - fmt.Sprintf("Conflict: Tag %s is already set to image %s, if you want to replace it, please use the force option", refStr, oldID.String()), - ), - ) - } - - if store.referencesByIDCache[oldID] != nil { - delete(store.referencesByIDCache[oldID], refStr) - if len(store.referencesByIDCache[oldID]) == 0 { - delete(store.referencesByIDCache, oldID) - } - } - } - - repository[refStr] = id - if store.referencesByIDCache[id] == nil { - store.referencesByIDCache[id] = make(map[string]reference.Named) - } - store.referencesByIDCache[id][refStr] = ref - - return store.save() -} - -// Delete deletes a reference from the store. It returns true if a deletion -// happened, or false otherwise. 
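// Illustrative sketch (editor's addition, not part of the deleted vendored
// code): favorDigest above drops the tag when a reference carries both a tag
// and a digest. The same trimming done directly with the
// github.com/docker/distribution/reference package (the digest value is a
// made-up example):
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	ref, err := reference.ParseNormalizedNamed(
		"ubuntu:18.04@sha256:7d0d8fa37224e8b09c0a6037093babc6647fc31e7ab38f27b7e8a3b1b2a8e152")
	if err != nil {
		panic(err)
	}
	canonical, isCanonical := ref.(reference.Canonical)
	_, isTagged := ref.(reference.NamedTagged)
	if isCanonical && isTagged {
		// Keep only name@digest, discarding the tag.
		trimmed, err := reference.WithDigest(reference.TrimNamed(canonical), canonical.Digest())
		if err != nil {
			panic(err)
		}
		fmt.Println(reference.FamiliarString(trimmed)) // ubuntu@sha256:7d0d...
	}
}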
-func (store *store) Delete(ref reference.Named) (bool, error) { - ref, err := favorDigest(ref) - if err != nil { - return false, err - } - - ref = reference.TagNameOnly(ref) - - refName := reference.FamiliarName(ref) - refStr := reference.FamiliarString(ref) - - store.mu.Lock() - defer store.mu.Unlock() - - repository, exists := store.Repositories[refName] - if !exists { - return false, ErrDoesNotExist - } - - if id, exists := repository[refStr]; exists { - delete(repository, refStr) - if len(repository) == 0 { - delete(store.Repositories, refName) - } - if store.referencesByIDCache[id] != nil { - delete(store.referencesByIDCache[id], refStr) - if len(store.referencesByIDCache[id]) == 0 { - delete(store.referencesByIDCache, id) - } - } - return true, store.save() - } - - return false, ErrDoesNotExist -} - -// Get retrieves an item from the store by reference -func (store *store) Get(ref reference.Named) (digest.Digest, error) { - if canonical, ok := ref.(reference.Canonical); ok { - // If reference contains both tag and digest, only - // lookup by digest as it takes precedence over - // tag, until tag/digest combos are stored. - if _, ok := ref.(reference.Tagged); ok { - var err error - ref, err = reference.WithDigest(reference.TrimNamed(canonical), canonical.Digest()) - if err != nil { - return "", err - } - } - } else { - ref = reference.TagNameOnly(ref) - } - - refName := reference.FamiliarName(ref) - refStr := reference.FamiliarString(ref) - - store.mu.RLock() - defer store.mu.RUnlock() - - repository, exists := store.Repositories[refName] - if !exists || repository == nil { - return "", ErrDoesNotExist - } - - id, exists := repository[refStr] - if !exists { - return "", ErrDoesNotExist - } - - return id, nil -} - -// References returns a slice of references to the given ID. The slice -// will be nil if there are no references to this ID. -func (store *store) References(id digest.Digest) []reference.Named { - store.mu.RLock() - defer store.mu.RUnlock() - - // Convert the internal map to an array for two reasons: - // 1) We must not return a mutable object - // 2) It would be ugly to expose the extraneous map keys to callers. - - var references []reference.Named - for _, ref := range store.referencesByIDCache[id] { - references = append(references, ref) - } - - sort.Sort(lexicalRefs(references)) - - return references -} - -// ReferencesByName returns the references for a given repository name. -// If there are no references known for this repository name, -// ReferencesByName returns nil.
-func (store *store) ReferencesByName(ref reference.Named) []Association { - refName := reference.FamiliarName(ref) - - store.mu.RLock() - defer store.mu.RUnlock() - - repository, exists := store.Repositories[refName] - if !exists { - return nil - } - - var associations []Association - for refStr, refID := range repository { - ref, err := reference.ParseNormalizedNamed(refStr) - if err != nil { - // Should never happen - return nil - } - associations = append(associations, - Association{ - Ref: ref, - ID: refID, - }) - } - - sort.Sort(lexicalAssociations(associations)) - - return associations -} - -func (store *store) save() error { - // Store the json - jsonData, err := json.Marshal(store) - if err != nil { - return err - } - return ioutils.AtomicWriteFile(store.jsonPath, jsonData, 0600) -} - -func (store *store) reload() error { - f, err := os.Open(store.jsonPath) - if err != nil { - return err - } - defer f.Close() - if err := json.NewDecoder(f).Decode(&store); err != nil { - return err - } - - for _, repository := range store.Repositories { - for refStr, refID := range repository { - ref, err := reference.ParseNormalizedNamed(refStr) - if err != nil { - // Should never happen - continue - } - if store.referencesByIDCache[refID] == nil { - store.referencesByIDCache[refID] = make(map[string]reference.Named) - } - store.referencesByIDCache[refID][refStr] = ref - } - } - - return nil -} diff --git a/vendor/github.com/docker/docker/registry/auth.go b/vendor/github.com/docker/docker/registry/auth.go deleted file mode 100644 index 1f2043a0d..000000000 --- a/vendor/github.com/docker/docker/registry/auth.go +++ /dev/null @@ -1,296 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "io/ioutil" - "net/http" - "net/url" - "strings" - "time" - - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/distribution/registry/client/auth/challenge" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/api/types" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const ( - // AuthClientID is the ClientID used for the token server - AuthClientID = "docker" -) - -// loginV1 tries to register/login to the v1 registry server. -func loginV1(authConfig *types.AuthConfig, apiEndpoint APIEndpoint, userAgent string) (string, string, error) { - registryEndpoint := apiEndpoint.ToV1Endpoint(userAgent, nil) - serverAddress := registryEndpoint.String() - - logrus.Debugf("attempting v1 login to registry endpoint %s", serverAddress) - - if serverAddress == "" { - return "", "", errdefs.System(errors.New("server Error: Server Address not set")) - } - - req, err := http.NewRequest("GET", serverAddress+"users/", nil) - if err != nil { - return "", "", err - } - req.SetBasicAuth(authConfig.Username, authConfig.Password) - resp, err := registryEndpoint.client.Do(req) - if err != nil { - // fallback when request could not be completed - return "", "", fallbackError{ - err: err, - } - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return "", "", errdefs.System(err) - } - - switch resp.StatusCode { - case http.StatusOK: - return "Login Succeeded", "", nil - case http.StatusUnauthorized: - return "", "", errdefs.Unauthorized(errors.New("Wrong login/password, please try again")) - case http.StatusForbidden: - // *TODO: Use registry configuration to determine what this says, if anything?
- return "", "", errdefs.Forbidden(errors.Errorf("Login: Account is not active. Please see the documentation of the registry %s for instructions how to activate it.", serverAddress)) - case http.StatusInternalServerError: - logrus.Errorf("%s returned status code %d. Response Body :\n%s", req.URL.String(), resp.StatusCode, body) - return "", "", errdefs.System(errors.New("Internal Server Error")) - } - return "", "", errdefs.System(errors.Errorf("Login: %s (Code: %d; Headers: %s)", body, - resp.StatusCode, resp.Header)) -} - -type loginCredentialStore struct { - authConfig *types.AuthConfig -} - -func (lcs loginCredentialStore) Basic(*url.URL) (string, string) { - return lcs.authConfig.Username, lcs.authConfig.Password -} - -func (lcs loginCredentialStore) RefreshToken(*url.URL, string) string { - return lcs.authConfig.IdentityToken -} - -func (lcs loginCredentialStore) SetRefreshToken(u *url.URL, service, token string) { - lcs.authConfig.IdentityToken = token -} - -type staticCredentialStore struct { - auth *types.AuthConfig -} - -// NewStaticCredentialStore returns a credential store -// which always returns the same credential values. -func NewStaticCredentialStore(auth *types.AuthConfig) auth.CredentialStore { - return staticCredentialStore{ - auth: auth, - } -} - -func (scs staticCredentialStore) Basic(*url.URL) (string, string) { - if scs.auth == nil { - return "", "" - } - return scs.auth.Username, scs.auth.Password -} - -func (scs staticCredentialStore) RefreshToken(*url.URL, string) string { - if scs.auth == nil { - return "" - } - return scs.auth.IdentityToken -} - -func (scs staticCredentialStore) SetRefreshToken(*url.URL, string, string) { -} - -type fallbackError struct { - err error -} - -func (err fallbackError) Error() string { - return err.err.Error() -} - -// loginV2 tries to login to the v2 registry server. The given registry -// endpoint will be pinged to get authorization challenges. These challenges -// will be used to authenticate against the registry to validate credentials. -func loginV2(authConfig *types.AuthConfig, endpoint APIEndpoint, userAgent string) (string, string, error) { - logrus.Debugf("attempting v2 login to registry endpoint %s", strings.TrimRight(endpoint.URL.String(), "/")+"/v2/") - - modifiers := Headers(userAgent, nil) - authTransport := transport.NewTransport(NewTransport(endpoint.TLSConfig), modifiers...) 
- - credentialAuthConfig := *authConfig - creds := loginCredentialStore{ - authConfig: &credentialAuthConfig, - } - - loginClient, foundV2, err := v2AuthHTTPClient(endpoint.URL, authTransport, modifiers, creds, nil) - if err != nil { - return "", "", err - } - - endpointStr := strings.TrimRight(endpoint.URL.String(), "/") + "/v2/" - req, err := http.NewRequest("GET", endpointStr, nil) - if err != nil { - if !foundV2 { - err = fallbackError{err: err} - } - return "", "", err - } - - resp, err := loginClient.Do(req) - if err != nil { - err = translateV2AuthError(err) - if !foundV2 { - err = fallbackError{err: err} - } - - return "", "", err - } - defer resp.Body.Close() - - if resp.StatusCode == http.StatusOK { - return "Login Succeeded", credentialAuthConfig.IdentityToken, nil - } - - // TODO(dmcgowan): Attempt to further interpret result, status code and error code string - err = errors.Errorf("login attempt to %s failed with status: %d %s", endpointStr, resp.StatusCode, http.StatusText(resp.StatusCode)) - if !foundV2 { - err = fallbackError{err: err} - } - return "", "", err -} - -func v2AuthHTTPClient(endpoint *url.URL, authTransport http.RoundTripper, modifiers []transport.RequestModifier, creds auth.CredentialStore, scopes []auth.Scope) (*http.Client, bool, error) { - challengeManager, foundV2, err := PingV2Registry(endpoint, authTransport) - if err != nil { - if !foundV2 { - err = fallbackError{err: err} - } - return nil, foundV2, err - } - - tokenHandlerOptions := auth.TokenHandlerOptions{ - Transport: authTransport, - Credentials: creds, - OfflineAccess: true, - ClientID: AuthClientID, - Scopes: scopes, - } - tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) - basicHandler := auth.NewBasicHandler(creds) - modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) - tr := transport.NewTransport(authTransport, modifiers...) - - return &http.Client{ - Transport: tr, - Timeout: 15 * time.Second, - }, foundV2, nil - -} - -// ConvertToHostname converts a registry url which has http|https prepended -// to just a hostname. -func ConvertToHostname(url string) string { - stripped := url - if strings.HasPrefix(url, "http://") { - stripped = strings.TrimPrefix(url, "http://") - } else if strings.HasPrefix(url, "https://") { - stripped = strings.TrimPrefix(url, "https://") - } - - nameParts := strings.SplitN(stripped, "/", 2) - - return nameParts[0] -} - -// ResolveAuthConfig matches an auth configuration to a server address or a URL -func ResolveAuthConfig(authConfigs map[string]types.AuthConfig, index *registrytypes.IndexInfo) types.AuthConfig { - configKey := GetAuthConfigKey(index) - // First try the happy case - if c, found := authConfigs[configKey]; found || index.Official { - return c - } - - // Maybe they have a legacy config file; we will iterate the keys, converting - // them to the new format and testing each one - for registry, ac := range authConfigs { - if configKey == ConvertToHostname(registry) { - return ac - } - } - - // When all else fails, return an empty auth config - return types.AuthConfig{} -} - -// PingResponseError is used when the response from a ping -// was received but invalid. -type PingResponseError struct { - Err error -} - -func (err PingResponseError) Error() string { - return err.Err.Error() -} - -// PingV2Registry attempts to ping a v2 registry and on success returns a -// challenge manager for the supported authentication types and -// whether v2 was confirmed by the response.
If a response is received but -// cannot be interpreted a PingResponseError will be returned. -// nolint: interfacer -func PingV2Registry(endpoint *url.URL, transport http.RoundTripper) (challenge.Manager, bool, error) { - var ( - foundV2 = false - v2Version = auth.APIVersion{ - Type: "registry", - Version: "2.0", - } - ) - - pingClient := &http.Client{ - Transport: transport, - Timeout: 15 * time.Second, - } - endpointStr := strings.TrimRight(endpoint.String(), "/") + "/v2/" - req, err := http.NewRequest("GET", endpointStr, nil) - if err != nil { - return nil, false, err - } - resp, err := pingClient.Do(req) - if err != nil { - return nil, false, err - } - defer resp.Body.Close() - - versions := auth.APIVersions(resp, DefaultRegistryVersionHeader) - for _, pingVersion := range versions { - if pingVersion == v2Version { - // The version header indicates we're definitely - // talking to a v2 registry. So don't allow future - // fallbacks to the v1 protocol. - - foundV2 = true - break - } - } - - challengeManager := challenge.NewSimpleManager() - if err := challengeManager.AddResponse(resp); err != nil { - return nil, foundV2, PingResponseError{ - Err: err, - } - } - - return challengeManager, foundV2, nil -} diff --git a/vendor/github.com/docker/docker/registry/config.go b/vendor/github.com/docker/docker/registry/config.go deleted file mode 100644 index de5a526b6..000000000 --- a/vendor/github.com/docker/docker/registry/config.go +++ /dev/null @@ -1,442 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "fmt" - "net" - "net/url" - "regexp" - "strconv" - "strings" - - "github.com/docker/distribution/reference" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// ServiceOptions holds command line options. -type ServiceOptions struct { - AllowNondistributableArtifacts []string `json:"allow-nondistributable-artifacts,omitempty"` - Mirrors []string `json:"registry-mirrors,omitempty"` - InsecureRegistries []string `json:"insecure-registries,omitempty"` - - // V2Only controls access to legacy registries. If it is set to true via the - // command line flag the daemon will not attempt to contact v1 legacy registries - V2Only bool `json:"disable-legacy-registry,omitempty"` -} - -// serviceConfig holds daemon configuration for the registry service. 
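The JSON tags on ServiceOptions above are exactly the keys a daemon configuration file would carry. A small sketch of how such a fragment decodes; the struct is restated locally with a subset of the fields so the example is self-contained.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// serviceOptions restates a subset of ServiceOptions above.
type serviceOptions struct {
	Mirrors            []string `json:"registry-mirrors,omitempty"`
	InsecureRegistries []string `json:"insecure-registries,omitempty"`
	V2Only             bool     `json:"disable-legacy-registry,omitempty"`
}

func main() {
	raw := []byte(`{
		"registry-mirrors": ["https://mirror.example.com"],
		"insecure-registries": ["10.0.0.0/8", "registry.local:5000"],
		"disable-legacy-registry": true
	}`)
	var opts serviceOptions
	if err := json.Unmarshal(raw, &opts); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", opts)
}
```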
-type serviceConfig struct { - registrytypes.ServiceConfig - V2Only bool -} - -var ( - // DefaultNamespace is the default namespace - DefaultNamespace = "docker.io" - // DefaultRegistryVersionHeader is the name of the default HTTP header - // that carries Registry version info - DefaultRegistryVersionHeader = "Docker-Distribution-Api-Version" - - // IndexHostname is the index hostname - IndexHostname = "index.docker.io" - // IndexServer is used for user auth and image search - IndexServer = "https://" + IndexHostname + "/v1/" - // IndexName is the name of the index - IndexName = "docker.io" - - // DefaultV2Registry is the URI of the default v2 registry - DefaultV2Registry = &url.URL{ - Scheme: "https", - Host: "registry-1.docker.io", - } -) - -var ( - // ErrInvalidRepositoryName is an error returned if the repository name did - // not have the correct form - ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")") - - emptyServiceConfig, _ = newServiceConfig(ServiceOptions{}) -) - -var ( - validHostPortRegex = regexp.MustCompile(`^` + reference.DomainRegexp.String() + `$`) -) - -// for mocking in unit tests -var lookupIP = net.LookupIP - -// newServiceConfig returns a new instance of ServiceConfig -func newServiceConfig(options ServiceOptions) (*serviceConfig, error) { - config := &serviceConfig{ - ServiceConfig: registrytypes.ServiceConfig{ - InsecureRegistryCIDRs: make([]*registrytypes.NetIPNet, 0), - IndexConfigs: make(map[string]*registrytypes.IndexInfo), - // Hack: Bypass setting the mirrors to IndexConfigs since they are going away - // and Mirrors are only for the official registry anyways. - }, - V2Only: options.V2Only, - } - if err := config.LoadAllowNondistributableArtifacts(options.AllowNondistributableArtifacts); err != nil { - return nil, err - } - if err := config.LoadMirrors(options.Mirrors); err != nil { - return nil, err - } - if err := config.LoadInsecureRegistries(options.InsecureRegistries); err != nil { - return nil, err - } - - return config, nil -} - -// LoadAllowNondistributableArtifacts loads allow-nondistributable-artifacts registries into config. -func (config *serviceConfig) LoadAllowNondistributableArtifacts(registries []string) error { - cidrs := map[string]*registrytypes.NetIPNet{} - hostnames := map[string]bool{} - - for _, r := range registries { - if _, err := ValidateIndexName(r); err != nil { - return err - } - if validateNoScheme(r) != nil { - return fmt.Errorf("allow-nondistributable-artifacts registry %s should not contain '://'", r) - } - - if _, ipnet, err := net.ParseCIDR(r); err == nil { - // Valid CIDR. - cidrs[ipnet.String()] = (*registrytypes.NetIPNet)(ipnet) - } else if err := validateHostPort(r); err == nil { - // Must be `host:port` if not CIDR. - hostnames[r] = true - } else { - return fmt.Errorf("allow-nondistributable-artifacts registry %s is not valid: %v", r, err) - } - } - - config.AllowNondistributableArtifactsCIDRs = make([]*(registrytypes.NetIPNet), 0) - for _, c := range cidrs { - config.AllowNondistributableArtifactsCIDRs = append(config.AllowNondistributableArtifactsCIDRs, c) - } - - config.AllowNondistributableArtifactsHostnames = make([]string, 0) - for h := range hostnames { - config.AllowNondistributableArtifactsHostnames = append(config.AllowNondistributableArtifactsHostnames, h) - } - - return nil -} - -// LoadMirrors loads mirrors to config, after removing duplicates. -// Returns an error if mirrors contains an invalid mirror. 
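LoadAllowNondistributableArtifacts above sorts each entry into either a CIDR bucket or a hostname bucket. A rough sketch of that classification rule follows; note the real code validates hostnames against a domain regexp via validateHostPort, which this simplified version only approximates with net.SplitHostPort.

```go
package main

import (
	"fmt"
	"net"
)

// classify applies the same first-match rule as the loader above:
// a parseable CIDR wins, otherwise the entry is host or host:port.
func classify(entry string) string {
	if _, _, err := net.ParseCIDR(entry); err == nil {
		return "cidr"
	}
	if host, _, err := net.SplitHostPort(entry); err == nil && host != "" {
		return "host:port"
	}
	return "host"
}

func main() {
	for _, e := range []string{"10.1.0.0/16", "registry.local:5000", "registry.local"} {
		fmt.Println(e, "->", classify(e))
	}
}
```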
-func (config *serviceConfig) LoadMirrors(mirrors []string) error {
- mMap := map[string]struct{}{}
- unique := []string{}
-
- for _, mirror := range mirrors {
- m, err := ValidateMirror(mirror)
- if err != nil {
- return err
- }
- if _, exist := mMap[m]; !exist {
- mMap[m] = struct{}{}
- unique = append(unique, m)
- }
- }
-
- config.Mirrors = unique
-
- // Configure public registry since mirrors may have changed.
- config.IndexConfigs[IndexName] = &registrytypes.IndexInfo{
- Name: IndexName,
- Mirrors: config.Mirrors,
- Secure: true,
- Official: true,
- }
-
- return nil
-}
-
-// LoadInsecureRegistries loads insecure registries into config.
-func (config *serviceConfig) LoadInsecureRegistries(registries []string) error {
- // Localhost is by default considered an insecure registry.
- // This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker).
- //
- // TODO: should we deprecate this once it is easier for people to set up a TLS registry or change
- // daemon flags on boot2docker?
- registries = append(registries, "127.0.0.0/8")
-
- // Store the original InsecureRegistryCIDRs and IndexConfigs, then clean
- // them in config: the registries passed in carry all the insecure registry info.
- originalCIDRs := config.ServiceConfig.InsecureRegistryCIDRs
- originalIndexInfos := config.ServiceConfig.IndexConfigs
-
- config.ServiceConfig.InsecureRegistryCIDRs = make([]*registrytypes.NetIPNet, 0)
- config.ServiceConfig.IndexConfigs = make(map[string]*registrytypes.IndexInfo)
-
-skip:
- for _, r := range registries {
- // validate insecure registry
- if _, err := ValidateIndexName(r); err != nil {
- // before returning err, roll back to original data
- config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs
- config.ServiceConfig.IndexConfigs = originalIndexInfos
- return err
- }
- if strings.HasPrefix(strings.ToLower(r), "http://") {
- logrus.Warnf("insecure registry %s should not contain 'http://' and 'http://' has been removed from the insecure registry config", r)
- r = r[7:]
- } else if strings.HasPrefix(strings.ToLower(r), "https://") {
- logrus.Warnf("insecure registry %s should not contain 'https://' and 'https://' has been removed from the insecure registry config", r)
- r = r[8:]
- } else if validateNoScheme(r) != nil {
- // Insecure registry should not contain '://'
- // before returning err, roll back to original data
- config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs
- config.ServiceConfig.IndexConfigs = originalIndexInfos
- return fmt.Errorf("insecure registry %s should not contain '://'", r)
- }
- // Check if CIDR was passed to --insecure-registry
- _, ipnet, err := net.ParseCIDR(r)
- if err == nil {
- // Valid CIDR. If ipnet is already in config.InsecureRegistryCIDRs, skip.
- data := (*registrytypes.NetIPNet)(ipnet)
- for _, value := range config.InsecureRegistryCIDRs {
- if value.IP.String() == data.IP.String() && value.Mask.String() == data.Mask.String() {
- continue skip
- }
- }
- // ipnet is not found, add it to config.InsecureRegistryCIDRs
- config.InsecureRegistryCIDRs = append(config.InsecureRegistryCIDRs, data)
-
- } else {
- if err := validateHostPort(r); err != nil {
- config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs
- config.ServiceConfig.IndexConfigs = originalIndexInfos
- return fmt.Errorf("insecure registry %s is not valid: %v", r, err)
-
- }
- // Assume `host:port` if not CIDR.
- config.IndexConfigs[r] = &registrytypes.IndexInfo{
- Name: r,
- Mirrors: make([]string, 0),
- Secure: false,
- Official: false,
- }
- }
- }
-
- // Configure public registry.
- config.IndexConfigs[IndexName] = &registrytypes.IndexInfo{
- Name: IndexName,
- Mirrors: config.Mirrors,
- Secure: true,
- Official: true,
- }
-
- return nil
-}
-
-// allowNondistributableArtifacts returns true if the provided hostname is part of the list of registries
-// that allow push of nondistributable artifacts.
-//
-// The list can contain elements with CIDR notation to specify a whole subnet. If the subnet contains an IP
-// of the registry specified by hostname, true is returned.
-//
-// hostname should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name
-// or an IP address. If it is a domain name, then it will be resolved to IP addresses for matching. If
-// resolution fails, CIDR matching is not performed.
-func allowNondistributableArtifacts(config *serviceConfig, hostname string) bool {
- for _, h := range config.AllowNondistributableArtifactsHostnames {
- if h == hostname {
- return true
- }
- }
-
- return isCIDRMatch(config.AllowNondistributableArtifactsCIDRs, hostname)
-}
-
-// isSecureIndex returns false if the provided indexName is part of the list of insecure registries.
-// Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs.
-//
-// The list of insecure registries can contain an element with CIDR notation to specify a whole subnet.
-// If the subnet contains one of the IPs of the registry specified by indexName, the latter is considered
-// insecure.
-//
-// indexName should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name
-// or an IP address. If it is a domain name, then it will be resolved in order to check if the IP is contained
-// in a subnet. If resolution fails, isSecureIndex only tries to match indexName against the hostname elements
-// of the insecure-registries list.
-func isSecureIndex(config *serviceConfig, indexName string) bool {
- // Check for configured index, first. This is needed in case isSecureIndex
- // is called from anything besides newIndexInfo, in order to honor per-index configurations.
- if index, ok := config.IndexConfigs[indexName]; ok {
- return index.Secure
- }
-
- return !isCIDRMatch(config.InsecureRegistryCIDRs, indexName)
-}
-
-// isCIDRMatch returns true if URLHost matches an element of cidrs. URLHost is a URL.Host (`host:port` or `host`)
-// where the `host` part can be either a domain name or an IP address. If it is a domain name, then it will be
-// resolved to IP addresses for matching. If resolution fails, false is returned.
-func isCIDRMatch(cidrs []*registrytypes.NetIPNet, URLHost string) bool {
- host, _, err := net.SplitHostPort(URLHost)
- if err != nil {
- // Assume URLHost is of the form `host` without the port and go on.
- host = URLHost
- }
-
- addrs, err := lookupIP(host)
- if err != nil {
- ip := net.ParseIP(host)
- if ip != nil {
- addrs = []net.IP{ip}
- }
-
- // if ip == nil, then `host` is neither an IP address nor a name that could be looked up,
- // either because the index is unreachable, or because the index is behind an HTTP proxy.
- // So, len(addrs) == 0 and we're not aborting.
- }
-
- // Try CIDR notation only if addrs has any elements, i.e. if `host`'s IP could be determined.
- for _, addr := range addrs {
- for _, ipnet := range cidrs {
- // check if the addr falls in the subnet
- if (*net.IPNet)(ipnet).Contains(addr) {
- return true
- }
- }
- }
-
- return false
-}
-
-// ValidateMirror validates an HTTP(S) registry mirror
-func ValidateMirror(val string) (string, error) {
- uri, err := url.Parse(val)
- if err != nil {
- return "", fmt.Errorf("invalid mirror: %q is not a valid URI", val)
- }
- if uri.Scheme != "http" && uri.Scheme != "https" {
- return "", fmt.Errorf("invalid mirror: unsupported scheme %q in %q", uri.Scheme, uri)
- }
- if (uri.Path != "" && uri.Path != "/") || uri.RawQuery != "" || uri.Fragment != "" {
- return "", fmt.Errorf("invalid mirror: path, query, or fragment at end of the URI %q", uri)
- }
- if uri.User != nil {
- // strip password from output
- uri.User = url.UserPassword(uri.User.Username(), "xxxxx")
- return "", fmt.Errorf("invalid mirror: username/password not allowed in URI %q", uri)
- }
- return strings.TrimSuffix(val, "/") + "/", nil
-}
-
-// ValidateIndexName validates an index name.
-func ValidateIndexName(val string) (string, error) {
- // TODO: upstream this check to the reference package
- if val == "index.docker.io" {
- val = "docker.io"
- }
- if strings.HasPrefix(val, "-") || strings.HasSuffix(val, "-") {
- return "", fmt.Errorf("invalid index name (%s). Cannot begin or end with a hyphen", val)
- }
- return val, nil
-}
-
-func validateNoScheme(reposName string) error {
- if strings.Contains(reposName, "://") {
- // It cannot contain a scheme!
- return ErrInvalidRepositoryName
- }
- return nil
-}
-
-func validateHostPort(s string) error {
- // Split host and port; if s cannot be split, assume it is the host alone
- host, port, err := net.SplitHostPort(s)
- if err != nil {
- host = s
- port = ""
- }
- // If match against the `host:port` pattern fails,
- // it might be `IPv6:port`, which will be captured by net.ParseIP(host)
- if !validHostPortRegex.MatchString(s) && net.ParseIP(host) == nil {
- return fmt.Errorf("invalid host %q", host)
- }
- if port != "" {
- v, err := strconv.Atoi(port)
- if err != nil {
- return err
- }
- if v < 0 || v > 65535 {
- return fmt.Errorf("invalid port %q", port)
- }
- }
- return nil
-}
-
-// newIndexInfo returns IndexInfo configuration from indexName
-func newIndexInfo(config *serviceConfig, indexName string) (*registrytypes.IndexInfo, error) {
- var err error
- indexName, err = ValidateIndexName(indexName)
- if err != nil {
- return nil, err
- }
-
- // Return any configured index info, first.
- if index, ok := config.IndexConfigs[indexName]; ok {
- return index, nil
- }
-
- // Construct a non-configured index info.
- index := &registrytypes.IndexInfo{
- Name: indexName,
- Mirrors: make([]string, 0),
- Official: false,
- }
- index.Secure = isSecureIndex(config, indexName)
- return index, nil
-}
-
-// GetAuthConfigKey special-cases using the full index address of the official
-// index as the AuthConfig key, and uses the (host)name[:port] for private indexes.
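ValidateMirror above normalizes an accepted mirror to end in exactly one trailing slash and rejects any URL that carries a path, query, fragment, or credentials. A condensed sketch under those rules; it collapses the original's distinct error messages into one.

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func validateMirror(val string) (string, error) {
	uri, err := url.Parse(val)
	if err != nil || (uri.Scheme != "http" && uri.Scheme != "https") {
		return "", fmt.Errorf("invalid mirror %q", val)
	}
	if (uri.Path != "" && uri.Path != "/") || uri.RawQuery != "" || uri.Fragment != "" || uri.User != nil {
		return "", fmt.Errorf("invalid mirror %q", val)
	}
	// Normalize: strip any trailing slash, then add exactly one back.
	return strings.TrimSuffix(val, "/") + "/", nil
}

func main() {
	m, _ := validateMirror("https://mirror.example.com")
	fmt.Println(m) // https://mirror.example.com/
}
```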
-func GetAuthConfigKey(index *registrytypes.IndexInfo) string { - if index.Official { - return IndexServer - } - return index.Name -} - -// newRepositoryInfo validates and breaks down a repository name into a RepositoryInfo -func newRepositoryInfo(config *serviceConfig, name reference.Named) (*RepositoryInfo, error) { - index, err := newIndexInfo(config, reference.Domain(name)) - if err != nil { - return nil, err - } - official := !strings.ContainsRune(reference.FamiliarName(name), '/') - - return &RepositoryInfo{ - Name: reference.TrimNamed(name), - Index: index, - Official: official, - }, nil -} - -// ParseRepositoryInfo performs the breakdown of a repository name into a RepositoryInfo, but -// lacks registry configuration. -func ParseRepositoryInfo(reposName reference.Named) (*RepositoryInfo, error) { - return newRepositoryInfo(emptyServiceConfig, reposName) -} - -// ParseSearchIndexInfo will use repository name to get back an indexInfo. -func ParseSearchIndexInfo(reposName string) (*registrytypes.IndexInfo, error) { - indexName, _ := splitReposSearchTerm(reposName) - - indexInfo, err := newIndexInfo(emptyServiceConfig, indexName) - if err != nil { - return nil, err - } - return indexInfo, nil -} diff --git a/vendor/github.com/docker/docker/registry/config_unix.go b/vendor/github.com/docker/docker/registry/config_unix.go deleted file mode 100644 index 20fb47bca..000000000 --- a/vendor/github.com/docker/docker/registry/config_unix.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !windows - -package registry // import "github.com/docker/docker/registry" - -var ( - // CertsDir is the directory where certificates are stored - CertsDir = "/etc/docker/certs.d" -) - -// cleanPath is used to ensure that a directory name is valid on the target -// platform. It will be passed in something *similar* to a URL such as -// https:/index.docker.io/v1. Not all platforms support directory names -// which contain those characters (such as : on Windows) -func cleanPath(s string) string { - return s -} diff --git a/vendor/github.com/docker/docker/registry/config_windows.go b/vendor/github.com/docker/docker/registry/config_windows.go deleted file mode 100644 index 6de0508f8..000000000 --- a/vendor/github.com/docker/docker/registry/config_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "os" - "path/filepath" - "strings" -) - -// CertsDir is the directory where certificates are stored -var CertsDir = os.Getenv("programdata") + `\docker\certs.d` - -// cleanPath is used to ensure that a directory name is valid on the target -// platform. It will be passed in something *similar* to a URL such as -// https:\index.docker.io\v1. 
Not all platforms support directory names -// which contain those characters (such as : on Windows) -func cleanPath(s string) string { - return filepath.FromSlash(strings.Replace(s, ":", "", -1)) -} diff --git a/vendor/github.com/docker/docker/registry/endpoint_v1.go b/vendor/github.com/docker/docker/registry/endpoint_v1.go deleted file mode 100644 index 832fdb95a..000000000 --- a/vendor/github.com/docker/docker/registry/endpoint_v1.go +++ /dev/null @@ -1,198 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "crypto/tls" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strings" - - "github.com/docker/distribution/registry/client/transport" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/sirupsen/logrus" -) - -// V1Endpoint stores basic information about a V1 registry endpoint. -type V1Endpoint struct { - client *http.Client - URL *url.URL - IsSecure bool -} - -// NewV1Endpoint parses the given address to return a registry endpoint. -func NewV1Endpoint(index *registrytypes.IndexInfo, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { - tlsConfig, err := newTLSConfig(index.Name, index.Secure) - if err != nil { - return nil, err - } - - endpoint, err := newV1EndpointFromStr(GetAuthConfigKey(index), tlsConfig, userAgent, metaHeaders) - if err != nil { - return nil, err - } - - if err := validateEndpoint(endpoint); err != nil { - return nil, err - } - - return endpoint, nil -} - -func validateEndpoint(endpoint *V1Endpoint) error { - logrus.Debugf("pinging registry endpoint %s", endpoint) - - // Try HTTPS ping to registry - endpoint.URL.Scheme = "https" - if _, err := endpoint.Ping(); err != nil { - if endpoint.IsSecure { - // If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry` - // in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP. - return fmt.Errorf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host) - } - - // If registry is insecure and HTTPS failed, fallback to HTTP. - logrus.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err) - endpoint.URL.Scheme = "http" - - var err2 error - if _, err2 = endpoint.Ping(); err2 == nil { - return nil - } - - return fmt.Errorf("invalid registry endpoint %q. HTTPS attempt: %v. HTTP attempt: %v", endpoint, err, err2) - } - - return nil -} - -func newV1Endpoint(address url.URL, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) *V1Endpoint { - endpoint := &V1Endpoint{ - IsSecure: tlsConfig == nil || !tlsConfig.InsecureSkipVerify, - URL: new(url.URL), - } - - *endpoint.URL = address - - // TODO(tiborvass): make sure a ConnectTimeout transport is used - tr := NewTransport(tlsConfig) - endpoint.client = HTTPClient(transport.NewTransport(tr, Headers(userAgent, metaHeaders)...)) - return endpoint -} - -// trimV1Address trims the version off the address and returns the -// trimmed address or an error if there is a non-V1 version. 
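validateEndpoint above is an HTTPS-then-HTTP ladder: a secure endpoint never falls back, an insecure one retries over plain HTTP before giving up. A reduced sketch of just that control flow, with ping standing in for V1Endpoint.Ping.

```go
package main

import (
	"errors"
	"fmt"
)

func validate(isSecure bool, ping func(scheme string) error) error {
	err := ping("https")
	if err == nil {
		return nil
	}
	if isSecure {
		// Secure endpoints never downgrade; surface the HTTPS failure.
		return fmt.Errorf("invalid registry endpoint: %v (if this is a private registry, consider --insecure-registry)", err)
	}
	// Insecure endpoints may fall back to plain HTTP.
	if err2 := ping("http"); err2 != nil {
		return fmt.Errorf("HTTPS attempt: %v. HTTP attempt: %v", err, err2)
	}
	return nil
}

func main() {
	ping := func(scheme string) error {
		if scheme == "https" {
			return errors.New("x509: certificate signed by unknown authority")
		}
		return nil // plain HTTP succeeds
	}
	fmt.Println(validate(false, ping)) // <nil>: insecure endpoint fell back to HTTP
}
```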
-func trimV1Address(address string) (string, error) {
- var (
- chunks []string
- apiVersionStr string
- )
-
- if strings.HasSuffix(address, "/") {
- address = address[:len(address)-1]
- }
-
- chunks = strings.Split(address, "/")
- apiVersionStr = chunks[len(chunks)-1]
- if apiVersionStr == "v1" {
- return strings.Join(chunks[:len(chunks)-1], "/"), nil
- }
-
- for k, v := range apiVersions {
- if k != APIVersion1 && apiVersionStr == v {
- return "", fmt.Errorf("unsupported V1 version path %s", apiVersionStr)
- }
- }
-
- return address, nil
-}
-
-func newV1EndpointFromStr(address string, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) {
- if !strings.HasPrefix(address, "http://") && !strings.HasPrefix(address, "https://") {
- address = "https://" + address
- }
-
- address, err := trimV1Address(address)
- if err != nil {
- return nil, err
- }
-
- uri, err := url.Parse(address)
- if err != nil {
- return nil, err
- }
-
- return newV1Endpoint(*uri, tlsConfig, userAgent, metaHeaders), nil
-}
-
-// String returns the formatted URL for the root of this registry endpoint.
-func (e *V1Endpoint) String() string {
- return e.URL.String() + "/v1/"
-}
-
-// Path returns a formatted string for the URL
-// of this endpoint with the given path appended.
-func (e *V1Endpoint) Path(path string) string {
- return e.URL.String() + "/v1/" + path
-}
-
-// Ping returns a PingResult which indicates whether the registry is standalone or not.
-func (e *V1Endpoint) Ping() (PingResult, error) {
- logrus.Debugf("attempting v1 ping for registry endpoint %s", e)
-
- if e.String() == IndexServer {
- // Skip the check, we know this one is valid
- // (and we never want to fall back to http in case of error)
- return PingResult{Standalone: false}, nil
- }
-
- req, err := http.NewRequest("GET", e.Path("_ping"), nil)
- if err != nil {
- return PingResult{Standalone: false}, err
- }
-
- resp, err := e.client.Do(req)
- if err != nil {
- return PingResult{Standalone: false}, err
- }
-
- defer resp.Body.Close()
-
- jsonString, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return PingResult{Standalone: false}, fmt.Errorf("error while reading the http response: %s", err)
- }
-
- // If the header is absent, default Standalone to true for compatibility
- // with earlier versions of the registry.
- info := PingResult{
- Standalone: true,
- }
- if err := json.Unmarshal(jsonString, &info); err != nil {
- logrus.Debugf("Error unmarshaling the _ping PingResult: %s", err)
- // don't stop here. Just assume sane defaults
- }
- if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" {
- logrus.Debugf("Registry version header: '%s'", hdr)
- info.Version = hdr
- }
- logrus.Debugf("PingResult.Version: %q", info.Version)
-
- standalone := resp.Header.Get("X-Docker-Registry-Standalone")
- logrus.Debugf("Registry standalone header: '%s'", standalone)
- // Accepted values are "true" (case-insensitive) and "1".
- if strings.EqualFold(standalone, "true") || standalone == "1" { - info.Standalone = true - } else if len(standalone) > 0 { - // there is a header set, and it is not "true" or "1", so assume fails - info.Standalone = false - } - logrus.Debugf("PingResult.Standalone: %t", info.Standalone) - return info, nil -} diff --git a/vendor/github.com/docker/docker/registry/errors.go b/vendor/github.com/docker/docker/registry/errors.go deleted file mode 100644 index 5bab02e5e..000000000 --- a/vendor/github.com/docker/docker/registry/errors.go +++ /dev/null @@ -1,31 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "net/url" - - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/docker/errdefs" -) - -type notFoundError string - -func (e notFoundError) Error() string { - return string(e) -} - -func (notFoundError) NotFound() {} - -func translateV2AuthError(err error) error { - switch e := err.(type) { - case *url.Error: - switch e2 := e.Err.(type) { - case errcode.Error: - switch e2.Code { - case errcode.ErrorCodeUnauthorized: - return errdefs.Unauthorized(err) - } - } - } - - return err -} diff --git a/vendor/github.com/docker/docker/registry/registry.go b/vendor/github.com/docker/docker/registry/registry.go deleted file mode 100644 index 7a84bbfb7..000000000 --- a/vendor/github.com/docker/docker/registry/registry.go +++ /dev/null @@ -1,191 +0,0 @@ -// Package registry contains client primitives to interact with a remote Docker registry. -package registry // import "github.com/docker/docker/registry" - -import ( - "crypto/tls" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "os" - "path/filepath" - "strings" - "time" - - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" - "github.com/sirupsen/logrus" -) - -var ( - // ErrAlreadyExists is an error returned if an image being pushed - // already exists on the remote side - ErrAlreadyExists = errors.New("Image already exists") -) - -func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) { - // PreferredServerCipherSuites should have no effect - tlsConfig := tlsconfig.ServerDefault() - - tlsConfig.InsecureSkipVerify = !isSecure - - if isSecure && CertsDir != "" { - hostDir := filepath.Join(CertsDir, cleanPath(hostname)) - logrus.Debugf("hostDir: %s", hostDir) - if err := ReadCertsDirectory(tlsConfig, hostDir); err != nil { - return nil, err - } - } - - return tlsConfig, nil -} - -func hasFile(files []os.FileInfo, name string) bool { - for _, f := range files { - if f.Name() == name { - return true - } - } - return false -} - -// ReadCertsDirectory reads the directory for TLS certificates -// including roots and certificate pairs and updates the -// provided TLS configuration. 
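ReadCertsDirectory, defined just below, expects the certs.d layout: *.crt files are extra root CAs, and client certificate pairs share a basename (client.cert plus client.key). A sketch of only the pairing rule, with hypothetical file names.

```go
package main

import (
	"fmt"
	"strings"
)

// pairings maps each .cert file to its required .key companion,
// flagging the pair as missing when the key is absent.
func pairings(files []string) map[string]string {
	has := map[string]bool{}
	for _, f := range files {
		has[f] = true
	}
	out := map[string]string{}
	for _, f := range files {
		if strings.HasSuffix(f, ".cert") {
			key := strings.TrimSuffix(f, ".cert") + ".key"
			if has[key] {
				out[f] = key
			} else {
				out[f] = "MISSING " + key
			}
		}
	}
	return out
}

func main() {
	fmt.Println(pairings([]string{"ca.crt", "client.cert", "client.key"}))
	// map[client.cert:client.key]
}
```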
-func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { - fs, err := ioutil.ReadDir(directory) - if err != nil && !os.IsNotExist(err) { - return err - } - - for _, f := range fs { - if strings.HasSuffix(f.Name(), ".crt") { - if tlsConfig.RootCAs == nil { - systemPool, err := tlsconfig.SystemCertPool() - if err != nil { - return fmt.Errorf("unable to get system cert pool: %v", err) - } - tlsConfig.RootCAs = systemPool - } - logrus.Debugf("crt: %s", filepath.Join(directory, f.Name())) - data, err := ioutil.ReadFile(filepath.Join(directory, f.Name())) - if err != nil { - return err - } - tlsConfig.RootCAs.AppendCertsFromPEM(data) - } - if strings.HasSuffix(f.Name(), ".cert") { - certName := f.Name() - keyName := certName[:len(certName)-5] + ".key" - logrus.Debugf("cert: %s", filepath.Join(directory, f.Name())) - if !hasFile(fs, keyName) { - return fmt.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName) - } - cert, err := tls.LoadX509KeyPair(filepath.Join(directory, certName), filepath.Join(directory, keyName)) - if err != nil { - return err - } - tlsConfig.Certificates = append(tlsConfig.Certificates, cert) - } - if strings.HasSuffix(f.Name(), ".key") { - keyName := f.Name() - certName := keyName[:len(keyName)-4] + ".cert" - logrus.Debugf("key: %s", filepath.Join(directory, f.Name())) - if !hasFile(fs, certName) { - return fmt.Errorf("Missing client certificate %s for key %s", certName, keyName) - } - } - } - - return nil -} - -// Headers returns request modifiers with a User-Agent and metaHeaders -func Headers(userAgent string, metaHeaders http.Header) []transport.RequestModifier { - modifiers := []transport.RequestModifier{} - if userAgent != "" { - modifiers = append(modifiers, transport.NewHeaderRequestModifier(http.Header{ - "User-Agent": []string{userAgent}, - })) - } - if metaHeaders != nil { - modifiers = append(modifiers, transport.NewHeaderRequestModifier(metaHeaders)) - } - return modifiers -} - -// HTTPClient returns an HTTP client structure which uses the given transport -// and contains the necessary headers for redirected requests -func HTTPClient(transport http.RoundTripper) *http.Client { - return &http.Client{ - Transport: transport, - CheckRedirect: addRequiredHeadersToRedirectedRequests, - } -} - -func trustedLocation(req *http.Request) bool { - var ( - trusteds = []string{"docker.com", "docker.io"} - hostname = strings.SplitN(req.Host, ":", 2)[0] - ) - if req.URL.Scheme != "https" { - return false - } - - for _, trusted := range trusteds { - if hostname == trusted || strings.HasSuffix(hostname, "."+trusted) { - return true - } - } - return false -} - -// addRequiredHeadersToRedirectedRequests adds the necessary redirection headers -// for redirected requests -func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error { - if via != nil && via[0] != nil { - if trustedLocation(req) && trustedLocation(via[0]) { - req.Header = via[0].Header - return nil - } - for k, v := range via[0].Header { - if k != "Authorization" { - for _, vv := range v { - req.Header.Add(k, vv) - } - } - } - } - return nil -} - -// NewTransport returns a new HTTP transport. If tlsConfig is nil, it uses the -// default TLS configuration. 
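The redirect policy installed by HTTPClient above (addRequiredHeadersToRedirectedRequests) carries Authorization across a redirect only when both hosts are trusted; everything else is copied. A self-contained sketch of the header-copying rule, with the trust decision passed in as a boolean.

```go
package main

import (
	"fmt"
	"net/http"
)

func copyRedirectHeaders(req *http.Request, via []*http.Request, trusted bool) {
	if len(via) == 0 {
		return
	}
	if trusted {
		// Both hosts trusted: reuse the original headers wholesale.
		req.Header = via[0].Header
		return
	}
	for k, v := range via[0].Header {
		if k == "Authorization" {
			continue // never leak credentials to an untrusted host
		}
		for _, vv := range v {
			req.Header.Add(k, vv)
		}
	}
}

func main() {
	orig, _ := http.NewRequest("GET", "https://registry-1.docker.io/v2/", nil)
	orig.Header.Set("Authorization", "Bearer secret")
	orig.Header.Set("Accept", "application/json")

	redirected, _ := http.NewRequest("GET", "https://evil.example.com/v2/", nil)
	copyRedirectHeaders(redirected, []*http.Request{orig}, false)
	fmt.Println(redirected.Header.Get("Authorization") == "") // true
	fmt.Println(redirected.Header.Get("Accept"))              // application/json
}
```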
-func NewTransport(tlsConfig *tls.Config) *http.Transport { - if tlsConfig == nil { - tlsConfig = tlsconfig.ServerDefault() - } - - direct := &net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - } - - base := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: direct.Dial, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: tlsConfig, - // TODO(dmcgowan): Call close idle connections when complete and use keep alive - DisableKeepAlives: true, - } - - proxyDialer, err := sockets.DialerFromEnvironment(direct) - if err == nil { - base.Dial = proxyDialer.Dial - } - return base -} diff --git a/vendor/github.com/docker/docker/registry/resumable/resumablerequestreader.go b/vendor/github.com/docker/docker/registry/resumable/resumablerequestreader.go deleted file mode 100644 index 8e97a1a4d..000000000 --- a/vendor/github.com/docker/docker/registry/resumable/resumablerequestreader.go +++ /dev/null @@ -1,96 +0,0 @@ -package resumable // import "github.com/docker/docker/registry/resumable" - -import ( - "fmt" - "io" - "net/http" - "time" - - "github.com/sirupsen/logrus" -) - -type requestReader struct { - client *http.Client - request *http.Request - lastRange int64 - totalSize int64 - currentResponse *http.Response - failures uint32 - maxFailures uint32 - waitDuration time.Duration -} - -// NewRequestReader makes it possible to resume reading a request's body transparently -// maxfail is the number of times we retry to make requests again (not resumes) -// totalsize is the total length of the body; auto detect if not provided -func NewRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser { - return &requestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, waitDuration: 5 * time.Second} -} - -// NewRequestReaderWithInitialResponse makes it possible to resume -// reading the body of an already initiated request. 
-func NewRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser { - return &requestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse, waitDuration: 5 * time.Second} -} - -func (r *requestReader) Read(p []byte) (n int, err error) { - if r.client == nil || r.request == nil { - return 0, fmt.Errorf("client and request can't be nil") - } - isFreshRequest := false - if r.lastRange != 0 && r.currentResponse == nil { - readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize) - r.request.Header.Set("Range", readRange) - time.Sleep(r.waitDuration) - } - if r.currentResponse == nil { - r.currentResponse, err = r.client.Do(r.request) - isFreshRequest = true - } - if err != nil && r.failures+1 != r.maxFailures { - r.cleanUpResponse() - r.failures++ - time.Sleep(time.Duration(r.failures) * r.waitDuration) - return 0, nil - } else if err != nil { - r.cleanUpResponse() - return 0, err - } - if r.currentResponse.StatusCode == 416 && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 { - r.cleanUpResponse() - return 0, io.EOF - } else if r.currentResponse.StatusCode != 206 && r.lastRange != 0 && isFreshRequest { - r.cleanUpResponse() - return 0, fmt.Errorf("the server doesn't support byte ranges") - } - if r.totalSize == 0 { - r.totalSize = r.currentResponse.ContentLength - } else if r.totalSize <= 0 { - r.cleanUpResponse() - return 0, fmt.Errorf("failed to auto detect content length") - } - n, err = r.currentResponse.Body.Read(p) - r.lastRange += int64(n) - if err != nil { - r.cleanUpResponse() - } - if err != nil && err != io.EOF { - logrus.Infof("encountered error during pull and clearing it before resume: %s", err) - err = nil - } - return n, err -} - -func (r *requestReader) Close() error { - r.cleanUpResponse() - r.client = nil - r.request = nil - return nil -} - -func (r *requestReader) cleanUpResponse() { - if r.currentResponse != nil { - r.currentResponse.Body.Close() - r.currentResponse = nil - } -} diff --git a/vendor/github.com/docker/docker/registry/service.go b/vendor/github.com/docker/docker/registry/service.go deleted file mode 100644 index b441970ff..000000000 --- a/vendor/github.com/docker/docker/registry/service.go +++ /dev/null @@ -1,328 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "context" - "crypto/tls" - "net/http" - "net/url" - "strings" - "sync" - - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/docker/api/types" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const ( - // DefaultSearchLimit is the default value for maximum number of returned search results. - DefaultSearchLimit = 25 -) - -// Service is the interface defining what a registry service should implement. 
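A hypothetical use of the resumable package deleted above: wrap a layer download so transient failures are retried with Range requests. The URL is a placeholder, the import path assumes this vendored tree, and passing a totalsize of 0 lets the reader auto-detect the length, per the constructor comment.

```go
package main

import (
	"io"
	"net/http"
	"os"

	"github.com/docker/docker/registry/resumable"
)

func main() {
	req, err := http.NewRequest("GET", "https://example.com/layer.tar", nil)
	if err != nil {
		panic(err)
	}
	// 5 retries; total size 0 lets the reader auto-detect Content-Length.
	body := resumable.NewRequestReader(http.DefaultClient, req, 5, 0)
	defer body.Close()
	_, _ = io.Copy(os.Stdout, body)
}
```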
-type Service interface { - Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error) - LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) - LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) - ResolveRepository(name reference.Named) (*RepositoryInfo, error) - Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) - ServiceConfig() *registrytypes.ServiceConfig - TLSConfig(hostname string) (*tls.Config, error) - LoadAllowNondistributableArtifacts([]string) error - LoadMirrors([]string) error - LoadInsecureRegistries([]string) error -} - -// DefaultService is a registry service. It tracks configuration data such as a list -// of mirrors. -type DefaultService struct { - config *serviceConfig - mu sync.Mutex -} - -// NewService returns a new instance of DefaultService ready to be -// installed into an engine. -func NewService(options ServiceOptions) (*DefaultService, error) { - config, err := newServiceConfig(options) - - return &DefaultService{config: config}, err -} - -// ServiceConfig returns the public registry service configuration. -func (s *DefaultService) ServiceConfig() *registrytypes.ServiceConfig { - s.mu.Lock() - defer s.mu.Unlock() - - servConfig := registrytypes.ServiceConfig{ - AllowNondistributableArtifactsCIDRs: make([]*(registrytypes.NetIPNet), 0), - AllowNondistributableArtifactsHostnames: make([]string, 0), - InsecureRegistryCIDRs: make([]*(registrytypes.NetIPNet), 0), - IndexConfigs: make(map[string]*(registrytypes.IndexInfo)), - Mirrors: make([]string, 0), - } - - // construct a new ServiceConfig which will not retrieve s.Config directly, - // and look up items in s.config with mu locked - servConfig.AllowNondistributableArtifactsCIDRs = append(servConfig.AllowNondistributableArtifactsCIDRs, s.config.ServiceConfig.AllowNondistributableArtifactsCIDRs...) - servConfig.AllowNondistributableArtifactsHostnames = append(servConfig.AllowNondistributableArtifactsHostnames, s.config.ServiceConfig.AllowNondistributableArtifactsHostnames...) - servConfig.InsecureRegistryCIDRs = append(servConfig.InsecureRegistryCIDRs, s.config.ServiceConfig.InsecureRegistryCIDRs...) - - for key, value := range s.config.ServiceConfig.IndexConfigs { - servConfig.IndexConfigs[key] = value - } - - servConfig.Mirrors = append(servConfig.Mirrors, s.config.ServiceConfig.Mirrors...) - - return &servConfig -} - -// LoadAllowNondistributableArtifacts loads allow-nondistributable-artifacts registries for Service. -func (s *DefaultService) LoadAllowNondistributableArtifacts(registries []string) error { - s.mu.Lock() - defer s.mu.Unlock() - - return s.config.LoadAllowNondistributableArtifacts(registries) -} - -// LoadMirrors loads registry mirrors for Service -func (s *DefaultService) LoadMirrors(mirrors []string) error { - s.mu.Lock() - defer s.mu.Unlock() - - return s.config.LoadMirrors(mirrors) -} - -// LoadInsecureRegistries loads insecure registries for Service -func (s *DefaultService) LoadInsecureRegistries(registries []string) error { - s.mu.Lock() - defer s.mu.Unlock() - - return s.config.LoadInsecureRegistries(registries) -} - -// Auth contacts the public registry with the provided credentials, -// and returns OK if authentication was successful. -// It can be used to verify the validity of a client's credentials. 
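DefaultService.ServiceConfig above hands callers a copy assembled while the mutex is held, so reads never race the Load* mutators. The same copy-under-lock pattern in miniature, with hypothetical names.

```go
package main

import (
	"fmt"
	"sync"
)

type service struct {
	mu      sync.Mutex
	mirrors []string
}

// snapshot returns a copy; the caller never sees s.mirrors directly.
func (s *service) snapshot() []string {
	s.mu.Lock()
	defer s.mu.Unlock()
	out := make([]string, len(s.mirrors))
	copy(out, s.mirrors)
	return out
}

func main() {
	s := &service{mirrors: []string{"https://mirror.example.com/"}}
	m := s.snapshot()
	m[0] = "mutated"             // does not affect the service
	fmt.Println(s.snapshot()[0]) // https://mirror.example.com/
}
```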
-func (s *DefaultService) Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error) {
- // TODO Use ctx when contacting the registry
- serverAddress := authConfig.ServerAddress
- if serverAddress == "" {
- serverAddress = IndexServer
- }
- if !strings.HasPrefix(serverAddress, "https://") && !strings.HasPrefix(serverAddress, "http://") {
- serverAddress = "https://" + serverAddress
- }
- u, err := url.Parse(serverAddress)
- if err != nil {
- return "", "", errdefs.InvalidParameter(errors.Errorf("unable to parse server address: %v", err))
- }
-
- endpoints, err := s.LookupPushEndpoints(u.Host)
- if err != nil {
- return "", "", errdefs.InvalidParameter(err)
- }
-
- for _, endpoint := range endpoints {
- login := loginV2
- if endpoint.Version == APIVersion1 {
- login = loginV1
- }
-
- status, token, err = login(authConfig, endpoint, userAgent)
- if err == nil {
- return
- }
- if fErr, ok := err.(fallbackError); ok {
- err = fErr.err
- logrus.Infof("Error logging in to %s endpoint, trying next endpoint: %v", endpoint.Version, err)
- continue
- }
-
- return "", "", err
- }
-
- return "", "", err
-}
-
-// splitReposSearchTerm breaks a search term into an index name and remote name
-func splitReposSearchTerm(reposName string) (string, string) {
- nameParts := strings.SplitN(reposName, "/", 2)
- var indexName, remoteName string
- if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") &&
- !strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") {
- // This is a Docker Hub repository (ex: samalba/hipache or ubuntu),
- // so use the default index name, 'docker.io'
- indexName = IndexName
- remoteName = reposName
- } else {
- indexName = nameParts[0]
- remoteName = nameParts[1]
- }
- return indexName, remoteName
-}
-
-// Search queries the public registry for images matching the specified
-// search terms, and returns the results.
-func (s *DefaultService) Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) {
- // TODO Use ctx when searching for repositories
- if err := validateNoScheme(term); err != nil {
- return nil, err
- }
-
- indexName, remoteName := splitReposSearchTerm(term)
-
- // Search is a long-running operation; lock s.config only for the lookup
- // so we don't block other callers.
- s.mu.Lock()
- index, err := newIndexInfo(s.config, indexName)
- s.mu.Unlock()
-
- if err != nil {
- return nil, err
- }
-
- // *TODO: Search multiple indexes.
- endpoint, err := NewV1Endpoint(index, userAgent, http.Header(headers)) - if err != nil { - return nil, err - } - - var client *http.Client - if authConfig != nil && authConfig.IdentityToken != "" && authConfig.Username != "" { - creds := NewStaticCredentialStore(authConfig) - scopes := []auth.Scope{ - auth.RegistryScope{ - Name: "catalog", - Actions: []string{"search"}, - }, - } - - modifiers := Headers(userAgent, nil) - v2Client, foundV2, err := v2AuthHTTPClient(endpoint.URL, endpoint.client.Transport, modifiers, creds, scopes) - if err != nil { - if fErr, ok := err.(fallbackError); ok { - logrus.Errorf("Cannot use identity token for search, v2 auth not supported: %v", fErr.err) - } else { - return nil, err - } - } else if foundV2 { - // Copy non transport http client features - v2Client.Timeout = endpoint.client.Timeout - v2Client.CheckRedirect = endpoint.client.CheckRedirect - v2Client.Jar = endpoint.client.Jar - - logrus.Debugf("using v2 client for search to %s", endpoint.URL) - client = v2Client - } - } - - if client == nil { - client = endpoint.client - if err := authorizeClient(client, authConfig, endpoint); err != nil { - return nil, err - } - } - - r := newSession(client, authConfig, endpoint) - - if index.Official { - localName := remoteName - if strings.HasPrefix(localName, "library/") { - // If pull "library/foo", it's stored locally under "foo" - localName = strings.SplitN(localName, "/", 2)[1] - } - - return r.SearchRepositories(localName, limit) - } - return r.SearchRepositories(remoteName, limit) -} - -// ResolveRepository splits a repository name into its components -// and configuration of the associated registry. -func (s *DefaultService) ResolveRepository(name reference.Named) (*RepositoryInfo, error) { - s.mu.Lock() - defer s.mu.Unlock() - return newRepositoryInfo(s.config, name) -} - -// APIEndpoint represents a remote API endpoint -type APIEndpoint struct { - Mirror bool - URL *url.URL - Version APIVersion - AllowNondistributableArtifacts bool - Official bool - TrimHostname bool - TLSConfig *tls.Config -} - -// ToV1Endpoint returns a V1 API endpoint based on the APIEndpoint -func (e APIEndpoint) ToV1Endpoint(userAgent string, metaHeaders http.Header) *V1Endpoint { - return newV1Endpoint(*e.URL, e.TLSConfig, userAgent, metaHeaders) -} - -// TLSConfig constructs a client TLS configuration based on server defaults -func (s *DefaultService) TLSConfig(hostname string) (*tls.Config, error) { - s.mu.Lock() - defer s.mu.Unlock() - - return newTLSConfig(hostname, isSecureIndex(s.config, hostname)) -} - -// tlsConfig constructs a client TLS configuration based on server defaults -func (s *DefaultService) tlsConfig(hostname string) (*tls.Config, error) { - return newTLSConfig(hostname, isSecureIndex(s.config, hostname)) -} - -func (s *DefaultService) tlsConfigForMirror(mirrorURL *url.URL) (*tls.Config, error) { - return s.tlsConfig(mirrorURL.Host) -} - -// LookupPullEndpoints creates a list of endpoints to try to pull from, in order of preference. -// It gives preference to v2 endpoints over v1, mirrors over the actual -// registry, and HTTPS over plain HTTP. -func (s *DefaultService) LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) { - s.mu.Lock() - defer s.mu.Unlock() - - return s.lookupEndpoints(hostname) -} - -// LookupPushEndpoints creates a list of endpoints to try to push to, in order of preference. -// It gives preference to v2 endpoints over v1, and HTTPS over plain HTTP. -// Mirrors are not included. 
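The index/remote split that Search relies on (splitReposSearchTerm, deleted above): a first path component containing a dot or a colon, or equal to "localhost", names a registry; anything else is a Docker Hub repository. A runnable restatement with the IndexName constant inlined as "docker.io".

```go
package main

import (
	"fmt"
	"strings"
)

func splitReposSearchTerm(reposName string) (string, string) {
	nameParts := strings.SplitN(reposName, "/", 2)
	if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") &&
		!strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") {
		// No registry hostname: this is a Docker Hub repository.
		return "docker.io", reposName
	}
	return nameParts[0], nameParts[1]
}

func main() {
	fmt.Println(splitReposSearchTerm("ubuntu"))                       // docker.io ubuntu
	fmt.Println(splitReposSearchTerm("samalba/hipache"))              // docker.io samalba/hipache
	fmt.Println(splitReposSearchTerm("registry.local:5000/team/app")) // registry.local:5000 team/app
}
```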
-func (s *DefaultService) LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) { - s.mu.Lock() - defer s.mu.Unlock() - - allEndpoints, err := s.lookupEndpoints(hostname) - if err == nil { - for _, endpoint := range allEndpoints { - if !endpoint.Mirror { - endpoints = append(endpoints, endpoint) - } - } - } - return endpoints, err -} - -func (s *DefaultService) lookupEndpoints(hostname string) (endpoints []APIEndpoint, err error) { - endpoints, err = s.lookupV2Endpoints(hostname) - if err != nil { - return nil, err - } - - if s.config.V2Only { - return endpoints, nil - } - - legacyEndpoints, err := s.lookupV1Endpoints(hostname) - if err != nil { - return nil, err - } - endpoints = append(endpoints, legacyEndpoints...) - - return endpoints, nil -} diff --git a/vendor/github.com/docker/docker/registry/service_v1.go b/vendor/github.com/docker/docker/registry/service_v1.go deleted file mode 100644 index d955ec51f..000000000 --- a/vendor/github.com/docker/docker/registry/service_v1.go +++ /dev/null @@ -1,40 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import "net/url" - -func (s *DefaultService) lookupV1Endpoints(hostname string) (endpoints []APIEndpoint, err error) { - if hostname == DefaultNamespace || hostname == DefaultV2Registry.Host || hostname == IndexHostname { - return []APIEndpoint{}, nil - } - - tlsConfig, err := s.tlsConfig(hostname) - if err != nil { - return nil, err - } - - endpoints = []APIEndpoint{ - { - URL: &url.URL{ - Scheme: "https", - Host: hostname, - }, - Version: APIVersion1, - TrimHostname: true, - TLSConfig: tlsConfig, - }, - } - - if tlsConfig.InsecureSkipVerify { - endpoints = append(endpoints, APIEndpoint{ // or this - URL: &url.URL{ - Scheme: "http", - Host: hostname, - }, - Version: APIVersion1, - TrimHostname: true, - // used to check if supposed to be secure via InsecureSkipVerify - TLSConfig: tlsConfig, - }) - } - return endpoints, nil -} diff --git a/vendor/github.com/docker/docker/registry/service_v2.go b/vendor/github.com/docker/docker/registry/service_v2.go deleted file mode 100644 index 3a56dc911..000000000 --- a/vendor/github.com/docker/docker/registry/service_v2.go +++ /dev/null @@ -1,82 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "net/url" - "strings" - - "github.com/docker/go-connections/tlsconfig" -) - -func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndpoint, err error) { - tlsConfig := tlsconfig.ServerDefault() - if hostname == DefaultNamespace || hostname == IndexHostname { - // v2 mirrors - for _, mirror := range s.config.Mirrors { - if !strings.HasPrefix(mirror, "http://") && !strings.HasPrefix(mirror, "https://") { - mirror = "https://" + mirror - } - mirrorURL, err := url.Parse(mirror) - if err != nil { - return nil, err - } - mirrorTLSConfig, err := s.tlsConfigForMirror(mirrorURL) - if err != nil { - return nil, err - } - endpoints = append(endpoints, APIEndpoint{ - URL: mirrorURL, - // guess mirrors are v2 - Version: APIVersion2, - Mirror: true, - TrimHostname: true, - TLSConfig: mirrorTLSConfig, - }) - } - // v2 registry - endpoints = append(endpoints, APIEndpoint{ - URL: DefaultV2Registry, - Version: APIVersion2, - Official: true, - TrimHostname: true, - TLSConfig: tlsConfig, - }) - - return endpoints, nil - } - - ana := allowNondistributableArtifacts(s.config, hostname) - - tlsConfig, err = s.tlsConfig(hostname) - if err != nil { - return nil, err - } - - endpoints = []APIEndpoint{ - { - URL: &url.URL{ - Scheme: "https", 
- Host: hostname, - }, - Version: APIVersion2, - AllowNondistributableArtifacts: ana, - TrimHostname: true, - TLSConfig: tlsConfig, - }, - } - - if tlsConfig.InsecureSkipVerify { - endpoints = append(endpoints, APIEndpoint{ - URL: &url.URL{ - Scheme: "http", - Host: hostname, - }, - Version: APIVersion2, - AllowNondistributableArtifacts: ana, - TrimHostname: true, - // used to check if supposed to be secure via InsecureSkipVerify - TLSConfig: tlsConfig, - }) - } - - return endpoints, nil -} diff --git a/vendor/github.com/docker/docker/registry/session.go b/vendor/github.com/docker/docker/registry/session.go deleted file mode 100644 index ef1429959..000000000 --- a/vendor/github.com/docker/docker/registry/session.go +++ /dev/null @@ -1,779 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "bytes" - "crypto/sha256" - // this is required for some certificates - _ "crypto/sha512" - "encoding/hex" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/http/cookiejar" - "net/url" - "strconv" - "strings" - "sync" - - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/docker/api/types" - registrytypes "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/tarsum" - "github.com/docker/docker/registry/resumable" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -var ( - // ErrRepoNotFound is returned if the repository didn't exist on the - // remote side - ErrRepoNotFound notFoundError = "Repository not found" -) - -// A Session is used to communicate with a V1 registry -type Session struct { - indexEndpoint *V1Endpoint - client *http.Client - // TODO(tiborvass): remove authConfig - authConfig *types.AuthConfig - id string -} - -type authTransport struct { - http.RoundTripper - *types.AuthConfig - - alwaysSetBasicAuth bool - token []string - - mu sync.Mutex // guards modReq - modReq map[*http.Request]*http.Request // original -> modified -} - -// AuthTransport handles the auth layer when communicating with a v1 registry (private or official) -// -// For private v1 registries, set alwaysSetBasicAuth to true. -// -// For the official v1 registry, if there isn't already an Authorization header in the request, -// but there is an X-Docker-Token header set to true, then Basic Auth will be used to set the Authorization header. -// After sending the request with the provided base http.RoundTripper, if an X-Docker-Token header, representing -// a token, is present in the response, then it gets cached and sent in the Authorization header of all subsequent -// requests. -// -// If the server sends a token without the client having requested it, it is ignored. -// -// This RoundTripper also has a CancelRequest method important for correct timeout handling. -func AuthTransport(base http.RoundTripper, authConfig *types.AuthConfig, alwaysSetBasicAuth bool) http.RoundTripper { - if base == nil { - base = http.DefaultTransport - } - return &authTransport{ - RoundTripper: base, - AuthConfig: authConfig, - alwaysSetBasicAuth: alwaysSetBasicAuth, - modReq: make(map[*http.Request]*http.Request), - } -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. 
-func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) - } - - return r2 -} - -// RoundTrip changes an HTTP request's headers to add the necessary -// authentication-related headers -func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) { - // Authorization should not be set on 302 redirect for untrusted locations. - // This logic mirrors the behavior in addRequiredHeadersToRedirectedRequests. - // As the authorization logic is currently implemented in RoundTrip, - // a 302 redirect is detected by looking at the Referrer header as go http package adds said header. - // This is safe as Docker doesn't set Referrer in other scenarios. - if orig.Header.Get("Referer") != "" && !trustedLocation(orig) { - return tr.RoundTripper.RoundTrip(orig) - } - - req := cloneRequest(orig) - tr.mu.Lock() - tr.modReq[orig] = req - tr.mu.Unlock() - - if tr.alwaysSetBasicAuth { - if tr.AuthConfig == nil { - return nil, errors.New("unexpected error: empty auth config") - } - req.SetBasicAuth(tr.Username, tr.Password) - return tr.RoundTripper.RoundTrip(req) - } - - // Don't override - if req.Header.Get("Authorization") == "" { - if req.Header.Get("X-Docker-Token") == "true" && tr.AuthConfig != nil && len(tr.Username) > 0 { - req.SetBasicAuth(tr.Username, tr.Password) - } else if len(tr.token) > 0 { - req.Header.Set("Authorization", "Token "+strings.Join(tr.token, ",")) - } - } - resp, err := tr.RoundTripper.RoundTrip(req) - if err != nil { - delete(tr.modReq, orig) - return nil, err - } - if len(resp.Header["X-Docker-Token"]) > 0 { - tr.token = resp.Header["X-Docker-Token"] - } - resp.Body = &ioutils.OnEOFReader{ - Rc: resp.Body, - Fn: func() { - tr.mu.Lock() - delete(tr.modReq, orig) - tr.mu.Unlock() - }, - } - return resp, nil -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (tr *authTransport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := tr.RoundTripper.(canceler); ok { - tr.mu.Lock() - modReq := tr.modReq[req] - delete(tr.modReq, req) - tr.mu.Unlock() - cr.CancelRequest(modReq) - } -} - -func authorizeClient(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) error { - var alwaysSetBasicAuth bool - - // If we're working with a standalone private registry over HTTPS, send Basic Auth headers - // alongside all our requests. - if endpoint.String() != IndexServer && endpoint.URL.Scheme == "https" { - info, err := endpoint.Ping() - if err != nil { - return err - } - if info.Standalone && authConfig != nil { - logrus.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", endpoint.String()) - alwaysSetBasicAuth = true - } - } - - // Annotate the transport unconditionally so that v2 can - // properly fallback on v1 when an image is not found. 
- client.Transport = AuthTransport(client.Transport, authConfig, alwaysSetBasicAuth) - - jar, err := cookiejar.New(nil) - if err != nil { - return errors.New("cookiejar.New is not supposed to return an error") - } - client.Jar = jar - - return nil -} - -func newSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) *Session { - return &Session{ - authConfig: authConfig, - client: client, - indexEndpoint: endpoint, - id: stringid.GenerateRandomID(), - } -} - -// NewSession creates a new session -// TODO(tiborvass): remove authConfig param once registry client v2 is vendored -func NewSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) (*Session, error) { - if err := authorizeClient(client, authConfig, endpoint); err != nil { - return nil, err - } - - return newSession(client, authConfig, endpoint), nil -} - -// ID returns this registry session's ID. -func (r *Session) ID() string { - return r.id -} - -// GetRemoteHistory retrieves the history of a given image from the registry. -// It returns a list of the parent's JSON files (including the requested image). -func (r *Session) GetRemoteHistory(imgID, registry string) ([]string, error) { - res, err := r.client.Get(registry + "images/" + imgID + "/ancestry") - if err != nil { - return nil, err - } - defer res.Body.Close() - if res.StatusCode != 200 { - if res.StatusCode == 401 { - return nil, errcode.ErrorCodeUnauthorized.WithArgs() - } - return nil, newJSONError(fmt.Sprintf("Server error: %d trying to fetch remote history for %s", res.StatusCode, imgID), res) - } - - var history []string - if err := json.NewDecoder(res.Body).Decode(&history); err != nil { - return nil, fmt.Errorf("Error while reading the http response: %v", err) - } - - logrus.Debugf("Ancestry: %v", history) - return history, nil -} - -// LookupRemoteImage checks if an image exists in the registry -func (r *Session) LookupRemoteImage(imgID, registry string) error { - res, err := r.client.Get(registry + "images/" + imgID + "/json") - if err != nil { - return err - } - res.Body.Close() - if res.StatusCode != 200 { - return newJSONError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) - } - return nil -} - -// GetRemoteImageJSON retrieves an image's JSON metadata from the registry. 
-func (r *Session) GetRemoteImageJSON(imgID, registry string) ([]byte, int64, error) { - res, err := r.client.Get(registry + "images/" + imgID + "/json") - if err != nil { - return nil, -1, fmt.Errorf("Failed to download json: %s", err) - } - defer res.Body.Close() - if res.StatusCode != 200 { - return nil, -1, newJSONError(fmt.Sprintf("HTTP code %d", res.StatusCode), res) - } - // if the size header is not present, then set it to '-1' - imageSize := int64(-1) - if hdr := res.Header.Get("X-Docker-Size"); hdr != "" { - imageSize, err = strconv.ParseInt(hdr, 10, 64) - if err != nil { - return nil, -1, err - } - } - - jsonString, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, -1, fmt.Errorf("Failed to parse downloaded json: %v (%s)", err, jsonString) - } - return jsonString, imageSize, nil -} - -// GetRemoteImageLayer retrieves an image layer from the registry -func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io.ReadCloser, error) { - var ( - statusCode = 0 - res *http.Response - err error - imageURL = fmt.Sprintf("%simages/%s/layer", registry, imgID) - ) - - req, err := http.NewRequest("GET", imageURL, nil) - if err != nil { - return nil, fmt.Errorf("Error while getting from the server: %v", err) - } - - res, err = r.client.Do(req) - if err != nil { - logrus.Debugf("Error contacting registry %s: %v", registry, err) - // the only case err != nil && res != nil is https://golang.org/src/net/http/client.go#L515 - if res != nil { - if res.Body != nil { - res.Body.Close() - } - statusCode = res.StatusCode - } - return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", - statusCode, imgID) - } - - if res.StatusCode != 200 { - res.Body.Close() - return nil, fmt.Errorf("Server error: Status %d while fetching image layer (%s)", - res.StatusCode, imgID) - } - - if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 { - logrus.Debug("server supports resume") - return resumable.NewRequestReaderWithInitialResponse(r.client, req, 5, imgSize, res), nil - } - logrus.Debug("server doesn't support resume") - return res.Body, nil -} - -// GetRemoteTag retrieves the tag named in the askedTag argument from the given -// repository. It queries each of the registries supplied in the registries -// argument, and returns data from the first one that answers the query -// successfully. -func (r *Session) GetRemoteTag(registries []string, repositoryRef reference.Named, askedTag string) (string, error) { - repository := reference.Path(repositoryRef) - - if strings.Count(repository, "/") == 0 { - // This will be removed once the registry supports auto-resolution on - // the "library" namespace - repository = "library/" + repository - } - for _, host := range registries { - endpoint := fmt.Sprintf("%srepositories/%s/tags/%s", host, repository, askedTag) - res, err := r.client.Get(endpoint) - if err != nil { - return "", err - } - - logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint) - defer res.Body.Close() - - if res.StatusCode == 404 { - return "", ErrRepoNotFound - } - if res.StatusCode != 200 { - continue - } - - var tagID string - if err := json.NewDecoder(res.Body).Decode(&tagID); err != nil { - return "", err - } - return tagID, nil - } - return "", fmt.Errorf("Could not reach any registry endpoint") -} - -// GetRemoteTags retrieves all tags from the given repository. It queries each -// of the registries supplied in the registries argument, and returns data from -// the first one that answers the query successfully. 
It returns a map with -// tag names as the keys and image IDs as the values. -func (r *Session) GetRemoteTags(registries []string, repositoryRef reference.Named) (map[string]string, error) { - repository := reference.Path(repositoryRef) - - if strings.Count(repository, "/") == 0 { - // This will be removed once the registry supports auto-resolution on - // the "library" namespace - repository = "library/" + repository - } - for _, host := range registries { - endpoint := fmt.Sprintf("%srepositories/%s/tags", host, repository) - res, err := r.client.Get(endpoint) - if err != nil { - return nil, err - } - - logrus.Debugf("Got status code %d from %s", res.StatusCode, endpoint) - defer res.Body.Close() - - if res.StatusCode == 404 { - return nil, ErrRepoNotFound - } - if res.StatusCode != 200 { - continue - } - - result := make(map[string]string) - if err := json.NewDecoder(res.Body).Decode(&result); err != nil { - return nil, err - } - return result, nil - } - return nil, fmt.Errorf("Could not reach any registry endpoint") -} - -func buildEndpointsList(headers []string, indexEp string) ([]string, error) { - var endpoints []string - parsedURL, err := url.Parse(indexEp) - if err != nil { - return nil, err - } - var urlScheme = parsedURL.Scheme - // The registry's URL scheme has to match the Index' - for _, ep := range headers { - epList := strings.Split(ep, ",") - for _, epListElement := range epList { - endpoints = append( - endpoints, - fmt.Sprintf("%s://%s/v1/", urlScheme, strings.TrimSpace(epListElement))) - } - } - return endpoints, nil -} - -// GetRepositoryData returns lists of images and endpoints for the repository -func (r *Session) GetRepositoryData(name reference.Named) (*RepositoryData, error) { - repositoryTarget := fmt.Sprintf("%srepositories/%s/images", r.indexEndpoint.String(), reference.Path(name)) - - logrus.Debugf("[registry] Calling GET %s", repositoryTarget) - - req, err := http.NewRequest("GET", repositoryTarget, nil) - if err != nil { - return nil, err - } - // this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests - req.Header.Set("X-Docker-Token", "true") - res, err := r.client.Do(req) - if err != nil { - // check if the error is because of i/o timeout - // and return a non-obtuse error message for users - // "Get https://index.docker.io/v1/repositories/library/busybox/images: i/o timeout" - // was a top search on the docker user forum - if isTimeout(err) { - return nil, fmt.Errorf("network timed out while trying to connect to %s. You may want to check your internet connection or if you are behind a proxy", repositoryTarget) - } - return nil, fmt.Errorf("Error while pulling image: %v", err) - } - defer res.Body.Close() - if res.StatusCode == 401 { - return nil, errcode.ErrorCodeUnauthorized.WithArgs() - } - // TODO: Right now we're ignoring checksums in the response body. - // In the future, we need to use them to check image validity. 
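// [Editor's annotation - not part of the deleted file] This is the v1
// "token + endpoints" handshake: X-Docker-Token on the request asks the
// index for an auth token, and the X-Docker-Endpoints response header names
// the hosts that serve the actual image data. buildEndpointsList above
// rewrites those hosts onto the index's URL scheme, e.g. a header value of
// "reg1.example.com, reg2.example.com" against index
// "https://index.docker.io/v1/" yields
// ["https://reg1.example.com/v1/", "https://reg2.example.com/v1/"].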
- if res.StatusCode == 404 { - return nil, newJSONError(fmt.Sprintf("HTTP code: %d", res.StatusCode), res) - } else if res.StatusCode != 200 { - errBody, err := ioutil.ReadAll(res.Body) - if err != nil { - logrus.Debugf("Error reading response body: %s", err) - } - return nil, newJSONError(fmt.Sprintf("Error: Status %d trying to pull repository %s: %q", res.StatusCode, reference.Path(name), errBody), res) - } - - var endpoints []string - if res.Header.Get("X-Docker-Endpoints") != "" { - endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.String()) - if err != nil { - return nil, err - } - } else { - // Assume the endpoint is on the same host - endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", r.indexEndpoint.URL.Scheme, req.URL.Host)) - } - - remoteChecksums := []*ImgData{} - if err := json.NewDecoder(res.Body).Decode(&remoteChecksums); err != nil { - return nil, err - } - - // Forge a better object from the retrieved data - imgsData := make(map[string]*ImgData, len(remoteChecksums)) - for _, elem := range remoteChecksums { - imgsData[elem.ID] = elem - } - - return &RepositoryData{ - ImgList: imgsData, - Endpoints: endpoints, - }, nil -} - -// PushImageChecksumRegistry uploads checksums for an image -func (r *Session) PushImageChecksumRegistry(imgData *ImgData, registry string) error { - u := registry + "images/" + imgData.ID + "/checksum" - - logrus.Debugf("[registry] Calling PUT %s", u) - - req, err := http.NewRequest("PUT", u, nil) - if err != nil { - return err - } - req.Header.Set("X-Docker-Checksum", imgData.Checksum) - req.Header.Set("X-Docker-Checksum-Payload", imgData.ChecksumPayload) - - res, err := r.client.Do(req) - if err != nil { - return fmt.Errorf("Failed to upload metadata: %v", err) - } - defer res.Body.Close() - if len(res.Cookies()) > 0 { - r.client.Jar.SetCookies(req.URL, res.Cookies()) - } - if res.StatusCode != 200 { - errBody, err := ioutil.ReadAll(res.Body) - if err != nil { - return fmt.Errorf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err) - } - var jsonBody map[string]string - if err := json.Unmarshal(errBody, &jsonBody); err != nil { - errBody = []byte(err.Error()) - } else if jsonBody["error"] == "Image already exists" { - return ErrAlreadyExists - } - return fmt.Errorf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody) - } - return nil -} - -// PushImageJSONRegistry pushes JSON metadata for a local image to the registry -func (r *Session) PushImageJSONRegistry(imgData *ImgData, jsonRaw []byte, registry string) error { - - u := registry + "images/" + imgData.ID + "/json" - - logrus.Debugf("[registry] Calling PUT %s", u) - - req, err := http.NewRequest("PUT", u, bytes.NewReader(jsonRaw)) - if err != nil { - return err - } - req.Header.Add("Content-type", "application/json") - - res, err := r.client.Do(req) - if err != nil { - return fmt.Errorf("Failed to upload metadata: %s", err) - } - defer res.Body.Close() - if res.StatusCode == 401 && strings.HasPrefix(registry, "http://") { - return newJSONError("HTTP code 401, Docker will not send auth headers over HTTP.", res) - } - if res.StatusCode != 200 { - errBody, err := ioutil.ReadAll(res.Body) - if err != nil { - return newJSONError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) - } - var jsonBody map[string]string - if err := json.Unmarshal(errBody, &jsonBody); err != nil { - errBody = []byte(err.Error()) - } 
else if jsonBody["error"] == "Image already exists" { - return ErrAlreadyExists - } - return newJSONError(fmt.Sprintf("HTTP code %d while uploading metadata: %q", res.StatusCode, errBody), res) - } - return nil -} - -// PushImageLayerRegistry sends the checksum of an image layer to the registry -func (r *Session) PushImageLayerRegistry(imgID string, layer io.Reader, registry string, jsonRaw []byte) (checksum string, checksumPayload string, err error) { - u := registry + "images/" + imgID + "/layer" - - logrus.Debugf("[registry] Calling PUT %s", u) - - tarsumLayer, err := tarsum.NewTarSum(layer, false, tarsum.Version0) - if err != nil { - return "", "", err - } - h := sha256.New() - h.Write(jsonRaw) - h.Write([]byte{'\n'}) - checksumLayer := io.TeeReader(tarsumLayer, h) - - req, err := http.NewRequest("PUT", u, checksumLayer) - if err != nil { - return "", "", err - } - req.Header.Add("Content-Type", "application/octet-stream") - req.ContentLength = -1 - req.TransferEncoding = []string{"chunked"} - res, err := r.client.Do(req) - if err != nil { - return "", "", fmt.Errorf("Failed to upload layer: %v", err) - } - if rc, ok := layer.(io.Closer); ok { - if err := rc.Close(); err != nil { - return "", "", err - } - } - defer res.Body.Close() - - if res.StatusCode != 200 { - errBody, err := ioutil.ReadAll(res.Body) - if err != nil { - return "", "", newJSONError(fmt.Sprintf("HTTP code %d while uploading metadata and error when trying to parse response body: %s", res.StatusCode, err), res) - } - return "", "", newJSONError(fmt.Sprintf("Received HTTP code %d while uploading layer: %q", res.StatusCode, errBody), res) - } - - checksumPayload = "sha256:" + hex.EncodeToString(h.Sum(nil)) - return tarsumLayer.Sum(jsonRaw), checksumPayload, nil -} - -// PushRegistryTag pushes a tag on the registry. 
-// Remote has the format '<user>/<repo>'
-func (r *Session) PushRegistryTag(remote reference.Named, revision, tag, registry string) error {
-	// "jsonify" the string
-	revision = "\"" + revision + "\""
-	path := fmt.Sprintf("repositories/%s/tags/%s", reference.Path(remote), tag)
-
-	req, err := http.NewRequest("PUT", registry+path, strings.NewReader(revision))
-	if err != nil {
-		return err
-	}
-	req.Header.Add("Content-type", "application/json")
-	req.ContentLength = int64(len(revision))
-	res, err := r.client.Do(req)
-	if err != nil {
-		return err
-	}
-	res.Body.Close()
-	if res.StatusCode != 200 && res.StatusCode != 201 {
-		return newJSONError(fmt.Sprintf("Internal server error: %d trying to push tag %s on %s", res.StatusCode, tag, reference.Path(remote)), res)
-	}
-	return nil
-}
-
-// PushImageJSONIndex uploads an image list to the repository
-func (r *Session) PushImageJSONIndex(remote reference.Named, imgList []*ImgData, validate bool, regs []string) (*RepositoryData, error) {
-	cleanImgList := []*ImgData{}
-	if validate {
-		for _, elem := range imgList {
-			if elem.Checksum != "" {
-				cleanImgList = append(cleanImgList, elem)
-			}
-		}
-	} else {
-		cleanImgList = imgList
-	}
-
-	imgListJSON, err := json.Marshal(cleanImgList)
-	if err != nil {
-		return nil, err
-	}
-	var suffix string
-	if validate {
-		suffix = "images"
-	}
-	u := fmt.Sprintf("%srepositories/%s/%s", r.indexEndpoint.String(), reference.Path(remote), suffix)
-	logrus.Debugf("[registry] PUT %s", u)
-	logrus.Debugf("Image list pushed to index:\n%s", imgListJSON)
-	headers := map[string][]string{
-		"Content-type": {"application/json"},
-		// this will set basic auth in r.client.Transport and send cached X-Docker-Token headers for all subsequent requests
-		"X-Docker-Token": {"true"},
-	}
-	if validate {
-		headers["X-Docker-Endpoints"] = regs
-	}
-
-	// Redirect if necessary
-	var res *http.Response
-	for {
-		if res, err = r.putImageRequest(u, headers, imgListJSON); err != nil {
-			return nil, err
-		}
-		if !shouldRedirect(res) {
-			break
-		}
-		res.Body.Close()
-		u = res.Header.Get("Location")
-		logrus.Debugf("Redirected to %s", u)
-	}
-	defer res.Body.Close()
-
-	if res.StatusCode == 401 {
-		return nil, errcode.ErrorCodeUnauthorized.WithArgs()
-	}
-
-	var tokens, endpoints []string
-	if !validate {
-		if res.StatusCode != 200 && res.StatusCode != 201 {
-			errBody, err := ioutil.ReadAll(res.Body)
-			if err != nil {
-				logrus.Debugf("Error reading response body: %s", err)
-			}
-			return nil, newJSONError(fmt.Sprintf("Error: Status %d trying to push repository %s: %q", res.StatusCode, reference.Path(remote), errBody), res)
-		}
-		tokens = res.Header["X-Docker-Token"]
-		logrus.Debugf("Auth token: %v", tokens)
-
-		if res.Header.Get("X-Docker-Endpoints") == "" {
-			return nil, fmt.Errorf("Index response didn't contain any endpoints")
-		}
-		endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], r.indexEndpoint.String())
-		if err != nil {
-			return nil, err
-		}
-	} else {
-		if res.StatusCode != 204 {
-			errBody, err := ioutil.ReadAll(res.Body)
-			if err != nil {
-				logrus.Debugf("Error reading response body: %s", err)
-			}
-			return nil, newJSONError(fmt.Sprintf("Error: Status %d trying to push checksums %s: %q", res.StatusCode, reference.Path(remote), errBody), res)
-		}
-	}
-
-	return &RepositoryData{
-		Endpoints: endpoints,
-	}, nil
-}
-
-func (r *Session) putImageRequest(u string, headers map[string][]string, body []byte) (*http.Response, error) {
-	req, err := http.NewRequest("PUT", u, bytes.NewReader(body))
-	if err != nil {
-		return nil, err
- } - req.ContentLength = int64(len(body)) - for k, v := range headers { - req.Header[k] = v - } - response, err := r.client.Do(req) - if err != nil { - return nil, err - } - return response, nil -} - -func shouldRedirect(response *http.Response) bool { - return response.StatusCode >= 300 && response.StatusCode < 400 -} - -// SearchRepositories performs a search against the remote repository -func (r *Session) SearchRepositories(term string, limit int) (*registrytypes.SearchResults, error) { - if limit < 1 || limit > 100 { - return nil, errdefs.InvalidParameter(errors.Errorf("Limit %d is outside the range of [1, 100]", limit)) - } - logrus.Debugf("Index server: %s", r.indexEndpoint) - u := r.indexEndpoint.String() + "search?q=" + url.QueryEscape(term) + "&n=" + url.QueryEscape(fmt.Sprintf("%d", limit)) - - req, err := http.NewRequest("GET", u, nil) - if err != nil { - return nil, errors.Wrap(errdefs.InvalidParameter(err), "Error building request") - } - // Have the AuthTransport send authentication, when logged in. - req.Header.Set("X-Docker-Token", "true") - res, err := r.client.Do(req) - if err != nil { - return nil, errdefs.System(err) - } - defer res.Body.Close() - if res.StatusCode != 200 { - return nil, newJSONError(fmt.Sprintf("Unexpected status code %d", res.StatusCode), res) - } - result := new(registrytypes.SearchResults) - return result, errors.Wrap(json.NewDecoder(res.Body).Decode(result), "error decoding registry search results") -} - -func isTimeout(err error) bool { - type timeout interface { - Timeout() bool - } - e := err - switch urlErr := err.(type) { - case *url.Error: - e = urlErr.Err - } - t, ok := e.(timeout) - return ok && t.Timeout() -} - -func newJSONError(msg string, res *http.Response) error { - return &jsonmessage.JSONError{ - Message: msg, - Code: res.StatusCode, - } -} diff --git a/vendor/github.com/docker/docker/registry/types.go b/vendor/github.com/docker/docker/registry/types.go deleted file mode 100644 index 28ed2bfa5..000000000 --- a/vendor/github.com/docker/docker/registry/types.go +++ /dev/null @@ -1,70 +0,0 @@ -package registry // import "github.com/docker/docker/registry" - -import ( - "github.com/docker/distribution/reference" - registrytypes "github.com/docker/docker/api/types/registry" -) - -// RepositoryData tracks the image list, list of endpoints for a repository -type RepositoryData struct { - // ImgList is a list of images in the repository - ImgList map[string]*ImgData - // Endpoints is a list of endpoints returned in X-Docker-Endpoints - Endpoints []string -} - -// ImgData is used to transfer image checksums to and from the registry -type ImgData struct { - // ID is an opaque string that identifies the image - ID string `json:"id"` - Checksum string `json:"checksum,omitempty"` - ChecksumPayload string `json:"-"` - Tag string `json:",omitempty"` -} - -// PingResult contains the information returned when pinging a registry. It -// indicates the registry's version and whether the registry claims to be a -// standalone registry. 
-type PingResult struct { - // Version is the registry version supplied by the registry in an HTTP - // header - Version string `json:"version"` - // Standalone is set to true if the registry indicates it is a - // standalone registry in the X-Docker-Registry-Standalone - // header - Standalone bool `json:"standalone"` -} - -// APIVersion is an integral representation of an API version (presently -// either 1 or 2) -type APIVersion int - -func (av APIVersion) String() string { - return apiVersions[av] -} - -// API Version identifiers. -const ( - _ = iota - APIVersion1 APIVersion = iota - APIVersion2 -) - -var apiVersions = map[APIVersion]string{ - APIVersion1: "v1", - APIVersion2: "v2", -} - -// RepositoryInfo describes a repository -type RepositoryInfo struct { - Name reference.Named - // Index points to registry information - Index *registrytypes.IndexInfo - // Official indicates whether the repository is considered official. - // If the registry is official, and the normalized name does not - // contain a '/' (e.g. "foo"), then it is considered an official repo. - Official bool - // Class represents the class of the repository, such as "plugin" - // or "image". - Class string -} diff --git a/vendor/github.com/docker/docker/restartmanager/restartmanager.go b/vendor/github.com/docker/docker/restartmanager/restartmanager.go deleted file mode 100644 index 6468ccf7e..000000000 --- a/vendor/github.com/docker/docker/restartmanager/restartmanager.go +++ /dev/null @@ -1,133 +0,0 @@ -package restartmanager // import "github.com/docker/docker/restartmanager" - -import ( - "errors" - "fmt" - "sync" - "time" - - "github.com/docker/docker/api/types/container" -) - -const ( - backoffMultiplier = 2 - defaultTimeout = 100 * time.Millisecond - maxRestartTimeout = 1 * time.Minute -) - -// ErrRestartCanceled is returned when the restart manager has been -// canceled and will no longer restart the container. -var ErrRestartCanceled = errors.New("restart canceled") - -// RestartManager defines object that controls container restarting rules. -type RestartManager interface { - Cancel() error - ShouldRestart(exitCode uint32, hasBeenManuallyStopped bool, executionDuration time.Duration) (bool, chan error, error) -} - -type restartManager struct { - sync.Mutex - sync.Once - policy container.RestartPolicy - restartCount int - timeout time.Duration - active bool - cancel chan struct{} - canceled bool -} - -// New returns a new restartManager based on a policy. -func New(policy container.RestartPolicy, restartCount int) RestartManager { - return &restartManager{policy: policy, restartCount: restartCount, cancel: make(chan struct{})} -} - -func (rm *restartManager) SetPolicy(policy container.RestartPolicy) { - rm.Lock() - rm.policy = policy - rm.Unlock() -} - -func (rm *restartManager) ShouldRestart(exitCode uint32, hasBeenManuallyStopped bool, executionDuration time.Duration) (bool, chan error, error) { - if rm.policy.IsNone() { - return false, nil, nil - } - rm.Lock() - unlockOnExit := true - defer func() { - if unlockOnExit { - rm.Unlock() - } - }() - - if rm.canceled { - return false, nil, ErrRestartCanceled - } - - if rm.active { - return false, nil, fmt.Errorf("invalid call on an active restart manager") - } - // if the container ran for more than 10s, regardless of status and policy reset the - // the timeout back to the default. 
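// [Editor's annotation - not part of the deleted file] The branch below is
// an exponential backoff: delays run 100ms, 200ms, 400ms, ... doubling per
// restart (backoffMultiplier) and capped at maxRestartTimeout (one minute),
// with any run of at least 10 seconds resetting the delay to defaultTimeout.
// Equivalent, as a standalone sketch:
//
//	func nextRestartDelay(prev time.Duration, ranTenSeconds bool) time.Duration {
//		if ranTenSeconds || prev == 0 {
//			return 100 * time.Millisecond // defaultTimeout
//		}
//		if d := prev * 2; d < time.Minute {
//			return d
//		}
//		return time.Minute // maxRestartTimeout
//	}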
- if executionDuration.Seconds() >= 10 { - rm.timeout = 0 - } - switch { - case rm.timeout == 0: - rm.timeout = defaultTimeout - case rm.timeout < maxRestartTimeout: - rm.timeout *= backoffMultiplier - } - if rm.timeout > maxRestartTimeout { - rm.timeout = maxRestartTimeout - } - - var restart bool - switch { - case rm.policy.IsAlways(): - restart = true - case rm.policy.IsUnlessStopped() && !hasBeenManuallyStopped: - restart = true - case rm.policy.IsOnFailure(): - // the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count - if max := rm.policy.MaximumRetryCount; max == 0 || rm.restartCount < max { - restart = exitCode != 0 - } - } - - if !restart { - rm.active = false - return false, nil, nil - } - - rm.restartCount++ - - unlockOnExit = false - rm.active = true - rm.Unlock() - - ch := make(chan error) - go func() { - select { - case <-rm.cancel: - ch <- ErrRestartCanceled - close(ch) - case <-time.After(rm.timeout): - rm.Lock() - close(ch) - rm.active = false - rm.Unlock() - } - }() - - return true, ch, nil -} - -func (rm *restartManager) Cancel() error { - rm.Do(func() { - rm.Lock() - rm.canceled = true - close(rm.cancel) - rm.Unlock() - }) - return nil -} diff --git a/vendor/github.com/docker/docker/runconfig/config.go b/vendor/github.com/docker/docker/runconfig/config.go deleted file mode 100644 index cbacf47df..000000000 --- a/vendor/github.com/docker/docker/runconfig/config.go +++ /dev/null @@ -1,81 +0,0 @@ -package runconfig // import "github.com/docker/docker/runconfig" - -import ( - "encoding/json" - "io" - - "github.com/docker/docker/api/types/container" - networktypes "github.com/docker/docker/api/types/network" - "github.com/docker/docker/pkg/sysinfo" -) - -// ContainerDecoder implements httputils.ContainerDecoder -// calling DecodeContainerConfig. -type ContainerDecoder struct{} - -// DecodeConfig makes ContainerDecoder to implement httputils.ContainerDecoder -func (r ContainerDecoder) DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { - return decodeContainerConfig(src) -} - -// DecodeHostConfig makes ContainerDecoder to implement httputils.ContainerDecoder -func (r ContainerDecoder) DecodeHostConfig(src io.Reader) (*container.HostConfig, error) { - return decodeHostConfig(src) -} - -// decodeContainerConfig decodes a json encoded config into a ContainerConfigWrapper -// struct and returns both a Config and a HostConfig struct -// Be aware this function is not checking whether the resulted structs are nil, -// it's your business to do so -func decodeContainerConfig(src io.Reader) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { - var w ContainerConfigWrapper - - decoder := json.NewDecoder(src) - if err := decoder.Decode(&w); err != nil { - return nil, nil, nil, err - } - - hc := w.getHostConfig() - - // Perform platform-specific processing of Volumes and Binds. - if w.Config != nil && hc != nil { - - // Initialize the volumes map if currently nil - if w.Config.Volumes == nil { - w.Config.Volumes = make(map[string]struct{}) - } - } - - // Certain parameters need daemon-side validation that cannot be done - // on the client, as only the daemon knows what is valid for the platform. 
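// [Editor's annotation - not part of the deleted file] The chain below is
// the daemon-side gate for create payloads: network mode, isolation, QoS,
// resources, privileged, and read-only rootfs are each checked by a
// platform-specific validate* helper (the unix and windows variants appear
// later in this patch); the first failure aborts the decode.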
- if err := validateNetMode(w.Config, hc); err != nil { - return nil, nil, nil, err - } - - // Validate isolation - if err := validateIsolation(hc); err != nil { - return nil, nil, nil, err - } - - // Validate QoS - if err := validateQoS(hc); err != nil { - return nil, nil, nil, err - } - - // Validate Resources - if err := validateResources(hc, sysinfo.New(true)); err != nil { - return nil, nil, nil, err - } - - // Validate Privileged - if err := validatePrivileged(hc); err != nil { - return nil, nil, nil, err - } - - // Validate ReadonlyRootfs - if err := validateReadonlyRootfs(hc); err != nil { - return nil, nil, nil, err - } - - return w.Config, hc, w.NetworkingConfig, nil -} diff --git a/vendor/github.com/docker/docker/runconfig/config_unix.go b/vendor/github.com/docker/docker/runconfig/config_unix.go deleted file mode 100644 index 65e8d6fcd..000000000 --- a/vendor/github.com/docker/docker/runconfig/config_unix.go +++ /dev/null @@ -1,59 +0,0 @@ -// +build !windows - -package runconfig // import "github.com/docker/docker/runconfig" - -import ( - "github.com/docker/docker/api/types/container" - networktypes "github.com/docker/docker/api/types/network" -) - -// ContainerConfigWrapper is a Config wrapper that holds the container Config (portable) -// and the corresponding HostConfig (non-portable). -type ContainerConfigWrapper struct { - *container.Config - InnerHostConfig *container.HostConfig `json:"HostConfig,omitempty"` - Cpuset string `json:",omitempty"` // Deprecated. Exported for backwards compatibility. - NetworkingConfig *networktypes.NetworkingConfig `json:"NetworkingConfig,omitempty"` - *container.HostConfig // Deprecated. Exported to read attributes from json that are not in the inner host config structure. -} - -// getHostConfig gets the HostConfig of the Config. -// It's mostly there to handle Deprecated fields of the ContainerConfigWrapper -func (w *ContainerConfigWrapper) getHostConfig() *container.HostConfig { - hc := w.HostConfig - - if hc == nil && w.InnerHostConfig != nil { - hc = w.InnerHostConfig - } else if w.InnerHostConfig != nil { - if hc.Memory != 0 && w.InnerHostConfig.Memory == 0 { - w.InnerHostConfig.Memory = hc.Memory - } - if hc.MemorySwap != 0 && w.InnerHostConfig.MemorySwap == 0 { - w.InnerHostConfig.MemorySwap = hc.MemorySwap - } - if hc.CPUShares != 0 && w.InnerHostConfig.CPUShares == 0 { - w.InnerHostConfig.CPUShares = hc.CPUShares - } - if hc.CpusetCpus != "" && w.InnerHostConfig.CpusetCpus == "" { - w.InnerHostConfig.CpusetCpus = hc.CpusetCpus - } - - if hc.VolumeDriver != "" && w.InnerHostConfig.VolumeDriver == "" { - w.InnerHostConfig.VolumeDriver = hc.VolumeDriver - } - - hc = w.InnerHostConfig - } - - if hc != nil { - if w.Cpuset != "" && hc.CpusetCpus == "" { - hc.CpusetCpus = w.Cpuset - } - } - - // Make sure NetworkMode has an acceptable value. We do this to ensure - // backwards compatible API behavior. 
- SetDefaultNetModeIfBlank(hc) - - return hc -} diff --git a/vendor/github.com/docker/docker/runconfig/config_windows.go b/vendor/github.com/docker/docker/runconfig/config_windows.go deleted file mode 100644 index cced59d4d..000000000 --- a/vendor/github.com/docker/docker/runconfig/config_windows.go +++ /dev/null @@ -1,19 +0,0 @@ -package runconfig // import "github.com/docker/docker/runconfig" - -import ( - "github.com/docker/docker/api/types/container" - networktypes "github.com/docker/docker/api/types/network" -) - -// ContainerConfigWrapper is a Config wrapper that holds the container Config (portable) -// and the corresponding HostConfig (non-portable). -type ContainerConfigWrapper struct { - *container.Config - HostConfig *container.HostConfig `json:"HostConfig,omitempty"` - NetworkingConfig *networktypes.NetworkingConfig `json:"NetworkingConfig,omitempty"` -} - -// getHostConfig gets the HostConfig of the Config. -func (w *ContainerConfigWrapper) getHostConfig() *container.HostConfig { - return w.HostConfig -} diff --git a/vendor/github.com/docker/docker/runconfig/errors.go b/vendor/github.com/docker/docker/runconfig/errors.go deleted file mode 100644 index 038fe3966..000000000 --- a/vendor/github.com/docker/docker/runconfig/errors.go +++ /dev/null @@ -1,42 +0,0 @@ -package runconfig // import "github.com/docker/docker/runconfig" - -const ( - // ErrConflictContainerNetworkAndLinks conflict between --net=container and links - ErrConflictContainerNetworkAndLinks validationError = "conflicting options: container type network can't be used with links. This would result in undefined behavior" - // ErrConflictSharedNetwork conflict between private and other networks - ErrConflictSharedNetwork validationError = "container sharing network namespace with another container or host cannot be connected to any other network" - // ErrConflictHostNetwork conflict from being disconnected from host network or connected to host network. - ErrConflictHostNetwork validationError = "container cannot be disconnected from host network or connected to host network" - // ErrConflictNoNetwork conflict between private and other networks - ErrConflictNoNetwork validationError = "container cannot be connected to multiple networks with one of the networks in private (none) mode" - // ErrConflictNetworkAndDNS conflict between --dns and the network mode - ErrConflictNetworkAndDNS validationError = "conflicting options: dns and the network mode" - // ErrConflictNetworkHostname conflict between the hostname and the network mode - ErrConflictNetworkHostname validationError = "conflicting options: hostname and the network mode" - // ErrConflictHostNetworkAndLinks conflict between --net=host and links - ErrConflictHostNetworkAndLinks validationError = "conflicting options: host type networking can't be used with links. 
This would result in undefined behavior" - // ErrConflictContainerNetworkAndMac conflict between the mac address and the network mode - ErrConflictContainerNetworkAndMac validationError = "conflicting options: mac-address and the network mode" - // ErrConflictNetworkHosts conflict between add-host and the network mode - ErrConflictNetworkHosts validationError = "conflicting options: custom host-to-IP mapping and the network mode" - // ErrConflictNetworkPublishPorts conflict between the publish options and the network mode - ErrConflictNetworkPublishPorts validationError = "conflicting options: port publishing and the container type network mode" - // ErrConflictNetworkExposePorts conflict between the expose option and the network mode - ErrConflictNetworkExposePorts validationError = "conflicting options: port exposing and the container type network mode" - // ErrUnsupportedNetworkAndIP conflict between network mode and requested ip address - ErrUnsupportedNetworkAndIP validationError = "user specified IP address is supported on user defined networks only" - // ErrUnsupportedNetworkNoSubnetAndIP conflict between network with no configured subnet and requested ip address - ErrUnsupportedNetworkNoSubnetAndIP validationError = "user specified IP address is supported only when connecting to networks with user configured subnets" - // ErrUnsupportedNetworkAndAlias conflict between network mode and alias - ErrUnsupportedNetworkAndAlias validationError = "network-scoped alias is supported only for containers in user defined networks" - // ErrConflictUTSHostname conflict between the hostname and the UTS mode - ErrConflictUTSHostname validationError = "conflicting options: hostname and the UTS mode" -) - -type validationError string - -func (e validationError) Error() string { - return string(e) -} - -func (e validationError) InvalidParameter() {} diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig.go b/vendor/github.com/docker/docker/runconfig/hostconfig.go deleted file mode 100644 index 7d99e5acf..000000000 --- a/vendor/github.com/docker/docker/runconfig/hostconfig.go +++ /dev/null @@ -1,79 +0,0 @@ -package runconfig // import "github.com/docker/docker/runconfig" - -import ( - "encoding/json" - "io" - "strings" - - "github.com/docker/docker/api/types/container" -) - -// DecodeHostConfig creates a HostConfig based on the specified Reader. -// It assumes the content of the reader will be JSON, and decodes it. -func decodeHostConfig(src io.Reader) (*container.HostConfig, error) { - decoder := json.NewDecoder(src) - - var w ContainerConfigWrapper - if err := decoder.Decode(&w); err != nil { - return nil, err - } - - hc := w.getHostConfig() - return hc, nil -} - -// SetDefaultNetModeIfBlank changes the NetworkMode in a HostConfig structure -// to default if it is not populated. This ensures backwards compatibility after -// the validation of the network mode was moved from the docker CLI to the -// docker daemon. -func SetDefaultNetModeIfBlank(hc *container.HostConfig) { - if hc != nil { - if hc.NetworkMode == container.NetworkMode("") { - hc.NetworkMode = container.NetworkMode("default") - } - } -} - -// validateNetContainerMode ensures that the various combinations of requested -// network settings wrt container mode are valid. 
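// [Editor's annotation - not part of the deleted file] A container-mode
// NetworkMode is the literal string "container:<name|id>" (for example
// "container:db", as produced by --net=container:db); the strings.Split
// below is what rejects a bare "container:" with no target.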
-func validateNetContainerMode(c *container.Config, hc *container.HostConfig) error {
-	// We may not be passed a host config, such as in the case of docker commit
-	if hc == nil {
-		return nil
-	}
-	parts := strings.Split(string(hc.NetworkMode), ":")
-	if parts[0] == "container" {
-		if len(parts) < 2 || parts[1] == "" {
-			return validationError("Invalid network mode: invalid container format container:<name|id>")
-		}
-	}
-
-	if hc.NetworkMode.IsContainer() && c.Hostname != "" {
-		return ErrConflictNetworkHostname
-	}
-
-	if hc.NetworkMode.IsContainer() && len(hc.Links) > 0 {
-		return ErrConflictContainerNetworkAndLinks
-	}
-
-	if hc.NetworkMode.IsContainer() && len(hc.DNS) > 0 {
-		return ErrConflictNetworkAndDNS
-	}
-
-	if hc.NetworkMode.IsContainer() && len(hc.ExtraHosts) > 0 {
-		return ErrConflictNetworkHosts
-	}
-
-	if (hc.NetworkMode.IsContainer() || hc.NetworkMode.IsHost()) && c.MacAddress != "" {
-		return ErrConflictContainerNetworkAndMac
-	}
-
-	if hc.NetworkMode.IsContainer() && (len(hc.PortBindings) > 0 || hc.PublishAllPorts) {
-		return ErrConflictNetworkPublishPorts
-	}
-
-	if hc.NetworkMode.IsContainer() && len(c.ExposedPorts) > 0 {
-		return ErrConflictNetworkExposePorts
-	}
-	return nil
-}
diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go b/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go
deleted file mode 100644
index e579b06d9..000000000
--- a/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// +build !windows
-
-package runconfig // import "github.com/docker/docker/runconfig"
-
-import (
-	"fmt"
-	"runtime"
-
-	"github.com/docker/docker/api/types/container"
-	"github.com/docker/docker/pkg/sysinfo"
-)
-
-// DefaultDaemonNetworkMode returns the default network stack the daemon should
-// use.
-func DefaultDaemonNetworkMode() container.NetworkMode {
-	return container.NetworkMode("bridge")
-}
-
-// IsPreDefinedNetwork indicates if a network is predefined by the daemon
-func IsPreDefinedNetwork(network string) bool {
-	n := container.NetworkMode(network)
-	return n.IsBridge() || n.IsHost() || n.IsNone() || n.IsDefault()
-}
-
-// validateNetMode ensures that the various combinations of requested
-// network settings are valid.
-func validateNetMode(c *container.Config, hc *container.HostConfig) error {
-	// We may not be passed a host config, such as in the case of docker commit
-	if hc == nil {
-		return nil
-	}
-
-	err := validateNetContainerMode(c, hc)
-	if err != nil {
-		return err
-	}
-
-	if hc.UTSMode.IsHost() && c.Hostname != "" {
-		return ErrConflictUTSHostname
-	}
-
-	if hc.NetworkMode.IsHost() && len(hc.Links) > 0 {
-		return ErrConflictHostNetworkAndLinks
-	}
-
-	return nil
-}
-
-// validateIsolation performs platform specific validation of
-// isolation in the hostconfig structure.
Linux only supports "default" -// which is LXC container isolation -func validateIsolation(hc *container.HostConfig) error { - // We may not be passed a host config, such as in the case of docker commit - if hc == nil { - return nil - } - if !hc.Isolation.IsValid() { - return fmt.Errorf("Invalid isolation: %q - %s only supports 'default'", hc.Isolation, runtime.GOOS) - } - return nil -} - -// validateQoS performs platform specific validation of the QoS settings -func validateQoS(hc *container.HostConfig) error { - // We may not be passed a host config, such as in the case of docker commit - if hc == nil { - return nil - } - - if hc.IOMaximumBandwidth != 0 { - return fmt.Errorf("Invalid QoS settings: %s does not support configuration of maximum bandwidth", runtime.GOOS) - } - - if hc.IOMaximumIOps != 0 { - return fmt.Errorf("Invalid QoS settings: %s does not support configuration of maximum IOPs", runtime.GOOS) - } - return nil -} - -// validateResources performs platform specific validation of the resource settings -// cpu-rt-runtime and cpu-rt-period can not be greater than their parent, cpu-rt-runtime requires sys_nice -func validateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { - // We may not be passed a host config, such as in the case of docker commit - if hc == nil { - return nil - } - - if hc.Resources.CPURealtimePeriod > 0 && !si.CPURealtimePeriod { - return fmt.Errorf("Your kernel does not support cgroup cpu real-time period") - } - - if hc.Resources.CPURealtimeRuntime > 0 && !si.CPURealtimeRuntime { - return fmt.Errorf("Your kernel does not support cgroup cpu real-time runtime") - } - - if hc.Resources.CPURealtimePeriod != 0 && hc.Resources.CPURealtimeRuntime != 0 && hc.Resources.CPURealtimeRuntime > hc.Resources.CPURealtimePeriod { - return fmt.Errorf("cpu real-time runtime cannot be higher than cpu real-time period") - } - return nil -} - -// validatePrivileged performs platform specific validation of the Privileged setting -func validatePrivileged(hc *container.HostConfig) error { - return nil -} - -// validateReadonlyRootfs performs platform specific validation of the ReadonlyRootfs setting -func validateReadonlyRootfs(hc *container.HostConfig) error { - return nil -} diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go b/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go deleted file mode 100644 index 33a4668af..000000000 --- a/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go +++ /dev/null @@ -1,96 +0,0 @@ -package runconfig // import "github.com/docker/docker/runconfig" - -import ( - "fmt" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/pkg/sysinfo" -) - -// DefaultDaemonNetworkMode returns the default network stack the daemon should -// use. -func DefaultDaemonNetworkMode() container.NetworkMode { - return container.NetworkMode("nat") -} - -// IsPreDefinedNetwork indicates if a network is predefined by the daemon -func IsPreDefinedNetwork(network string) bool { - return !container.NetworkMode(network).IsUserDefined() -} - -// validateNetMode ensures that the various combinations of requested -// network settings are valid. 
-func validateNetMode(c *container.Config, hc *container.HostConfig) error { - if hc == nil { - return nil - } - - err := validateNetContainerMode(c, hc) - if err != nil { - return err - } - - if hc.NetworkMode.IsContainer() && hc.Isolation.IsHyperV() { - return fmt.Errorf("Using the network stack of another container is not supported while using Hyper-V Containers") - } - - return nil -} - -// validateIsolation performs platform specific validation of the -// isolation in the hostconfig structure. Windows supports 'default' (or -// blank), 'process', or 'hyperv'. -func validateIsolation(hc *container.HostConfig) error { - // We may not be passed a host config, such as in the case of docker commit - if hc == nil { - return nil - } - if !hc.Isolation.IsValid() { - return fmt.Errorf("Invalid isolation: %q. Windows supports 'default', 'process', or 'hyperv'", hc.Isolation) - } - return nil -} - -// validateQoS performs platform specific validation of the Qos settings -func validateQoS(hc *container.HostConfig) error { - return nil -} - -// validateResources performs platform specific validation of the resource settings -func validateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { - // We may not be passed a host config, such as in the case of docker commit - if hc == nil { - return nil - } - if hc.Resources.CPURealtimePeriod != 0 { - return fmt.Errorf("Windows does not support CPU real-time period") - } - if hc.Resources.CPURealtimeRuntime != 0 { - return fmt.Errorf("Windows does not support CPU real-time runtime") - } - return nil -} - -// validatePrivileged performs platform specific validation of the Privileged setting -func validatePrivileged(hc *container.HostConfig) error { - // We may not be passed a host config, such as in the case of docker commit - if hc == nil { - return nil - } - if hc.Privileged { - return fmt.Errorf("Windows does not support privileged mode") - } - return nil -} - -// validateReadonlyRootfs performs platform specific validation of the ReadonlyRootfs setting -func validateReadonlyRootfs(hc *container.HostConfig) error { - // We may not be passed a host config, such as in the case of docker commit - if hc == nil { - return nil - } - if hc.ReadonlyRootfs { - return fmt.Errorf("Windows does not support root filesystem in read-only mode") - } - return nil -} diff --git a/vendor/github.com/docker/docker/runconfig/opts/parse.go b/vendor/github.com/docker/docker/runconfig/opts/parse.go deleted file mode 100644 index 8f7baeb63..000000000 --- a/vendor/github.com/docker/docker/runconfig/opts/parse.go +++ /dev/null @@ -1,20 +0,0 @@ -package opts // import "github.com/docker/docker/runconfig/opts" - -import ( - "strings" -) - -// ConvertKVStringsToMap converts ["key=value"] to {"key":"value"} -func ConvertKVStringsToMap(values []string) map[string]string { - result := make(map[string]string, len(values)) - for _, value := range values { - kv := strings.SplitN(value, "=", 2) - if len(kv) == 1 { - result[kv[0]] = "" - } else { - result[kv[0]] = kv[1] - } - } - - return result -} diff --git a/vendor/github.com/docker/docker/volume/drivers/adapter.go b/vendor/github.com/docker/docker/volume/drivers/adapter.go deleted file mode 100644 index f6ee07a00..000000000 --- a/vendor/github.com/docker/docker/volume/drivers/adapter.go +++ /dev/null @@ -1,176 +0,0 @@ -package drivers // import "github.com/docker/docker/volume/drivers" - -import ( - "errors" - "strings" - "time" - - "github.com/docker/docker/volume" - "github.com/sirupsen/logrus" -) - -var ( - errNoSuchVolume 
= errors.New("no such volume") -) - -type volumeDriverAdapter struct { - name string - scopePath func(s string) string - capabilities *volume.Capability - proxy volumeDriver -} - -func (a *volumeDriverAdapter) Name() string { - return a.name -} - -func (a *volumeDriverAdapter) Create(name string, opts map[string]string) (volume.Volume, error) { - if err := a.proxy.Create(name, opts); err != nil { - return nil, err - } - return &volumeAdapter{ - proxy: a.proxy, - name: name, - driverName: a.name, - scopePath: a.scopePath, - }, nil -} - -func (a *volumeDriverAdapter) Remove(v volume.Volume) error { - return a.proxy.Remove(v.Name()) -} - -func (a *volumeDriverAdapter) List() ([]volume.Volume, error) { - ls, err := a.proxy.List() - if err != nil { - return nil, err - } - - var out []volume.Volume - for _, vp := range ls { - out = append(out, &volumeAdapter{ - proxy: a.proxy, - name: vp.Name, - scopePath: a.scopePath, - driverName: a.name, - eMount: a.scopePath(vp.Mountpoint), - }) - } - return out, nil -} - -func (a *volumeDriverAdapter) Get(name string) (volume.Volume, error) { - v, err := a.proxy.Get(name) - if err != nil { - return nil, err - } - - // plugin may have returned no volume and no error - if v == nil { - return nil, errNoSuchVolume - } - - return &volumeAdapter{ - proxy: a.proxy, - name: v.Name, - driverName: a.Name(), - eMount: v.Mountpoint, - createdAt: v.CreatedAt, - status: v.Status, - scopePath: a.scopePath, - }, nil -} - -func (a *volumeDriverAdapter) Scope() string { - cap := a.getCapabilities() - return cap.Scope -} - -func (a *volumeDriverAdapter) getCapabilities() volume.Capability { - if a.capabilities != nil { - return *a.capabilities - } - cap, err := a.proxy.Capabilities() - if err != nil { - // `GetCapabilities` is a not a required endpoint. 
- // On error assume it's a local-only driver - logrus.WithError(err).WithField("driver", a.name).Debug("Volume driver returned an error while trying to query its capabilities, using default capabilities") - return volume.Capability{Scope: volume.LocalScope} - } - - // don't spam the warn log below just because the plugin didn't provide a scope - if len(cap.Scope) == 0 { - cap.Scope = volume.LocalScope - } - - cap.Scope = strings.ToLower(cap.Scope) - if cap.Scope != volume.LocalScope && cap.Scope != volume.GlobalScope { - logrus.WithField("driver", a.Name()).WithField("scope", a.Scope).Warn("Volume driver returned an invalid scope") - cap.Scope = volume.LocalScope - } - - a.capabilities = &cap - return cap -} - -type volumeAdapter struct { - proxy volumeDriver - name string - scopePath func(string) string - driverName string - eMount string // ephemeral host volume path - createdAt time.Time // time the directory was created - status map[string]interface{} -} - -type proxyVolume struct { - Name string - Mountpoint string - CreatedAt time.Time - Status map[string]interface{} -} - -func (a *volumeAdapter) Name() string { - return a.name -} - -func (a *volumeAdapter) DriverName() string { - return a.driverName -} - -func (a *volumeAdapter) Path() string { - if len(a.eMount) == 0 { - mountpoint, _ := a.proxy.Path(a.name) - a.eMount = a.scopePath(mountpoint) - } - return a.eMount -} - -func (a *volumeAdapter) CachedPath() string { - return a.eMount -} - -func (a *volumeAdapter) Mount(id string) (string, error) { - mountpoint, err := a.proxy.Mount(a.name, id) - a.eMount = a.scopePath(mountpoint) - return a.eMount, err -} - -func (a *volumeAdapter) Unmount(id string) error { - err := a.proxy.Unmount(a.name, id) - if err == nil { - a.eMount = "" - } - return err -} - -func (a *volumeAdapter) CreatedAt() (time.Time, error) { - return a.createdAt, nil -} -func (a *volumeAdapter) Status() map[string]interface{} { - out := make(map[string]interface{}, len(a.status)) - for k, v := range a.status { - out[k] = v - } - return out -} diff --git a/vendor/github.com/docker/docker/volume/drivers/extpoint.go b/vendor/github.com/docker/docker/volume/drivers/extpoint.go deleted file mode 100644 index b2131c20e..000000000 --- a/vendor/github.com/docker/docker/volume/drivers/extpoint.go +++ /dev/null @@ -1,235 +0,0 @@ -//go:generate pluginrpc-gen -i $GOFILE -o proxy.go -type volumeDriver -name VolumeDriver - -package drivers // import "github.com/docker/docker/volume/drivers" - -import ( - "fmt" - "sort" - "sync" - - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/locker" - getter "github.com/docker/docker/pkg/plugingetter" - "github.com/docker/docker/pkg/plugins" - "github.com/docker/docker/volume" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const extName = "VolumeDriver" - -// volumeDriver defines the available functions that volume plugins must implement. -// This interface is only defined to generate the proxy objects. -// It's not intended to be public or reused. 
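// [Editor's annotation - not part of the deleted file] proxy.go, deleted
// later in this patch, is the pluginrpc-gen output for this interface: each
// method becomes a JSON call against the plugin, e.g. Create(name, opts)
// posts a body like {"Name":"myvol","Opts":{"size":"10G"}} (illustrative
// values) to "VolumeDriver.Create" and expects {"Err":""} back, an empty
// Err string meaning success.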
-// nolint: deadcode -type volumeDriver interface { - // Create a volume with the given name - Create(name string, opts map[string]string) (err error) - // Remove the volume with the given name - Remove(name string) (err error) - // Get the mountpoint of the given volume - Path(name string) (mountpoint string, err error) - // Mount the given volume and return the mountpoint - Mount(name, id string) (mountpoint string, err error) - // Unmount the given volume - Unmount(name, id string) (err error) - // List lists all the volumes known to the driver - List() (volumes []*proxyVolume, err error) - // Get retrieves the volume with the requested name - Get(name string) (volume *proxyVolume, err error) - // Capabilities gets the list of capabilities of the driver - Capabilities() (capabilities volume.Capability, err error) -} - -// Store is an in-memory store for volume drivers -type Store struct { - extensions map[string]volume.Driver - mu sync.Mutex - driverLock *locker.Locker - pluginGetter getter.PluginGetter -} - -// NewStore creates a new volume driver store -func NewStore(pg getter.PluginGetter) *Store { - return &Store{ - extensions: make(map[string]volume.Driver), - driverLock: locker.New(), - pluginGetter: pg, - } -} - -type driverNotFoundError string - -func (e driverNotFoundError) Error() string { - return "volume driver not found: " + string(e) -} - -func (driverNotFoundError) NotFound() {} - -// lookup returns the driver associated with the given name. If a -// driver with the given name has not been registered it checks if -// there is a VolumeDriver plugin available with the given name. -func (s *Store) lookup(name string, mode int) (volume.Driver, error) { - if name == "" { - return nil, errdefs.InvalidParameter(errors.New("driver name cannot be empty")) - } - s.driverLock.Lock(name) - defer s.driverLock.Unlock(name) - - s.mu.Lock() - ext, ok := s.extensions[name] - s.mu.Unlock() - if ok { - return ext, nil - } - if s.pluginGetter != nil { - p, err := s.pluginGetter.Get(name, extName, mode) - if err != nil { - return nil, errors.Wrap(err, "error looking up volume plugin "+name) - } - - d, err := makePluginAdapter(p) - if err != nil { - return nil, errors.Wrap(err, "error making plugin client") - } - if err := validateDriver(d); err != nil { - if mode > 0 { - // Undo any reference count changes from the initial `Get` - if _, err := s.pluginGetter.Get(name, extName, mode*-1); err != nil { - logrus.WithError(err).WithField("action", "validate-driver").WithField("plugin", name).Error("error releasing reference to plugin") - } - } - return nil, err - } - - if p.IsV1() { - s.mu.Lock() - s.extensions[name] = d - s.mu.Unlock() - } - return d, nil - } - return nil, driverNotFoundError(name) -} - -func validateDriver(vd volume.Driver) error { - scope := vd.Scope() - if scope != volume.LocalScope && scope != volume.GlobalScope { - return fmt.Errorf("Driver %q provided an invalid capability scope: %s", vd.Name(), scope) - } - return nil -} - -// Register associates the given driver to the given name, checking if -// the name is already associated -func (s *Store) Register(d volume.Driver, name string) bool { - if name == "" { - return false - } - - s.mu.Lock() - defer s.mu.Unlock() - - if _, exists := s.extensions[name]; exists { - return false - } - - if err := validateDriver(d); err != nil { - return false - } - - s.extensions[name] = d - return true -} - -// GetDriver returns a volume driver by its name. -// If the driver is empty, it looks for the local driver. 
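// [Editor's annotation - not part of the deleted file] The three accessors
// below map onto plugingetter reference counting: GetDriver peeks
// (getter.Lookup), CreateDriver takes a reference (getter.Acquire), and
// ReleaseDriver drops one (getter.Release), so a plugin can stay pinned
// while volumes backed by it are in use.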
-func (s *Store) GetDriver(name string) (volume.Driver, error) { - return s.lookup(name, getter.Lookup) -} - -// CreateDriver returns a volume driver by its name and increments RefCount. -// If the driver is empty, it looks for the local driver. -func (s *Store) CreateDriver(name string) (volume.Driver, error) { - return s.lookup(name, getter.Acquire) -} - -// ReleaseDriver returns a volume driver by its name and decrements RefCount.. -// If the driver is empty, it looks for the local driver. -func (s *Store) ReleaseDriver(name string) (volume.Driver, error) { - return s.lookup(name, getter.Release) -} - -// GetDriverList returns list of volume drivers registered. -// If no driver is registered, empty string list will be returned. -func (s *Store) GetDriverList() []string { - var driverList []string - s.mu.Lock() - defer s.mu.Unlock() - for driverName := range s.extensions { - driverList = append(driverList, driverName) - } - sort.Strings(driverList) - return driverList -} - -// GetAllDrivers lists all the registered drivers -func (s *Store) GetAllDrivers() ([]volume.Driver, error) { - var plugins []getter.CompatPlugin - if s.pluginGetter != nil { - var err error - plugins, err = s.pluginGetter.GetAllByCap(extName) - if err != nil { - return nil, fmt.Errorf("error listing plugins: %v", err) - } - } - var ds []volume.Driver - - s.mu.Lock() - defer s.mu.Unlock() - - for _, d := range s.extensions { - ds = append(ds, d) - } - - for _, p := range plugins { - name := p.Name() - - if _, ok := s.extensions[name]; ok { - continue - } - - ext, err := makePluginAdapter(p) - if err != nil { - return nil, errors.Wrap(err, "error making plugin client") - } - if p.IsV1() { - s.extensions[name] = ext - } - ds = append(ds, ext) - } - return ds, nil -} - -func makePluginAdapter(p getter.CompatPlugin) (*volumeDriverAdapter, error) { - if pc, ok := p.(getter.PluginWithV1Client); ok { - return &volumeDriverAdapter{name: p.Name(), scopePath: p.ScopedPath, proxy: &volumeDriverProxy{pc.Client()}}, nil - } - - pa, ok := p.(getter.PluginAddr) - if !ok { - return nil, errdefs.System(errors.Errorf("got unknown plugin instance %T", p)) - } - - if pa.Protocol() != plugins.ProtocolSchemeHTTPV1 { - return nil, errors.Errorf("plugin protocol not supported: %s", p) - } - - addr := pa.Addr() - client, err := plugins.NewClientWithTimeout(addr.Network()+"://"+addr.String(), nil, pa.Timeout()) - if err != nil { - return nil, errors.Wrap(err, "error creating plugin client") - } - - return &volumeDriverAdapter{name: p.Name(), scopePath: p.ScopedPath, proxy: &volumeDriverProxy{client}}, nil -} diff --git a/vendor/github.com/docker/docker/volume/drivers/proxy.go b/vendor/github.com/docker/docker/volume/drivers/proxy.go deleted file mode 100644 index 8a44faedd..000000000 --- a/vendor/github.com/docker/docker/volume/drivers/proxy.go +++ /dev/null @@ -1,255 +0,0 @@ -// generated code - DO NOT EDIT - -package drivers // import "github.com/docker/docker/volume/drivers" - -import ( - "errors" - "time" - - "github.com/docker/docker/pkg/plugins" - "github.com/docker/docker/volume" -) - -const ( - longTimeout = 2 * time.Minute - shortTimeout = 1 * time.Minute -) - -type client interface { - CallWithOptions(string, interface{}, interface{}, ...func(*plugins.RequestOpts)) error -} - -type volumeDriverProxy struct { - client -} - -type volumeDriverProxyCreateRequest struct { - Name string - Opts map[string]string -} - -type volumeDriverProxyCreateResponse struct { - Err string -} - -func (pp *volumeDriverProxy) Create(name string, opts 
map[string]string) (err error) { - var ( - req volumeDriverProxyCreateRequest - ret volumeDriverProxyCreateResponse - ) - - req.Name = name - req.Opts = opts - - if err = pp.CallWithOptions("VolumeDriver.Create", req, &ret, plugins.WithRequestTimeout(longTimeout)); err != nil { - return - } - - if ret.Err != "" { - err = errors.New(ret.Err) - } - - return -} - -type volumeDriverProxyRemoveRequest struct { - Name string -} - -type volumeDriverProxyRemoveResponse struct { - Err string -} - -func (pp *volumeDriverProxy) Remove(name string) (err error) { - var ( - req volumeDriverProxyRemoveRequest - ret volumeDriverProxyRemoveResponse - ) - - req.Name = name - - if err = pp.CallWithOptions("VolumeDriver.Remove", req, &ret, plugins.WithRequestTimeout(shortTimeout)); err != nil { - return - } - - if ret.Err != "" { - err = errors.New(ret.Err) - } - - return -} - -type volumeDriverProxyPathRequest struct { - Name string -} - -type volumeDriverProxyPathResponse struct { - Mountpoint string - Err string -} - -func (pp *volumeDriverProxy) Path(name string) (mountpoint string, err error) { - var ( - req volumeDriverProxyPathRequest - ret volumeDriverProxyPathResponse - ) - - req.Name = name - - if err = pp.CallWithOptions("VolumeDriver.Path", req, &ret, plugins.WithRequestTimeout(shortTimeout)); err != nil { - return - } - - mountpoint = ret.Mountpoint - - if ret.Err != "" { - err = errors.New(ret.Err) - } - - return -} - -type volumeDriverProxyMountRequest struct { - Name string - ID string -} - -type volumeDriverProxyMountResponse struct { - Mountpoint string - Err string -} - -func (pp *volumeDriverProxy) Mount(name string, id string) (mountpoint string, err error) { - var ( - req volumeDriverProxyMountRequest - ret volumeDriverProxyMountResponse - ) - - req.Name = name - req.ID = id - - if err = pp.CallWithOptions("VolumeDriver.Mount", req, &ret, plugins.WithRequestTimeout(longTimeout)); err != nil { - return - } - - mountpoint = ret.Mountpoint - - if ret.Err != "" { - err = errors.New(ret.Err) - } - - return -} - -type volumeDriverProxyUnmountRequest struct { - Name string - ID string -} - -type volumeDriverProxyUnmountResponse struct { - Err string -} - -func (pp *volumeDriverProxy) Unmount(name string, id string) (err error) { - var ( - req volumeDriverProxyUnmountRequest - ret volumeDriverProxyUnmountResponse - ) - - req.Name = name - req.ID = id - - if err = pp.CallWithOptions("VolumeDriver.Unmount", req, &ret, plugins.WithRequestTimeout(shortTimeout)); err != nil { - return - } - - if ret.Err != "" { - err = errors.New(ret.Err) - } - - return -} - -type volumeDriverProxyListRequest struct { -} - -type volumeDriverProxyListResponse struct { - Volumes []*proxyVolume - Err string -} - -func (pp *volumeDriverProxy) List() (volumes []*proxyVolume, err error) { - var ( - req volumeDriverProxyListRequest - ret volumeDriverProxyListResponse - ) - - if err = pp.CallWithOptions("VolumeDriver.List", req, &ret, plugins.WithRequestTimeout(shortTimeout)); err != nil { - return - } - - volumes = ret.Volumes - - if ret.Err != "" { - err = errors.New(ret.Err) - } - - return -} - -type volumeDriverProxyGetRequest struct { - Name string -} - -type volumeDriverProxyGetResponse struct { - Volume *proxyVolume - Err string -} - -func (pp *volumeDriverProxy) Get(name string) (volume *proxyVolume, err error) { - var ( - req volumeDriverProxyGetRequest - ret volumeDriverProxyGetResponse - ) - - req.Name = name - - if err = pp.CallWithOptions("VolumeDriver.Get", req, &ret, plugins.WithRequestTimeout(shortTimeout)); 
err != nil { - return - } - - volume = ret.Volume - - if ret.Err != "" { - err = errors.New(ret.Err) - } - - return -} - -type volumeDriverProxyCapabilitiesRequest struct { -} - -type volumeDriverProxyCapabilitiesResponse struct { - Capabilities volume.Capability - Err string -} - -func (pp *volumeDriverProxy) Capabilities() (capabilities volume.Capability, err error) { - var ( - req volumeDriverProxyCapabilitiesRequest - ret volumeDriverProxyCapabilitiesResponse - ) - - if err = pp.CallWithOptions("VolumeDriver.Capabilities", req, &ret, plugins.WithRequestTimeout(shortTimeout)); err != nil { - return - } - - capabilities = ret.Capabilities - - if ret.Err != "" { - err = errors.New(ret.Err) - } - - return -} diff --git a/vendor/github.com/docker/docker/volume/local/local.go b/vendor/github.com/docker/docker/volume/local/local.go deleted file mode 100644 index d97347423..000000000 --- a/vendor/github.com/docker/docker/volume/local/local.go +++ /dev/null @@ -1,378 +0,0 @@ -// Package local provides the default implementation for volumes. It -// is used to mount data volume containers and directories local to -// the host server. -package local // import "github.com/docker/docker/volume/local" - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "reflect" - "strings" - "sync" - - "github.com/docker/docker/daemon/names" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/volume" - "github.com/pkg/errors" -) - -// VolumeDataPathName is the name of the directory where the volume data is stored. -// It uses a very distinctive name to avoid collisions migrating data between -// Docker versions. -const ( - VolumeDataPathName = "_data" - volumesPathName = "volumes" -) - -var ( - // ErrNotFound is the typed error returned when the requested volume name can't be found - ErrNotFound = fmt.Errorf("volume not found") - // volumeNameRegex ensures the name assigned for the volume is valid. - // This name is used to create the bind directory, so we need to avoid characters that - // would make the path to escape the root directory. - volumeNameRegex = names.RestrictedNamePattern -) - -type activeMount struct { - count uint64 - mounted bool -} - -// New instantiates a new Root instance with the provided scope. Scope -// is the base path that the Root instance uses to store its -// volumes. The base path is created here if it does not exist. -func New(scope string, rootIDs idtools.IDPair) (*Root, error) { - rootDirectory := filepath.Join(scope, volumesPathName) - - if err := idtools.MkdirAllAndChown(rootDirectory, 0700, rootIDs); err != nil { - return nil, err - } - - r := &Root{ - scope: scope, - path: rootDirectory, - volumes: make(map[string]*localVolume), - rootIDs: rootIDs, - } - - dirs, err := ioutil.ReadDir(rootDirectory) - if err != nil { - return nil, err - } - - for _, d := range dirs { - if !d.IsDir() { - continue - } - - name := filepath.Base(d.Name()) - v := &localVolume{ - driverName: r.Name(), - name: name, - path: r.DataPath(name), - } - r.volumes[name] = v - optsFilePath := filepath.Join(rootDirectory, name, "opts.json") - if b, err := ioutil.ReadFile(optsFilePath); err == nil { - opts := optsConfig{} - if err := json.Unmarshal(b, &opts); err != nil { - return nil, errors.Wrapf(err, "error while unmarshaling volume options for volume: %s", name) - } - // Make sure this isn't an empty optsConfig. - // This could be empty due to buggy behavior in older versions of Docker. 
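The volumeDriverProxy methods above all follow one request/response shape: fill a typed request struct, call the named VolumeDriver method over the plugin transport, then surface a non-empty ret.Err as a Go error. A minimal standalone sketch of that pattern, assuming a plugin that accepts JSON over HTTP POST; the endpoint path and plain net/http client here are illustrative stand-ins for the plugins package transport:

    package proxysketch

    import (
    	"bytes"
    	"encoding/json"
    	"errors"
    	"net/http"
    )

    type createRequest struct {
    	Name string
    	Opts map[string]string
    }

    type createResponse struct {
    	Err string
    }

    // create mirrors volumeDriverProxy.Create: encode the typed request,
    // POST it to the driver method endpoint, decode the typed response,
    // and convert a non-empty Err field into a Go error.
    func create(baseURL, name string, opts map[string]string) error {
    	body, err := json.Marshal(createRequest{Name: name, Opts: opts})
    	if err != nil {
    		return err
    	}
    	resp, err := http.Post(baseURL+"/VolumeDriver.Create", "application/json", bytes.NewReader(body))
    	if err != nil {
    		return err
    	}
    	defer resp.Body.Close()

    	var ret createResponse
    	if err := json.NewDecoder(resp.Body).Decode(&ret); err != nil {
    		return err
    	}
    	if ret.Err != "" {
    		return errors.New(ret.Err)
    	}
    	return nil
    }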
- if !reflect.DeepEqual(opts, optsConfig{}) { - v.opts = &opts - } - - // unmount anything that may still be mounted (for example, from an unclean shutdown) - mount.Unmount(v.path) - } - } - - return r, nil -} - -// Root implements the Driver interface for the volume package and -// manages the creation/removal of volumes. It uses only standard vfs -// commands to create/remove dirs within its provided scope. -type Root struct { - m sync.Mutex - scope string - path string - volumes map[string]*localVolume - rootIDs idtools.IDPair -} - -// List lists all the volumes -func (r *Root) List() ([]volume.Volume, error) { - var ls []volume.Volume - r.m.Lock() - for _, v := range r.volumes { - ls = append(ls, v) - } - r.m.Unlock() - return ls, nil -} - -// DataPath returns the constructed path of this volume. -func (r *Root) DataPath(volumeName string) string { - return filepath.Join(r.path, volumeName, VolumeDataPathName) -} - -// Name returns the name of Root, defined in the volume package in the DefaultDriverName constant. -func (r *Root) Name() string { - return volume.DefaultDriverName -} - -// Create creates a new volume.Volume with the provided name, creating -// the underlying directory tree required for this volume in the -// process. -func (r *Root) Create(name string, opts map[string]string) (volume.Volume, error) { - if err := r.validateName(name); err != nil { - return nil, err - } - - r.m.Lock() - defer r.m.Unlock() - - v, exists := r.volumes[name] - if exists { - return v, nil - } - - path := r.DataPath(name) - if err := idtools.MkdirAllAndChown(path, 0755, r.rootIDs); err != nil { - return nil, errors.Wrapf(errdefs.System(err), "error while creating volume path '%s'", path) - } - - var err error - defer func() { - if err != nil { - os.RemoveAll(filepath.Dir(path)) - } - }() - - v = &localVolume{ - driverName: r.Name(), - name: name, - path: path, - } - - if len(opts) != 0 { - if err = setOpts(v, opts); err != nil { - return nil, err - } - var b []byte - b, err = json.Marshal(v.opts) - if err != nil { - return nil, err - } - if err = ioutil.WriteFile(filepath.Join(filepath.Dir(path), "opts.json"), b, 0600); err != nil { - return nil, errdefs.System(errors.Wrap(err, "error while persisting volume options")) - } - } - - r.volumes[name] = v - return v, nil -} - -// Remove removes the specified volume and all underlying data. If the -// given volume does not belong to this driver, an error is -// returned. The volume is reference counted; if all references are -// not released then the volume is not removed.
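A note on the opts.json write in Create above: Go permission arguments are octal. 0600 is owner read/write, while a decimal literal 600 is 0o1130 and produces unintended permission bits. A stdlib-only illustration:

    package permsketch

    import (
    	"fmt"
    	"os"
    )

    // Decimal 600 equals 0o1130; masking to the low nine permission bits
    // gives 0o130, i.e. ---x-wx---, not the intended -rw-------.
    func demo() {
    	fmt.Println(os.FileMode(0600).Perm()) // -rw-------
    	fmt.Println(os.FileMode(600).Perm())  // ---x-wx---
    }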
-func (r *Root) Remove(v volume.Volume) error { - r.m.Lock() - defer r.m.Unlock() - - lv, ok := v.(*localVolume) - if !ok { - return errdefs.System(errors.Errorf("unknown volume type %T", v)) - } - - if lv.active.count > 0 { - return errdefs.System(errors.Errorf("volume has active mounts")) - } - - if err := lv.unmount(); err != nil { - return err - } - - realPath, err := filepath.EvalSymlinks(lv.path) - if err != nil { - if !os.IsNotExist(err) { - return err - } - realPath = filepath.Dir(lv.path) - } - - if !r.scopedPath(realPath) { - return errdefs.System(errors.Errorf("Unable to remove a directory outside of the local volume root %s: %s", r.scope, realPath)) - } - - if err := removePath(realPath); err != nil { - return err - } - - delete(r.volumes, lv.name) - return removePath(filepath.Dir(lv.path)) -} - -func removePath(path string) error { - if err := os.RemoveAll(path); err != nil { - if os.IsNotExist(err) { - return nil - } - return errdefs.System(errors.Wrapf(err, "error removing volume path '%s'", path)) - } - return nil -} - -// Get looks up the volume for the given name and returns it if found -func (r *Root) Get(name string) (volume.Volume, error) { - r.m.Lock() - v, exists := r.volumes[name] - r.m.Unlock() - if !exists { - return nil, ErrNotFound - } - return v, nil -} - -// Scope returns the local volume scope -func (r *Root) Scope() string { - return volume.LocalScope -} - -type validationError string - -func (e validationError) Error() string { - return string(e) -} - -func (e validationError) InvalidParameter() {} - -func (r *Root) validateName(name string) error { - if len(name) == 1 { - return validationError("volume name is too short, names should be at least two alphanumeric characters") - } - if !volumeNameRegex.MatchString(name) { - return validationError(fmt.Sprintf("%q includes invalid characters for a local volume name, only %q are allowed. If you intended to pass a host directory, use absolute path", name, names.RestrictedNameChars)) - } - return nil -} - -// localVolume implements the Volume interface from the volume package and -// represents the volumes created by Root. -type localVolume struct { - m sync.Mutex - // unique name of the volume - name string - // path is the path on the host where the data lives - path string - // driverName is the name of the driver that created the volume. - driverName string - // opts is the parsed list of options used to create the volume - opts *optsConfig - // active refcounts the active mounts - active activeMount -} - -// Name returns the name of the given Volume. -func (v *localVolume) Name() string { - return v.name -} - -// DriverName returns the driver that created the given Volume. -func (v *localVolume) DriverName() string { - return v.driverName -} - -// Path returns the data location. -func (v *localVolume) Path() string { - return v.path -} - -// CachedPath returns the data location -func (v *localVolume) CachedPath() string { - return v.path -} - -// Mount implements the localVolume interface, returning the data location. 
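The validationError type above is a behavioral error: the empty InvalidParameter() method is a marker that lets the errdefs layer map the error to an invalid-parameter API response without depending on the concrete type. The convention in miniature; the names below are illustrative:

    package errsketch

    import "errors"

    type invalidParam struct{ msg string }

    func (e invalidParam) Error() string     { return e.msg }
    func (e invalidParam) InvalidParameter() {} // marker method, no behavior needed

    // isInvalidParameter reports whether err, or any error it wraps,
    // carries the InvalidParameter marker.
    func isInvalidParameter(err error) bool {
    	var target interface{ InvalidParameter() }
    	return errors.As(err, &target)
    }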
-// If there are any provided mount options, the resources will be mounted at this point -func (v *localVolume) Mount(id string) (string, error) { - v.m.Lock() - defer v.m.Unlock() - if v.opts != nil { - if !v.active.mounted { - if err := v.mount(); err != nil { - return "", errdefs.System(err) - } - v.active.mounted = true - } - v.active.count++ - } - return v.path, nil -} - -// Unmount dereferences the id, and if it is the last reference will unmount any resources -// that were previously mounted. -func (v *localVolume) Unmount(id string) error { - v.m.Lock() - defer v.m.Unlock() - - // Always decrement the count, even if the unmount fails - // Essentially docker doesn't care if this fails, it will send an error, but - // ultimately there's nothing that can be done. If we don't decrement the count - // this volume can never be removed until a daemon restart occurs. - if v.opts != nil { - v.active.count-- - } - - if v.active.count > 0 { - return nil - } - - return v.unmount() -} - -func (v *localVolume) unmount() error { - if v.opts != nil { - if err := mount.Unmount(v.path); err != nil { - if mounted, mErr := mount.Mounted(v.path); mounted || mErr != nil { - return errdefs.System(errors.Wrapf(err, "error while unmounting volume path '%s'", v.path)) - } - } - v.active.mounted = false - } - return nil -} - -func validateOpts(opts map[string]string) error { - for opt := range opts { - if !validOpts[opt] { - return validationError(fmt.Sprintf("invalid option key: %q", opt)) - } - } - return nil -} - -func (v *localVolume) Status() map[string]interface{} { - return nil -} - -// getAddress finds out address/hostname from options -func getAddress(opts string) string { - optsList := strings.Split(opts, ",") - for i := 0; i < len(optsList); i++ { - if strings.HasPrefix(optsList[i], "addr=") { - addr := strings.SplitN(optsList[i], "=", 2)[1] - return addr - } - } - return "" -} diff --git a/vendor/github.com/docker/docker/volume/local/local_unix.go b/vendor/github.com/docker/docker/volume/local/local_unix.go deleted file mode 100644 index b1c68b931..000000000 --- a/vendor/github.com/docker/docker/volume/local/local_unix.go +++ /dev/null @@ -1,99 +0,0 @@ -// +build linux freebsd - -// Package local provides the default implementation for volumes. It -// is used to mount data volume containers and directories local to -// the host server. -package local // import "github.com/docker/docker/volume/local" - -import ( - "fmt" - "net" - "os" - "path/filepath" - "strings" - "syscall" - "time" - - "github.com/pkg/errors" - - "github.com/docker/docker/pkg/mount" -) - -var ( - oldVfsDir = filepath.Join("vfs", "dir") - - validOpts = map[string]bool{ - "type": true, // specify the filesystem type for mount, e.g. nfs - "o": true, // generic mount options - "device": true, // device to mount from - } -) - -type optsConfig struct { - MountType string - MountOpts string - MountDevice string -} - -func (o *optsConfig) String() string { - return fmt.Sprintf("type='%s' device='%s' o='%s'", o.MountType, o.MountDevice, o.MountOpts) -} - -// scopedPath verifies that the path where the volume is located -// is under Docker's root and the valid local paths. 
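Mount and Unmount above form a reference-counting pair: the first reference performs the real mount, later ones only bump the count, and the count is decremented even when the real unmount fails so the volume can still be removed after a daemon restart. The same bookkeeping in isolation; the mount/unmount fields stand in for the real mount syscalls:

    package refsketch

    import "sync"

    // refCountedMount mirrors the activeMount bookkeeping above: the first
    // Mount performs the real mount, later Mounts only bump the count, and
    // only the last Unmount tears the mount down.
    type refCountedMount struct {
    	mu      sync.Mutex
    	count   uint64
    	mounted bool
    	mount   func() error
    	unmount func() error
    }

    func (m *refCountedMount) Mount() error {
    	m.mu.Lock()
    	defer m.mu.Unlock()
    	if !m.mounted {
    		if err := m.mount(); err != nil {
    			return err
    		}
    		m.mounted = true
    	}
    	m.count++
    	return nil
    }

    func (m *refCountedMount) Unmount() error {
    	m.mu.Lock()
    	defer m.mu.Unlock()
    	if m.count > 0 {
    		m.count-- // decrement even if the real unmount fails, as the driver above does
    	}
    	if m.count > 0 {
    		return nil
    	}
    	if err := m.unmount(); err != nil {
    		return err
    	}
    	m.mounted = false
    	return nil
    }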
-func (r *Root) scopedPath(realPath string) bool { - // Volumes path for Docker version >= 1.7 - if strings.HasPrefix(realPath, filepath.Join(r.scope, volumesPathName)) && realPath != filepath.Join(r.scope, volumesPathName) { - return true - } - - // Volumes path for Docker version < 1.7 - if strings.HasPrefix(realPath, filepath.Join(r.scope, oldVfsDir)) { - return true - } - - return false -} - -func setOpts(v *localVolume, opts map[string]string) error { - if len(opts) == 0 { - return nil - } - if err := validateOpts(opts); err != nil { - return err - } - - v.opts = &optsConfig{ - MountType: opts["type"], - MountOpts: opts["o"], - MountDevice: opts["device"], - } - return nil -} - -func (v *localVolume) mount() error { - if v.opts.MountDevice == "" { - return fmt.Errorf("missing device in volume options") - } - mountOpts := v.opts.MountOpts - if v.opts.MountType == "nfs" { - if addrValue := getAddress(v.opts.MountOpts); addrValue != "" && net.ParseIP(addrValue).To4() == nil { - ipAddr, err := net.ResolveIPAddr("ip", addrValue) - if err != nil { - return errors.Wrapf(err, "error resolving passed in nfs address") - } - mountOpts = strings.Replace(mountOpts, "addr="+addrValue, "addr="+ipAddr.String(), 1) - } - } - err := mount.Mount(v.opts.MountDevice, v.path, v.opts.MountType, mountOpts) - return errors.Wrapf(err, "error while mounting volume with options: %s", v.opts) -} - -func (v *localVolume) CreatedAt() (time.Time, error) { - fileInfo, err := os.Stat(v.path) - if err != nil { - return time.Time{}, err - } - sec, nsec := fileInfo.Sys().(*syscall.Stat_t).Ctim.Unix() - return time.Unix(sec, nsec), nil -} diff --git a/vendor/github.com/docker/docker/volume/local/local_windows.go b/vendor/github.com/docker/docker/volume/local/local_windows.go deleted file mode 100644 index d96fc0f59..000000000 --- a/vendor/github.com/docker/docker/volume/local/local_windows.go +++ /dev/null @@ -1,46 +0,0 @@ -// Package local provides the default implementation for volumes. It -// is used to mount data volume containers and directories local to -// the host server. -package local // import "github.com/docker/docker/volume/local" - -import ( - "fmt" - "os" - "path/filepath" - "strings" - "syscall" - "time" -) - -type optsConfig struct{} - -var validOpts map[string]bool - -// scopedPath verifies that the path where the volume is located -// is under Docker's root and the valid local paths. 
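The nfs branch of mount above rewrites an addr=hostname option into a numeric address before handing the option string to the kernel. A self-contained sketch of that rewrite, assuming the same comma-separated option syntax:

    package nfssketch

    import (
    	"net"
    	"strings"
    )

    // resolveAddr mirrors the nfs handling above: if the addr= option is a
    // hostname rather than an IPv4 literal, resolve it and substitute the IP
    // so the kernel mount does not have to do name resolution itself.
    func resolveAddr(mountOpts string) (string, error) {
    	for _, o := range strings.Split(mountOpts, ",") {
    		if !strings.HasPrefix(o, "addr=") {
    			continue
    		}
    		addr := strings.TrimPrefix(o, "addr=")
    		if net.ParseIP(addr).To4() != nil {
    			return mountOpts, nil // already an IPv4 literal
    		}
    		ipAddr, err := net.ResolveIPAddr("ip", addr)
    		if err != nil {
    			return "", err
    		}
    		return strings.Replace(mountOpts, "addr="+addr, "addr="+ipAddr.String(), 1), nil
    	}
    	return mountOpts, nil
    }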
-func (r *Root) scopedPath(realPath string) bool { - if strings.HasPrefix(realPath, filepath.Join(r.scope, volumesPathName)) && realPath != filepath.Join(r.scope, volumesPathName) { - return true - } - return false -} - -func setOpts(v *localVolume, opts map[string]string) error { - if len(opts) > 0 { - return fmt.Errorf("options are not supported on this platform") - } - return nil -} - -func (v *localVolume) mount() error { - return nil -} - -func (v *localVolume) CreatedAt() (time.Time, error) { - fileInfo, err := os.Stat(v.path) - if err != nil { - return time.Time{}, err - } - ft := fileInfo.Sys().(*syscall.Win32FileAttributeData).CreationTime - return time.Unix(0, ft.Nanoseconds()), nil -} diff --git a/vendor/github.com/docker/docker/volume/mounts/lcow_parser.go b/vendor/github.com/docker/docker/volume/mounts/lcow_parser.go deleted file mode 100644 index bafb7b07f..000000000 --- a/vendor/github.com/docker/docker/volume/mounts/lcow_parser.go +++ /dev/null @@ -1,34 +0,0 @@ -package mounts // import "github.com/docker/docker/volume/mounts" - -import ( - "errors" - "path" - - "github.com/docker/docker/api/types/mount" -) - -var lcowSpecificValidators mountValidator = func(m *mount.Mount) error { - if path.Clean(m.Target) == "/" { - return ErrVolumeTargetIsRoot - } - if m.Type == mount.TypeNamedPipe { - return errors.New("Linux containers on Windows do not support named pipe mounts") - } - return nil -} - -type lcowParser struct { - windowsParser -} - -func (p *lcowParser) ValidateMountConfig(mnt *mount.Mount) error { - return p.validateMountConfigReg(mnt, rxLCOWDestination, lcowSpecificValidators) -} - -func (p *lcowParser) ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) { - return p.parseMountRaw(raw, volumeDriver, rxLCOWDestination, false, lcowSpecificValidators) -} - -func (p *lcowParser) ParseMountSpec(cfg mount.Mount) (*MountPoint, error) { - return p.parseMountSpec(cfg, rxLCOWDestination, false, lcowSpecificValidators) -} diff --git a/vendor/github.com/docker/docker/volume/mounts/linux_parser.go b/vendor/github.com/docker/docker/volume/mounts/linux_parser.go deleted file mode 100644 index 8e436aec0..000000000 --- a/vendor/github.com/docker/docker/volume/mounts/linux_parser.go +++ /dev/null @@ -1,417 +0,0 @@ -package mounts // import "github.com/docker/docker/volume/mounts" - -import ( - "errors" - "fmt" - "path" - "path/filepath" - "strings" - - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/volume" -) - -type linuxParser struct { -} - -func linuxSplitRawSpec(raw string) ([]string, error) { - if strings.Count(raw, ":") > 2 { - return nil, errInvalidSpec(raw) - } - - arr := strings.SplitN(raw, ":", 3) - if arr[0] == "" { - return nil, errInvalidSpec(raw) - } - return arr, nil -} - -func linuxValidateNotRoot(p string) error { - p = path.Clean(strings.Replace(p, `\`, `/`, -1)) - if p == "/" { - return ErrVolumeTargetIsRoot - } - return nil -} -func linuxValidateAbsolute(p string) error { - p = strings.Replace(p, `\`, `/`, -1) - if path.IsAbs(p) { - return nil - } - return fmt.Errorf("invalid mount path: '%s' mount path must be absolute", p) -} -func (p *linuxParser) ValidateMountConfig(mnt *mount.Mount) error { - // there was something looking like a bug in existing codebase: - // - validateMountConfig on linux was called with options skipping bind source existence when calling ParseMountRaw - // - but not when calling ParseMountSpec directly... 
nor when the unit test called it directly - return p.validateMountConfigImpl(mnt, true) -} -func (p *linuxParser) validateMountConfigImpl(mnt *mount.Mount, validateBindSourceExists bool) error { - if len(mnt.Target) == 0 { - return &errMountConfig{mnt, errMissingField("Target")} - } - - if err := linuxValidateNotRoot(mnt.Target); err != nil { - return &errMountConfig{mnt, err} - } - - if err := linuxValidateAbsolute(mnt.Target); err != nil { - return &errMountConfig{mnt, err} - } - - switch mnt.Type { - case mount.TypeBind: - if len(mnt.Source) == 0 { - return &errMountConfig{mnt, errMissingField("Source")} - } - // Don't error out just because the propagation mode is not supported on the platform - if opts := mnt.BindOptions; opts != nil { - if len(opts.Propagation) > 0 && len(linuxPropagationModes) > 0 { - if _, ok := linuxPropagationModes[opts.Propagation]; !ok { - return &errMountConfig{mnt, fmt.Errorf("invalid propagation mode: %s", opts.Propagation)} - } - } - } - if mnt.VolumeOptions != nil { - return &errMountConfig{mnt, errExtraField("VolumeOptions")} - } - - if err := linuxValidateAbsolute(mnt.Source); err != nil { - return &errMountConfig{mnt, err} - } - - if validateBindSourceExists { - exists, _, _ := currentFileInfoProvider.fileInfo(mnt.Source) - if !exists { - return &errMountConfig{mnt, errBindSourceDoesNotExist(mnt.Source)} - } - } - - case mount.TypeVolume: - if mnt.BindOptions != nil { - return &errMountConfig{mnt, errExtraField("BindOptions")} - } - - if len(mnt.Source) == 0 && mnt.ReadOnly { - return &errMountConfig{mnt, fmt.Errorf("must not set ReadOnly mode when using anonymous volumes")} - } - case mount.TypeTmpfs: - if len(mnt.Source) != 0 { - return &errMountConfig{mnt, errExtraField("Source")} - } - if _, err := p.ConvertTmpfsOptions(mnt.TmpfsOptions, mnt.ReadOnly); err != nil { - return &errMountConfig{mnt, err} - } - default: - return &errMountConfig{mnt, errors.New("mount type unknown")} - } - return nil -} - -// read-write modes -var rwModes = map[string]bool{ - "rw": true, - "ro": true, -} - -// label modes -var linuxLabelModes = map[string]bool{ - "Z": true, - "z": true, -} - -// consistency modes -var linuxConsistencyModes = map[mount.Consistency]bool{ - mount.ConsistencyFull: true, - mount.ConsistencyCached: true, - mount.ConsistencyDelegated: true, -} -var linuxPropagationModes = map[mount.Propagation]bool{ - mount.PropagationPrivate: true, - mount.PropagationRPrivate: true, - mount.PropagationSlave: true, - mount.PropagationRSlave: true, - mount.PropagationShared: true, - mount.PropagationRShared: true, -} - -const linuxDefaultPropagationMode = mount.PropagationRPrivate - -func linuxGetPropagation(mode string) mount.Propagation { - for _, o := range strings.Split(mode, ",") { - prop := mount.Propagation(o) - if linuxPropagationModes[prop] { - return prop - } - } - return linuxDefaultPropagationMode -} - -func linuxHasPropagation(mode string) bool { - for _, o := range strings.Split(mode, ",") { - if linuxPropagationModes[mount.Propagation(o)] { - return true - } - } - return false -} - -func linuxValidMountMode(mode string) bool { - if mode == "" { - return true - } - - rwModeCount := 0 - labelModeCount := 0 - propagationModeCount := 0 - copyModeCount := 0 - consistencyModeCount := 0 - - for _, o := range strings.Split(mode, ",") { - switch { - case rwModes[o]: - rwModeCount++ - case linuxLabelModes[o]: - labelModeCount++ - case linuxPropagationModes[mount.Propagation(o)]: - propagationModeCount++ - case copyModeExists(o): - copyModeCount++ - case 
linuxConsistencyModes[mount.Consistency(o)]: - consistencyModeCount++ - default: - return false - } - } - - // Only one string for each mode is allowed. - if rwModeCount > 1 || labelModeCount > 1 || propagationModeCount > 1 || copyModeCount > 1 || consistencyModeCount > 1 { - return false - } - return true -} - -func (p *linuxParser) ReadWrite(mode string) bool { - if !linuxValidMountMode(mode) { - return false - } - - for _, o := range strings.Split(mode, ",") { - if o == "ro" { - return false - } - } - return true -} - -func (p *linuxParser) ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) { - arr, err := linuxSplitRawSpec(raw) - if err != nil { - return nil, err - } - - var spec mount.Mount - var mode string - switch len(arr) { - case 1: - // Just a destination path in the container - spec.Target = arr[0] - case 2: - if linuxValidMountMode(arr[1]) { - // Destination + Mode is not a valid volume - volumes - // cannot include a mode. e.g. /foo:rw - return nil, errInvalidSpec(raw) - } - // Host Source Path or Name + Destination - spec.Source = arr[0] - spec.Target = arr[1] - case 3: - // HostSourcePath+DestinationPath+Mode - spec.Source = arr[0] - spec.Target = arr[1] - mode = arr[2] - default: - return nil, errInvalidSpec(raw) - } - - if !linuxValidMountMode(mode) { - return nil, errInvalidMode(mode) - } - - if path.IsAbs(spec.Source) { - spec.Type = mount.TypeBind - } else { - spec.Type = mount.TypeVolume - } - - spec.ReadOnly = !p.ReadWrite(mode) - - // cannot assume that if a volume driver is passed in that we should set it - if volumeDriver != "" && spec.Type == mount.TypeVolume { - spec.VolumeOptions = &mount.VolumeOptions{ - DriverConfig: &mount.Driver{Name: volumeDriver}, - } - } - - if copyData, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet { - if spec.VolumeOptions == nil { - spec.VolumeOptions = &mount.VolumeOptions{} - } - spec.VolumeOptions.NoCopy = !copyData - } - if linuxHasPropagation(mode) { - spec.BindOptions = &mount.BindOptions{ - Propagation: linuxGetPropagation(mode), - } - } - - mp, err := p.parseMountSpec(spec, false) - if mp != nil { - mp.Mode = mode - } - if err != nil { - err = fmt.Errorf("%v: %v", errInvalidSpec(raw), err) - } - return mp, err -} -func (p *linuxParser) ParseMountSpec(cfg mount.Mount) (*MountPoint, error) { - return p.parseMountSpec(cfg, true) -} -func (p *linuxParser) parseMountSpec(cfg mount.Mount, validateBindSourceExists bool) (*MountPoint, error) { - if err := p.validateMountConfigImpl(&cfg, validateBindSourceExists); err != nil { - return nil, err - } - mp := &MountPoint{ - RW: !cfg.ReadOnly, - Destination: path.Clean(filepath.ToSlash(cfg.Target)), - Type: cfg.Type, - Spec: cfg, - } - - switch cfg.Type { - case mount.TypeVolume: - if cfg.Source == "" { - mp.Name = stringid.GenerateNonCryptoID() - } else { - mp.Name = cfg.Source - } - mp.CopyData = p.DefaultCopyMode() - - if cfg.VolumeOptions != nil { - if cfg.VolumeOptions.DriverConfig != nil { - mp.Driver = cfg.VolumeOptions.DriverConfig.Name - } - if cfg.VolumeOptions.NoCopy { - mp.CopyData = false - } - } - case mount.TypeBind: - mp.Source = path.Clean(filepath.ToSlash(cfg.Source)) - if cfg.BindOptions != nil && len(cfg.BindOptions.Propagation) > 0 { - mp.Propagation = cfg.BindOptions.Propagation - } else { - // If user did not specify a propagation mode, get - // default propagation mode. 
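linuxValidMountMode above validates a comma-separated mode string by bucketing every option into a category and rejecting unknown options or repeated categories. The counting idea, reduced to two categories (sketch; the category tables are trimmed-down stand-ins for the real ones):

    package modesketch

    import "strings"

    var rwModes = map[string]bool{"rw": true, "ro": true}
    var labelModes = map[string]bool{"z": true, "Z": true}

    // validMode accepts an empty mode, rejects unknown options, and allows
    // at most one option per category, mirroring linuxValidMountMode.
    func validMode(mode string) bool {
    	if mode == "" {
    		return true
    	}
    	var rwCount, labelCount int
    	for _, o := range strings.Split(mode, ",") {
    		switch {
    		case rwModes[o]:
    			rwCount++
    		case labelModes[o]:
    			labelCount++
    		default:
    			return false
    		}
    	}
    	return rwCount <= 1 && labelCount <= 1
    }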
- mp.Propagation = linuxDefaultPropagationMode - } - case mount.TypeTmpfs: - // NOP - } - return mp, nil -} - -func (p *linuxParser) ParseVolumesFrom(spec string) (string, string, error) { - if len(spec) == 0 { - return "", "", fmt.Errorf("volumes-from specification cannot be an empty string") - } - - specParts := strings.SplitN(spec, ":", 2) - id := specParts[0] - mode := "rw" - - if len(specParts) == 2 { - mode = specParts[1] - if !linuxValidMountMode(mode) { - return "", "", errInvalidMode(mode) - } - // For now don't allow propagation properties while importing - // volumes from a data container. These volumes will inherit - // the same propagation property as the original volume - // in the data container. This can probably be relaxed in the future. - if linuxHasPropagation(mode) { - return "", "", errInvalidMode(mode) - } - // Do not allow copy modes on volumes-from - if _, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet { - return "", "", errInvalidMode(mode) - } - } - return id, mode, nil -} - -func (p *linuxParser) DefaultPropagationMode() mount.Propagation { - return linuxDefaultPropagationMode -} - -func (p *linuxParser) ConvertTmpfsOptions(opt *mount.TmpfsOptions, readOnly bool) (string, error) { - var rawOpts []string - if readOnly { - rawOpts = append(rawOpts, "ro") - } - - if opt != nil && opt.Mode != 0 { - rawOpts = append(rawOpts, fmt.Sprintf("mode=%o", opt.Mode)) - } - - if opt != nil && opt.SizeBytes != 0 { - // calculate the suffix here, making this linux specific, but that is - // okay, since the API is that way anyways. - - // we do this by finding the suffix that divides evenly into the - // value, returning the value itself, with no suffix, if it fails. - // - // For the most part, we don't enforce any semantics on these values. - // The operating system will usually align this and enforce minimums - // and maximums. - var ( - size = opt.SizeBytes - suffix string - ) - for _, r := range []struct { - suffix string - divisor int64 - }{ - {"g", 1 << 30}, - {"m", 1 << 20}, - {"k", 1 << 10}, - } { - if size%r.divisor == 0 { - size = size / r.divisor - suffix = r.suffix - break - } - } - - rawOpts = append(rawOpts, fmt.Sprintf("size=%d%s", size, suffix)) - } - return strings.Join(rawOpts, ","), nil -} - -func (p *linuxParser) DefaultCopyMode() bool { - return true -} -func (p *linuxParser) ValidateVolumeName(name string) error { - return nil -} - -func (p *linuxParser) IsBackwardCompatible(m *MountPoint) bool { - return len(m.Source) > 0 || m.Driver == volume.DefaultDriverName -} - -func (p *linuxParser) ValidateTmpfsMountDestination(dest string) error { - if err := linuxValidateNotRoot(dest); err != nil { - return err - } - return linuxValidateAbsolute(dest) -} diff --git a/vendor/github.com/docker/docker/volume/mounts/mounts.go b/vendor/github.com/docker/docker/volume/mounts/mounts.go deleted file mode 100644 index 8f255a548..000000000 --- a/vendor/github.com/docker/docker/volume/mounts/mounts.go +++ /dev/null @@ -1,170 +0,0 @@ -package mounts // import "github.com/docker/docker/volume/mounts" - -import ( - "fmt" - "os" - "path/filepath" - "syscall" - - mounttypes "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/volume" - "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" -) - -// MountPoint is the intersection point between a volume and a container. It -// specifies which volume is to be used and where inside a container it should -// be mounted.
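The size handling in ConvertTmpfsOptions above picks the largest binary suffix that divides SizeBytes evenly and otherwise emits raw bytes. Extracted as a standalone helper (sketch): humanSize(64<<20) yields "size=64m", while humanSize(1000) stays "size=1000".

    package tmpfssketch

    import "fmt"

    // humanSize mirrors the suffix logic above: pick the largest binary
    // suffix that divides the byte count evenly, else emit raw bytes.
    func humanSize(size int64) string {
    	suffixes := []struct {
    		suffix  string
    		divisor int64
    	}{
    		{"g", 1 << 30},
    		{"m", 1 << 20},
    		{"k", 1 << 10},
    	}
    	for _, r := range suffixes {
    		if size%r.divisor == 0 {
    			return fmt.Sprintf("size=%d%s", size/r.divisor, r.suffix)
    		}
    	}
    	return fmt.Sprintf("size=%d", size)
    }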
-// -// Note that this type is embedded in the `container.Container` object and persisted to disk. -// Changes to this struct need to be synced with the on-disk state. -type MountPoint struct { - // Source is the source path of the mount. - // E.g. `mount --bind /foo /bar`, `/foo` is the `Source`. - Source string - // Destination is the path relative to the container root (`/`) to the mount point - // It is where the `Source` is mounted to - Destination string - // RW is set to true when the mountpoint should be mounted as read-write - RW bool - // Name is the name reference to the underlying data defined by `Source` - // e.g., the volume name - Name string - // Driver is the volume driver used to create the volume (if it is a volume) - Driver string - // Type of mount to use, see `Type` definitions in github.com/docker/docker/api/types/mount - Type mounttypes.Type `json:",omitempty"` - // Volume is the volume providing data to this mountpoint. - // This is nil unless `Type` is set to `TypeVolume` - Volume volume.Volume `json:"-"` - - // Mode is the comma separated list of options supplied by the user when creating - // the bind/volume mount. - // Note Mode is not used on Windows - Mode string `json:"Relabel,omitempty"` // Originally this field was `Relabel` - - // Propagation describes how the mounts are propagated from the host into the - // mount point, and vice-versa. - // See https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt - // Note Propagation is not used on Windows - Propagation mounttypes.Propagation `json:",omitempty"` // Mount propagation string - - // Specifies if data should be copied from the container before the first mount - // Use a pointer here so we can tell if the user set this value explicitly - // This allows us to error out when the user explicitly enabled copy but we can't copy due to the volume being populated - CopyData bool `json:"-"` - // ID is the opaque ID used to pass to the volume driver. - // This should be set by calls to `Mount` and unset by calls to `Unmount` - ID string `json:",omitempty"` - - // Spec is a copy of the API request that created this mount. - Spec mounttypes.Mount - - // Track usage of this mountpoint - // Specifically needed for containers which are running and calls to `docker cp` - // because both these actions require mounting the volumes. - active int -} - -// Cleanup frees resources used by the mountpoint -func (m *MountPoint) Cleanup() error { - if m.Volume == nil || m.ID == "" { - return nil - } - - if err := m.Volume.Unmount(m.ID); err != nil { - return errors.Wrapf(err, "error unmounting volume %s", m.Volume.Name()) - } - - m.active-- - if m.active == 0 { - m.ID = "" - } - return nil -} - -// Setup sets up a mount point by either mounting the volume if it is -// configured, or creating the source directory if supplied. -// The optional checkFun parameter allows doing additional checking -// before creating the source directory on the host.
-func (m *MountPoint) Setup(mountLabel string, rootIDs idtools.IDPair, checkFun func(m *MountPoint) error) (path string, err error) { - defer func() { - if err != nil || !label.RelabelNeeded(m.Mode) { - return - } - - var sourcePath string - sourcePath, err = filepath.EvalSymlinks(m.Source) - if err != nil { - path = "" - err = errors.Wrapf(err, "error evaluating symlinks from mount source %q", m.Source) - return - } - err = label.Relabel(sourcePath, mountLabel, label.IsShared(m.Mode)) - if err == syscall.ENOTSUP { - err = nil - } - if err != nil { - path = "" - err = errors.Wrapf(err, "error setting label on mount source '%s'", sourcePath) - } - }() - - if m.Volume != nil { - id := m.ID - if id == "" { - id = stringid.GenerateNonCryptoID() - } - path, err := m.Volume.Mount(id) - if err != nil { - return "", errors.Wrapf(err, "error while mounting volume '%s'", m.Source) - } - - m.ID = id - m.active++ - return path, nil - } - - if len(m.Source) == 0 { - return "", fmt.Errorf("Unable to setup mount point, neither source nor volume defined") - } - - if m.Type == mounttypes.TypeBind { - // Before creating the source directory on the host, invoke checkFun if it's not nil. One of - // the use case is to forbid creating the daemon socket as a directory if the daemon is in - // the process of shutting down. - if checkFun != nil { - if err := checkFun(m); err != nil { - return "", err - } - } - // idtools.MkdirAllNewAs() produces an error if m.Source exists and is a file (not a directory) - // also, makes sure that if the directory is created, the correct remapped rootUID/rootGID will own it - if err := idtools.MkdirAllAndChownNew(m.Source, 0755, rootIDs); err != nil { - if perr, ok := err.(*os.PathError); ok { - if perr.Err != syscall.ENOTDIR { - return "", errors.Wrapf(err, "error while creating mount source path '%s'", m.Source) - } - } - } - } - return m.Source, nil -} - -// Path returns the path of a volume in a mount point. -func (m *MountPoint) Path() string { - if m.Volume != nil { - return m.Volume.Path() - } - return m.Source -} - -func errInvalidMode(mode string) error { - return errors.Errorf("invalid mode: %v", mode) -} - -func errInvalidSpec(spec string) error { - return errors.Errorf("invalid volume specification: '%s'", spec) -} diff --git a/vendor/github.com/docker/docker/volume/mounts/parser.go b/vendor/github.com/docker/docker/volume/mounts/parser.go deleted file mode 100644 index 73681750e..000000000 --- a/vendor/github.com/docker/docker/volume/mounts/parser.go +++ /dev/null @@ -1,47 +0,0 @@ -package mounts // import "github.com/docker/docker/volume/mounts" - -import ( - "errors" - "runtime" - - "github.com/docker/docker/api/types/mount" -) - -const ( - // OSLinux is the same as runtime.GOOS on linux - OSLinux = "linux" - // OSWindows is the same as runtime.GOOS on windows - OSWindows = "windows" -) - -// ErrVolumeTargetIsRoot is returned when the target destination is root. -// It's used by both LCOW and Linux parsers. 
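MountPoint.Setup above leans on a Go idiom: a deferred closure over named return values, so the relabel step runs on every successful return path and can still rewrite both path and err. The shape of that idiom, with a stand-in for label.Relabel:

    package setupsketch

    import "fmt"

    // setup returns from several paths; the deferred closure sees the final
    // values of the named results and may replace them, exactly like the
    // SELinux relabel block in MountPoint.Setup.
    func setup(source string) (path string, err error) {
    	defer func() {
    		if err != nil {
    			return // nothing to relabel on failure
    		}
    		if relabelErr := relabel(path); relabelErr != nil {
    			path = ""
    			err = fmt.Errorf("error setting label on mount source %q: %w", source, relabelErr)
    		}
    	}()
    	return source, nil
    }

    func relabel(path string) error { return nil } // stand-in for label.Relabel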
-var ErrVolumeTargetIsRoot = errors.New("invalid specification: destination can't be '/'") - -// Parser represents a platform specific parser for mount expressions -type Parser interface { - ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) - ParseMountSpec(cfg mount.Mount) (*MountPoint, error) - ParseVolumesFrom(spec string) (string, string, error) - DefaultPropagationMode() mount.Propagation - ConvertTmpfsOptions(opt *mount.TmpfsOptions, readOnly bool) (string, error) - DefaultCopyMode() bool - ValidateVolumeName(name string) error - ReadWrite(mode string) bool - IsBackwardCompatible(m *MountPoint) bool - HasResource(m *MountPoint, absPath string) bool - ValidateTmpfsMountDestination(dest string) error - ValidateMountConfig(mt *mount.Mount) error -} - -// NewParser creates a parser for a given container OS, depending on the current host OS (linux on a windows host will resolve to an lcowParser) -func NewParser(containerOS string) Parser { - switch containerOS { - case OSWindows: - return &windowsParser{} - } - if runtime.GOOS == OSWindows { - return &lcowParser{} - } - return &linuxParser{} -} diff --git a/vendor/github.com/docker/docker/volume/mounts/validate.go b/vendor/github.com/docker/docker/volume/mounts/validate.go deleted file mode 100644 index 0b7152690..000000000 --- a/vendor/github.com/docker/docker/volume/mounts/validate.go +++ /dev/null @@ -1,28 +0,0 @@ -package mounts // import "github.com/docker/docker/volume/mounts" - -import ( - "fmt" - - "github.com/docker/docker/api/types/mount" - "github.com/pkg/errors" -) - -type errMountConfig struct { - mount *mount.Mount - err error -} - -func (e *errMountConfig) Error() string { - return fmt.Sprintf("invalid mount config for type %q: %v", e.mount.Type, e.err.Error()) -} - -func errBindSourceDoesNotExist(path string) error { - return errors.Errorf("bind mount source path does not exist: %s", path) -} - -func errExtraField(name string) error { - return errors.Errorf("field %s must not be specified", name) -} -func errMissingField(name string) error { - return errors.Errorf("field %s must not be empty", name) -} diff --git a/vendor/github.com/docker/docker/volume/mounts/volume_copy.go b/vendor/github.com/docker/docker/volume/mounts/volume_copy.go deleted file mode 100644 index 04056fa50..000000000 --- a/vendor/github.com/docker/docker/volume/mounts/volume_copy.go +++ /dev/null @@ -1,23 +0,0 @@ -package mounts // import "github.com/docker/docker/volume/mounts" - -import "strings" - -// {<copy mode>=isEnabled} -var copyModes = map[string]bool{ - "nocopy": false, -} - -func copyModeExists(mode string) bool { - _, exists := copyModes[mode] - return exists -} - -// getCopyMode gets the copy mode from the mode string for mounts -func getCopyMode(mode string, def bool) (bool, bool) { - for _, o := range strings.Split(mode, ",") { - if isEnabled, exists := copyModes[o]; exists { - return isEnabled, true - } - } - return def, false -} diff --git a/vendor/github.com/docker/docker/volume/mounts/volume_unix.go b/vendor/github.com/docker/docker/volume/mounts/volume_unix.go deleted file mode 100644 index c6d51e071..000000000 --- a/vendor/github.com/docker/docker/volume/mounts/volume_unix.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build linux freebsd darwin - -package mounts // import "github.com/docker/docker/volume/mounts" - -import ( - "fmt" - "path/filepath" - "strings" -) - -func (p *linuxParser) HasResource(m *MountPoint, absolutePath string) bool { - relPath, err := filepath.Rel(m.Destination, absolutePath) - return err == nil && relPath != 
".." && !strings.HasPrefix(relPath, fmt.Sprintf("..%c", filepath.Separator)) -} - -func (p *windowsParser) HasResource(m *MountPoint, absolutePath string) bool { - return false -} diff --git a/vendor/github.com/docker/docker/volume/mounts/volume_windows.go b/vendor/github.com/docker/docker/volume/mounts/volume_windows.go deleted file mode 100644 index 773e7db88..000000000 --- a/vendor/github.com/docker/docker/volume/mounts/volume_windows.go +++ /dev/null @@ -1,8 +0,0 @@ -package mounts // import "github.com/docker/docker/volume/mounts" - -func (p *windowsParser) HasResource(m *MountPoint, absolutePath string) bool { - return false -} -func (p *linuxParser) HasResource(m *MountPoint, absolutePath string) bool { - return false -} diff --git a/vendor/github.com/docker/docker/volume/mounts/windows_parser.go b/vendor/github.com/docker/docker/volume/mounts/windows_parser.go deleted file mode 100644 index ac6104404..000000000 --- a/vendor/github.com/docker/docker/volume/mounts/windows_parser.go +++ /dev/null @@ -1,456 +0,0 @@ -package mounts // import "github.com/docker/docker/volume/mounts" - -import ( - "errors" - "fmt" - "os" - "regexp" - "runtime" - "strings" - - "github.com/docker/docker/api/types/mount" - "github.com/docker/docker/pkg/stringid" -) - -type windowsParser struct { -} - -const ( - // Spec should be in the format [source:]destination[:mode] - // - // Examples: c:\foo bar:d:rw - // c:\foo:d:\bar - // myname:d: - // d:\ - // - // Explanation of this regex! Thanks @thaJeztah on IRC and gist for help. See - // https://gist.github.com/thaJeztah/6185659e4978789fb2b2. A good place to - // test is https://regex-golang.appspot.com/assets/html/index.html - // - // Useful link for referencing named capturing groups: - // http://stackoverflow.com/questions/20750843/using-named-matches-from-go-regex - // - // There are three match groups: source, destination and mode. - // - - // rxHostDir is the first option of a source - rxHostDir = `(?:\\\\\?\\)?[a-z]:[\\/](?:[^\\/:*?"<>|\r\n]+[\\/]?)*` - // rxName is the second option of a source - rxName = `[^\\/:*?"<>|\r\n]+` - - // RXReservedNames are reserved names not possible on Windows - rxReservedNames = `(con)|(prn)|(nul)|(aux)|(com[1-9])|(lpt[1-9])` - - // rxPipe is a named path pipe (starts with `\\.\pipe\`, possibly with / instead of \) - rxPipe = `[/\\]{2}.[/\\]pipe[/\\][^:*?"<>|\r\n]+` - // rxSource is the combined possibilities for a source - rxSource = `((?P((` + rxHostDir + `)|(` + rxName + `)|(` + rxPipe + `))):)?` - - // Source. Can be either a host directory, a name, or omitted: - // HostDir: - // - Essentially using the folder solution from - // https://www.safaribooksonline.com/library/view/regular-expressions-cookbook/9781449327453/ch08s18.html - // but adding case insensitivity. 
- // - Must be an absolute path such as c:\path - // - Can include spaces such as `c:\program files` - // - And then followed by a colon which is not in the capture group - // - And can be optional - // Name: - // - Must not contain invalid NTFS filename characters (https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx) - // - And then followed by a colon which is not in the capture group - // - And can be optional - - // rxDestination is the regex expression for the mount destination - rxDestination = `(?P<destination>((?:\\\\\?\\)?([a-z]):((?:[\\/][^\\/:*?"<>\r\n]+)*[\\/]?))|(` + rxPipe + `))` - - rxLCOWDestination = `(?P<destination>/(?:[^\\/:*?"<>\r\n]+[/]?)*)` - // Destination (aka container path): - // - Variation on hostdir but can be a drive followed by colon as well - // - If a path, must be absolute. Can include spaces - // - Drive cannot be c: (explicitly checked in code, not RegEx) - - // rxMode is the regex expression for the mode of the mount - // Mode (optional): - // - Hopefully self explanatory in comparison to above regex's. - // - Colon is not in the capture group - rxMode = `(:(?P<mode>(?i)ro|rw))?` -) - -type mountValidator func(mnt *mount.Mount) error - -func windowsSplitRawSpec(raw, destRegex string) ([]string, error) { - specExp := regexp.MustCompile(`^` + rxSource + destRegex + rxMode + `$`) - match := specExp.FindStringSubmatch(strings.ToLower(raw)) - - // Must have something back - if len(match) == 0 { - return nil, errInvalidSpec(raw) - } - - var split []string - matchgroups := make(map[string]string) - // Pull out the sub expressions from the named capture groups - for i, name := range specExp.SubexpNames() { - matchgroups[name] = strings.ToLower(match[i]) - } - if source, exists := matchgroups["source"]; exists { - if source != "" { - split = append(split, source) - } - } - if destination, exists := matchgroups["destination"]; exists { - if destination != "" { - split = append(split, destination) - } - } - if mode, exists := matchgroups["mode"]; exists { - if mode != "" { - split = append(split, mode) - } - } - // Fix #26329. If the destination appears to be a file, and the source is null, - // it may be because we've fallen through the possible naming regex and hit a - // situation where the user intention was to map a file into a container through - // a local volume, but this is not supported by the platform. - if matchgroups["source"] == "" && matchgroups["destination"] != "" { - volExp := regexp.MustCompile(`^` + rxName + `$`) - reservedNameExp := regexp.MustCompile(`^` + rxReservedNames + `$`) - - if volExp.MatchString(matchgroups["destination"]) { - if reservedNameExp.MatchString(matchgroups["destination"]) { - return nil, fmt.Errorf("volume name %q cannot be a reserved word for Windows filenames", matchgroups["destination"]) - } - } else { - - exists, isDir, _ := currentFileInfoProvider.fileInfo(matchgroups["destination"]) - if exists && !isDir { - return nil, fmt.Errorf("file '%s' cannot be mapped. 
Only directories can be mapped on this platform", matchgroups["destination"]) - - } - } - } - return split, nil -} - -func windowsValidMountMode(mode string) bool { - if mode == "" { - return true - } - return rwModes[strings.ToLower(mode)] -} -func windowsValidateNotRoot(p string) error { - p = strings.ToLower(strings.Replace(p, `/`, `\`, -1)) - if p == "c:" || p == `c:\` { - return fmt.Errorf("destination path cannot be `c:` or `c:\\`: %v", p) - } - return nil -} - -var windowsSpecificValidators mountValidator = func(mnt *mount.Mount) error { - return windowsValidateNotRoot(mnt.Target) -} - -func windowsValidateRegex(p, r string) error { - if regexp.MustCompile(`^` + r + `$`).MatchString(strings.ToLower(p)) { - return nil - } - return fmt.Errorf("invalid mount path: '%s'", p) -} -func windowsValidateAbsolute(p string) error { - if err := windowsValidateRegex(p, rxDestination); err != nil { - return fmt.Errorf("invalid mount path: '%s' mount path must be absolute", p) - } - return nil -} - -func windowsDetectMountType(p string) mount.Type { - if strings.HasPrefix(p, `\\.\pipe\`) { - return mount.TypeNamedPipe - } else if regexp.MustCompile(`^` + rxHostDir + `$`).MatchString(p) { - return mount.TypeBind - } else { - return mount.TypeVolume - } -} - -func (p *windowsParser) ReadWrite(mode string) bool { - return strings.ToLower(mode) != "ro" -} - -// IsVolumeNameValid checks a volume name in a platform specific manner. -func (p *windowsParser) ValidateVolumeName(name string) error { - nameExp := regexp.MustCompile(`^` + rxName + `$`) - if !nameExp.MatchString(name) { - return errors.New("invalid volume name") - } - nameExp = regexp.MustCompile(`^` + rxReservedNames + `$`) - if nameExp.MatchString(name) { - return fmt.Errorf("volume name %q cannot be a reserved word for Windows filenames", name) - } - return nil -} -func (p *windowsParser) ValidateMountConfig(mnt *mount.Mount) error { - return p.validateMountConfigReg(mnt, rxDestination, windowsSpecificValidators) -} - -type fileInfoProvider interface { - fileInfo(path string) (exist, isDir bool, err error) -} - -type defaultFileInfoProvider struct { -} - -func (defaultFileInfoProvider) fileInfo(path string) (exist, isDir bool, err error) { - fi, err := os.Stat(path) - if err != nil { - if !os.IsNotExist(err) { - return false, false, err - } - return false, false, nil - } - return true, fi.IsDir(), nil -} - -var currentFileInfoProvider fileInfoProvider = defaultFileInfoProvider{} - -func (p *windowsParser) validateMountConfigReg(mnt *mount.Mount, destRegex string, additionalValidators ...mountValidator) error { - - for _, v := range additionalValidators { - if err := v(mnt); err != nil { - return &errMountConfig{mnt, err} - } - } - if len(mnt.Target) == 0 { - return &errMountConfig{mnt, errMissingField("Target")} - } - - if err := windowsValidateRegex(mnt.Target, destRegex); err != nil { - return &errMountConfig{mnt, err} - } - - switch mnt.Type { - case mount.TypeBind: - if len(mnt.Source) == 0 { - return &errMountConfig{mnt, errMissingField("Source")} - } - // Don't error out just because the propagation mode is not supported on the platform - if opts := mnt.BindOptions; opts != nil { - if len(opts.Propagation) > 0 { - return &errMountConfig{mnt, fmt.Errorf("invalid propagation mode: %s", opts.Propagation)} - } - } - if mnt.VolumeOptions != nil { - return &errMountConfig{mnt, errExtraField("VolumeOptions")} - } - - if err := windowsValidateAbsolute(mnt.Source); err != nil { - return &errMountConfig{mnt, err} - } - - exists, isdir, err := 
currentFileInfoProvider.fileInfo(mnt.Source) - if err != nil { - return &errMountConfig{mnt, err} - } - if !exists { - return &errMountConfig{mnt, errBindSourceDoesNotExist(mnt.Source)} - } - if !isdir { - return &errMountConfig{mnt, fmt.Errorf("source path must be a directory")} - } - - case mount.TypeVolume: - if mnt.BindOptions != nil { - return &errMountConfig{mnt, errExtraField("BindOptions")} - } - - if len(mnt.Source) == 0 && mnt.ReadOnly { - return &errMountConfig{mnt, fmt.Errorf("must not set ReadOnly mode when using anonymous volumes")} - } - - if len(mnt.Source) != 0 { - if err := p.ValidateVolumeName(mnt.Source); err != nil { - return &errMountConfig{mnt, err} - } - } - case mount.TypeNamedPipe: - if len(mnt.Source) == 0 { - return &errMountConfig{mnt, errMissingField("Source")} - } - - if mnt.BindOptions != nil { - return &errMountConfig{mnt, errExtraField("BindOptions")} - } - - if mnt.ReadOnly { - return &errMountConfig{mnt, errExtraField("ReadOnly")} - } - - if windowsDetectMountType(mnt.Source) != mount.TypeNamedPipe { - return &errMountConfig{mnt, fmt.Errorf("'%s' is not a valid pipe path", mnt.Source)} - } - - if windowsDetectMountType(mnt.Target) != mount.TypeNamedPipe { - return &errMountConfig{mnt, fmt.Errorf("'%s' is not a valid pipe path", mnt.Target)} - } - default: - return &errMountConfig{mnt, errors.New("mount type unknown")} - } - return nil -} -func (p *windowsParser) ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) { - return p.parseMountRaw(raw, volumeDriver, rxDestination, true, windowsSpecificValidators) -} - -func (p *windowsParser) parseMountRaw(raw, volumeDriver, destRegex string, convertTargetToBackslash bool, additionalValidators ...mountValidator) (*MountPoint, error) { - arr, err := windowsSplitRawSpec(raw, destRegex) - if err != nil { - return nil, err - } - - var spec mount.Mount - var mode string - switch len(arr) { - case 1: - // Just a destination path in the container - spec.Target = arr[0] - case 2: - if windowsValidMountMode(arr[1]) { - // Destination + Mode is not a valid volume - volumes - // cannot include a mode. e.g. /foo:rw - return nil, errInvalidSpec(raw) - } - // Host Source Path or Name + Destination - spec.Source = strings.Replace(arr[0], `/`, `\`, -1) - spec.Target = arr[1] - case 3: - // HostSourcePath+DestinationPath+Mode - spec.Source = strings.Replace(arr[0], `/`, `\`, -1) - spec.Target = arr[1] - mode = arr[2] - default: - return nil, errInvalidSpec(raw) - } - if convertTargetToBackslash { - spec.Target = strings.Replace(spec.Target, `/`, `\`, -1) - } - - if !windowsValidMountMode(mode) { - return nil, errInvalidMode(mode) - } - - spec.Type = windowsDetectMountType(spec.Source) - spec.ReadOnly = !p.ReadWrite(mode) - - // cannot assume that if a volume driver is passed in that we should set it - if volumeDriver != "" && spec.Type == mount.TypeVolume { - spec.VolumeOptions = &mount.VolumeOptions{ - DriverConfig: &mount.Driver{Name: volumeDriver}, - } - } - - if copyData, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet { - if spec.VolumeOptions == nil { - spec.VolumeOptions = &mount.VolumeOptions{} - } - spec.VolumeOptions.NoCopy = !copyData - } - - mp, err := p.parseMountSpec(spec, destRegex, convertTargetToBackslash, additionalValidators...) 
- if mp != nil { - mp.Mode = mode - } - if err != nil { - err = fmt.Errorf("%v: %v", errInvalidSpec(raw), err) - } - return mp, err -} - -func (p *windowsParser) ParseMountSpec(cfg mount.Mount) (*MountPoint, error) { - return p.parseMountSpec(cfg, rxDestination, true, windowsSpecificValidators) -} -func (p *windowsParser) parseMountSpec(cfg mount.Mount, destRegex string, convertTargetToBackslash bool, additionalValidators ...mountValidator) (*MountPoint, error) { - if err := p.validateMountConfigReg(&cfg, destRegex, additionalValidators...); err != nil { - return nil, err - } - mp := &MountPoint{ - RW: !cfg.ReadOnly, - Destination: cfg.Target, - Type: cfg.Type, - Spec: cfg, - } - if convertTargetToBackslash { - mp.Destination = strings.Replace(cfg.Target, `/`, `\`, -1) - } - - switch cfg.Type { - case mount.TypeVolume: - if cfg.Source == "" { - mp.Name = stringid.GenerateNonCryptoID() - } else { - mp.Name = cfg.Source - } - mp.CopyData = p.DefaultCopyMode() - - if cfg.VolumeOptions != nil { - if cfg.VolumeOptions.DriverConfig != nil { - mp.Driver = cfg.VolumeOptions.DriverConfig.Name - } - if cfg.VolumeOptions.NoCopy { - mp.CopyData = false - } - } - case mount.TypeBind: - mp.Source = strings.Replace(cfg.Source, `/`, `\`, -1) - case mount.TypeNamedPipe: - mp.Source = strings.Replace(cfg.Source, `/`, `\`, -1) - } - // cleanup trailing `\` except for paths like `c:\` - if len(mp.Source) > 3 && mp.Source[len(mp.Source)-1] == '\\' { - mp.Source = mp.Source[:len(mp.Source)-1] - } - if len(mp.Destination) > 3 && mp.Destination[len(mp.Destination)-1] == '\\' { - mp.Destination = mp.Destination[:len(mp.Destination)-1] - } - return mp, nil -} - -func (p *windowsParser) ParseVolumesFrom(spec string) (string, string, error) { - if len(spec) == 0 { - return "", "", fmt.Errorf("volumes-from specification cannot be an empty string") - } - - specParts := strings.SplitN(spec, ":", 2) - id := specParts[0] - mode := "rw" - - if len(specParts) == 2 { - mode = specParts[1] - if !windowsValidMountMode(mode) { - return "", "", errInvalidMode(mode) - } - - // Do not allow copy modes on volumes-from - if _, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet { - return "", "", errInvalidMode(mode) - } - } - return id, mode, nil -} - -func (p *windowsParser) DefaultPropagationMode() mount.Propagation { - return mount.Propagation("") -} - -func (p *windowsParser) ConvertTmpfsOptions(opt *mount.TmpfsOptions, readOnly bool) (string, error) { - return "", fmt.Errorf("%s does not support tmpfs", runtime.GOOS) -} -func (p *windowsParser) DefaultCopyMode() bool { - return false -} -func (p *windowsParser) IsBackwardCompatible(m *MountPoint) bool { - return false -} - -func (p *windowsParser) ValidateTmpfsMountDestination(dest string) error { - return errors.New("Platform does not support tmpfs") -} diff --git a/vendor/github.com/docker/docker/volume/service/by.go b/vendor/github.com/docker/docker/volume/service/by.go deleted file mode 100644 index c5a4638d2..000000000 --- a/vendor/github.com/docker/docker/volume/service/by.go +++ /dev/null @@ -1,89 +0,0 @@ -package service // import "github.com/docker/docker/volume/service" - -import ( - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/volume" -) - -// By is an interface which is used to implement filtering on volumes. 
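Looking back at windowsSplitRawSpec above: it depends on the named capture groups (?P<source>...), (?P<destination>...) and (?P<mode>...) and reads the matches back by group name via SubexpNames. The same mechanism with a much simpler spec grammar (sketch):

    package regexsketch

    import (
    	"fmt"
    	"regexp"
    )

    // splitSpec parses [source:]destination[:mode] and returns the named
    // groups, mirroring how the windows parser recovers source/destination/mode.
    func splitSpec(raw string) (map[string]string, error) {
    	specExp := regexp.MustCompile(`^((?P<source>[^:]+):)?(?P<destination>[^:]+)(:(?P<mode>ro|rw))?$`)
    	match := specExp.FindStringSubmatch(raw)
    	if match == nil {
    		return nil, fmt.Errorf("invalid spec: %q", raw)
    	}
    	groups := make(map[string]string)
    	for i, name := range specExp.SubexpNames() {
    		if name != "" {
    			groups[name] = match[i]
    		}
    	}
    	return groups, nil
    }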
-type By interface { - isBy() -} - -// ByDriver is `By` that filters based on the driver names that are passed in -func ByDriver(drivers ...string) By { - return byDriver(drivers) -} - -type byDriver []string - -func (byDriver) isBy() {} - -// ByReferenced is a `By` that filters based on if the volume has references -type ByReferenced bool - -func (ByReferenced) isBy() {} - -// And creates a `By` combining all the passed in bys using AND logic. -func And(bys ...By) By { - and := make(andCombinator, 0, len(bys)) - for _, by := range bys { - and = append(and, by) - } - return and -} - -type andCombinator []By - -func (andCombinator) isBy() {} - -// Or creates a `By` combining all the passed in bys using OR logic. -func Or(bys ...By) By { - or := make(orCombinator, 0, len(bys)) - for _, by := range bys { - or = append(or, by) - } - return or -} - -type orCombinator []By - -func (orCombinator) isBy() {} - -// CustomFilter is a `By` that is used by callers to provide custom filtering -// logic. -type CustomFilter filterFunc - -func (CustomFilter) isBy() {} - -// FromList returns a By which sets the initial list of volumes to use -func FromList(ls *[]volume.Volume, by By) By { - return &fromList{by: by, ls: ls} -} - -type fromList struct { - by By - ls *[]volume.Volume -} - -func (fromList) isBy() {} - -func byLabelFilter(filter filters.Args) By { - return CustomFilter(func(v volume.Volume) bool { - dv, ok := v.(volume.DetailedVolume) - if !ok { - return false - } - - labels := dv.Labels() - if !filter.MatchKVList("label", labels) { - return false - } - if filter.Contains("label!") { - if filter.MatchKVList("label!", labels) { - return false - } - } - return true - }) -} diff --git a/vendor/github.com/docker/docker/volume/service/convert.go b/vendor/github.com/docker/docker/volume/service/convert.go deleted file mode 100644 index 2967dc672..000000000 --- a/vendor/github.com/docker/docker/volume/service/convert.go +++ /dev/null @@ -1,132 +0,0 @@ -package service - -import ( - "context" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/pkg/directory" - "github.com/docker/docker/volume" - "github.com/sirupsen/logrus" -) - -// convertOpts are used to pass options to `volumeToAPI` -type convertOpt interface { - isConvertOpt() -} - -type useCachedPath bool - -func (useCachedPath) isConvertOpt() {} - -type calcSize bool - -func (calcSize) isConvertOpt() {} - -type pathCacher interface { - CachedPath() string -} - -func (s *VolumesService) volumesToAPI(ctx context.Context, volumes []volume.Volume, opts ...convertOpt) []*types.Volume { - var ( - out = make([]*types.Volume, 0, len(volumes)) - getSize bool - cachedPath bool - ) - - for _, o := range opts { - switch t := o.(type) { - case calcSize: - getSize = bool(t) - case useCachedPath: - cachedPath = bool(t) - } - } - for _, v := range volumes { - select { - case <-ctx.Done(): - return nil - default: - } - apiV := volumeToAPIType(v) - - if cachedPath { - if vv, ok := v.(pathCacher); ok { - apiV.Mountpoint = vv.CachedPath() - } - } else { - apiV.Mountpoint = v.Path() - } - - if getSize { - p := v.Path() - if apiV.Mountpoint == "" { - apiV.Mountpoint = p - } - sz, err := directory.Size(ctx, p) - if err != nil { - logrus.WithError(err).WithField("volume", v.Name()).Warnf("Failed to determine size of volume") - sz = -1 - } - apiV.UsageData = &types.VolumeUsageData{Size: sz, RefCount: int64(s.vs.CountReferences(v))} - } - - out = append(out, &apiV) - } - return out -} - -func 
volumeToAPIType(v volume.Volume) types.Volume { - createdAt, _ := v.CreatedAt() - tv := types.Volume{ - Name: v.Name(), - Driver: v.DriverName(), - CreatedAt: createdAt.Format(time.RFC3339), - } - if v, ok := v.(volume.DetailedVolume); ok { - tv.Labels = v.Labels() - tv.Options = v.Options() - tv.Scope = v.Scope() - } - if cp, ok := v.(pathCacher); ok { - tv.Mountpoint = cp.CachedPath() - } - return tv -} - -func filtersToBy(filter filters.Args, acceptedFilters map[string]bool) (By, error) { - if err := filter.Validate(acceptedFilters); err != nil { - return nil, err - } - var bys []By - if drivers := filter.Get("driver"); len(drivers) > 0 { - bys = append(bys, ByDriver(drivers...)) - } - if filter.Contains("name") { - bys = append(bys, CustomFilter(func(v volume.Volume) bool { - return filter.Match("name", v.Name()) - })) - } - bys = append(bys, byLabelFilter(filter)) - - if filter.Contains("dangling") { - var dangling bool - if filter.ExactMatch("dangling", "true") || filter.ExactMatch("dangling", "1") { - dangling = true - } else if !filter.ExactMatch("dangling", "false") && !filter.ExactMatch("dangling", "0") { - return nil, invalidFilter{"dangling", filter.Get("dangling")} - } - bys = append(bys, ByReferenced(!dangling)) - } - - var by By - switch len(bys) { - case 0: - case 1: - by = bys[0] - default: - by = And(bys...) - } - return by, nil -} diff --git a/vendor/github.com/docker/docker/volume/service/db.go b/vendor/github.com/docker/docker/volume/service/db.go deleted file mode 100644 index 3b31f7bf1..000000000 --- a/vendor/github.com/docker/docker/volume/service/db.go +++ /dev/null @@ -1,95 +0,0 @@ -package service // import "github.com/docker/docker/volume/service" - -import ( - "encoding/json" - - "github.com/boltdb/bolt" - "github.com/docker/docker/errdefs" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -var volumeBucketName = []byte("volumes") - -type volumeMetadata struct { - Name string - Driver string - Labels map[string]string - Options map[string]string -} - -func (s *VolumeStore) setMeta(name string, meta volumeMetadata) error { - return s.db.Update(func(tx *bolt.Tx) error { - return setMeta(tx, name, meta) - }) -} - -func setMeta(tx *bolt.Tx, name string, meta volumeMetadata) error { - metaJSON, err := json.Marshal(meta) - if err != nil { - return err - } - b, err := tx.CreateBucketIfNotExists(volumeBucketName) - if err != nil { - return errors.Wrap(err, "error creating volume bucket") - } - return errors.Wrap(b.Put([]byte(name), metaJSON), "error setting volume metadata") -} - -func (s *VolumeStore) getMeta(name string) (volumeMetadata, error) { - var meta volumeMetadata - err := s.db.View(func(tx *bolt.Tx) error { - return getMeta(tx, name, &meta) - }) - return meta, err -} - -func getMeta(tx *bolt.Tx, name string, meta *volumeMetadata) error { - b := tx.Bucket(volumeBucketName) - if b == nil { - return errdefs.NotFound(errors.New("volume bucket does not exist")) - } - val := b.Get([]byte(name)) - if len(val) == 0 { - return nil - } - if err := json.Unmarshal(val, meta); err != nil { - return errors.Wrap(err, "error unmarshaling volume metadata") - } - return nil -} - -func (s *VolumeStore) removeMeta(name string) error { - return s.db.Update(func(tx *bolt.Tx) error { - return removeMeta(tx, name) - }) -} - -func removeMeta(tx *bolt.Tx, name string) error { - b := tx.Bucket(volumeBucketName) - return errors.Wrap(b.Delete([]byte(name)), "error removing volume metadata") -} - -// listMeta is used during restore to get the list of volume metadata -// from 
the on-disk database. -// Any errors that occur are only logged. -func listMeta(tx *bolt.Tx) []volumeMetadata { - var ls []volumeMetadata - b := tx.Bucket(volumeBucketName) - b.ForEach(func(k, v []byte) error { - if len(v) == 0 { - // don't try to unmarshal an empty value - return nil - } - - var m volumeMetadata - if err := json.Unmarshal(v, &m); err != nil { - // Just log the error - logrus.Errorf("Error while reading volume metadata for volume %q: %v", string(k), err) - return nil - } - ls = append(ls, m) - return nil - }) - return ls -} diff --git a/vendor/github.com/docker/docker/volume/service/default_driver.go b/vendor/github.com/docker/docker/volume/service/default_driver.go deleted file mode 100644 index 1c1d5c54b..000000000 --- a/vendor/github.com/docker/docker/volume/service/default_driver.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build linux windows - -package service // import "github.com/docker/docker/volume/service" -import ( - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/volume" - "github.com/docker/docker/volume/drivers" - "github.com/docker/docker/volume/local" - "github.com/pkg/errors" -) - -func setupDefaultDriver(store *drivers.Store, root string, rootIDs idtools.IDPair) error { - d, err := local.New(root, rootIDs) - if err != nil { - return errors.Wrap(err, "error setting up default driver") - } - if !store.Register(d, volume.DefaultDriverName) { - return errors.New("local volume driver could not be registered") - } - return nil -} diff --git a/vendor/github.com/docker/docker/volume/service/default_driver_stubs.go b/vendor/github.com/docker/docker/volume/service/default_driver_stubs.go deleted file mode 100644 index fdb275eb9..000000000 --- a/vendor/github.com/docker/docker/volume/service/default_driver_stubs.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !linux,!windows - -package service // import "github.com/docker/docker/volume/service" - -import ( - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/volume/drivers" -) - -func setupDefaultDriver(_ *drivers.Store, _ string, _ idtools.IDPair) error { return nil } diff --git a/vendor/github.com/docker/docker/volume/service/errors.go b/vendor/github.com/docker/docker/volume/service/errors.go deleted file mode 100644 index ce2d678da..000000000 --- a/vendor/github.com/docker/docker/volume/service/errors.go +++ /dev/null @@ -1,111 +0,0 @@ -package service // import "github.com/docker/docker/volume/service" - -import ( - "fmt" - "strings" -) - -const ( - // errVolumeInUse is a typed error returned when trying to remove a volume that is currently in use by a container - errVolumeInUse conflictError = "volume is in use" - // errNoSuchVolume is a typed error returned if the requested volume doesn't exist in the volume store - errNoSuchVolume notFoundError = "no such volume" - // errNameConflict is a typed error returned on create when a volume exists with the given name, but for a different driver - errNameConflict conflictError = "volume name must be unique" -) - -type conflictError string - -func (e conflictError) Error() string { - return string(e) -} -func (conflictError) Conflict() {} - -type notFoundError string - -func (e notFoundError) Error() string { - return string(e) -} - -func (notFoundError) NotFound() {} - -// OpErr is the error type returned by functions in the store package. It describes -// the operation, volume name, and error. -type OpErr struct { - // Err is the error that occurred during the operation. 
- Err error - // Op is the operation which caused the error, such as "create", or "list". - Op string - // Name is the name of the resource being requested for this op, typically the volume name or the driver name. - Name string - // Refs is the list of references associated with the resource. - Refs []string -} - -// Error satisfies the built-in error interface type. -func (e *OpErr) Error() string { - if e == nil { - return "" - } - s := e.Op - if e.Name != "" { - s = s + " " + e.Name - } - - s = s + ": " + e.Err.Error() - if len(e.Refs) > 0 { - s = s + " - " + "[" + strings.Join(e.Refs, ", ") + "]" - } - return s -} - -// Cause returns the error the caused this error -func (e *OpErr) Cause() error { - return e.Err -} - -// IsInUse returns a boolean indicating whether the error indicates that a -// volume is in use -func IsInUse(err error) bool { - return isErr(err, errVolumeInUse) -} - -// IsNotExist returns a boolean indicating whether the error indicates that the volume does not exist -func IsNotExist(err error) bool { - return isErr(err, errNoSuchVolume) -} - -// IsNameConflict returns a boolean indicating whether the error indicates that a -// volume name is already taken -func IsNameConflict(err error) bool { - return isErr(err, errNameConflict) -} - -type causal interface { - Cause() error -} - -func isErr(err error, expected error) bool { - switch pe := err.(type) { - case nil: - return false - case causal: - return isErr(pe.Cause(), expected) - } - return err == expected -} - -type invalidFilter struct { - filter string - value interface{} -} - -func (e invalidFilter) Error() string { - msg := "Invalid filter '" + e.filter - if e.value != nil { - msg += fmt.Sprintf("=%s", e.value) - } - return msg + "'" -} - -func (e invalidFilter) InvalidParameter() {} diff --git a/vendor/github.com/docker/docker/volume/service/opts/opts.go b/vendor/github.com/docker/docker/volume/service/opts/opts.go deleted file mode 100644 index 6c7e5f4ea..000000000 --- a/vendor/github.com/docker/docker/volume/service/opts/opts.go +++ /dev/null @@ -1,89 +0,0 @@ -package opts - -// CreateOption is used to pass options in when creating a volume -type CreateOption func(*CreateConfig) - -// CreateConfig is the set of config options that can be set when creating -// a volume -type CreateConfig struct { - Options map[string]string - Labels map[string]string - Reference string -} - -// WithCreateLabels creates a CreateOption which sets the labels to the -// passed in value -func WithCreateLabels(labels map[string]string) CreateOption { - return func(cfg *CreateConfig) { - cfg.Labels = labels - } -} - -// WithCreateOptions creates a CreateOption which sets the options passed -// to the volume driver when creating a volume to the options passed in. -func WithCreateOptions(opts map[string]string) CreateOption { - return func(cfg *CreateConfig) { - cfg.Options = opts - } -} - -// WithCreateReference creats a CreateOption which sets a reference to use -// when creating a volume. This ensures that the volume is created with a reference -// already attached to it to prevent race conditions with Create and volume cleanup. -func WithCreateReference(ref string) CreateOption { - return func(cfg *CreateConfig) { - cfg.Reference = ref - } -} - -// GetConfig is used with `GetOption` to set options for the volumes service's -// `Get` implementation. 
-type GetConfig struct {
-	Driver        string
-	Reference     string
-	ResolveStatus bool
-}
-
-// GetOption is passed to the service `Get` to add extra details on the get request
-type GetOption func(*GetConfig)
-
-// WithGetDriver provides the driver to get the volume from
-// If no driver is provided to `Get`, first the available metadata is checked
-// to see which driver it belongs to, if that is not available all drivers are
-// probed to find the volume.
-func WithGetDriver(name string) GetOption {
-	return func(o *GetConfig) {
-		o.Driver = name
-	}
-}
-
-// WithGetReference indicates to `Get` to increment the reference count for the
-// retrieved volume with the provided reference ID.
-func WithGetReference(ref string) GetOption {
-	return func(o *GetConfig) {
-		o.Reference = ref
-	}
-}
-
-// WithGetResolveStatus indicates to `Get` to also fetch the volume status.
-// This can cause significant overhead in the volume lookup.
-func WithGetResolveStatus(cfg *GetConfig) {
-	cfg.ResolveStatus = true
-}
-
-// RemoveConfig is used by `RemoveOption` to store config options for remove
-type RemoveConfig struct {
-	PurgeOnError bool
-}
-
-// RemoveOption is used to pass options to the volumes service `Remove` implementation
-type RemoveOption func(*RemoveConfig)
-
-// WithPurgeOnError is an option passed to `Remove` which will purge all cached
-// data about a volume even if there was an error while attempting to remove the
-// volume.
-func WithPurgeOnError(b bool) RemoveOption {
-	return func(o *RemoveConfig) {
-		o.PurgeOnError = b
-	}
-}
diff --git a/vendor/github.com/docker/docker/volume/service/restore.go b/vendor/github.com/docker/docker/volume/service/restore.go
deleted file mode 100644
index 55c66c4f4..000000000
--- a/vendor/github.com/docker/docker/volume/service/restore.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package service // import "github.com/docker/docker/volume/service"
-
-import (
-	"context"
-	"sync"
-
-	"github.com/boltdb/bolt"
-	"github.com/docker/docker/volume"
-	"github.com/sirupsen/logrus"
-)
-
-// restore is called when a new volume store is created.
-// Its primary purpose is to ensure that all drivers' refcounts are set based
-// on known volumes after a restart.
-// This only attempts to track volumes that are actually stored in the on-disk db.
-// It does not probe the available drivers to find anything that may have been added
-// out of band.
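As a usage sketch of the functional options defined above (hedged: the service handle `svc`, the context, and the volume/reference names are illustrative, not part of the vendored API):

	// Create a volume with labels, holding a reference so concurrent
	// cleanup cannot race the create.
	v, err := svc.Create(ctx, "data", "local",
		opts.WithCreateLabels(map[string]string{"env": "test"}),
		opts.WithCreateReference("container-123"),
	)
	if err != nil {
		return err
	}

	// Later, fetch it pinned to the same driver, including driver status.
	// WithGetResolveStatus already has the GetOption signature, so it is
	// passed without being called.
	v2, err := svc.Get(ctx, "data",
		opts.WithGetDriver("local"),
		opts.WithGetResolveStatus,
	)
	if err != nil {
		return err
	}
	_ = v2
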
-func (s *VolumeStore) restore() { - var ls []volumeMetadata - s.db.View(func(tx *bolt.Tx) error { - ls = listMeta(tx) - return nil - }) - ctx := context.Background() - - chRemove := make(chan *volumeMetadata, len(ls)) - var wg sync.WaitGroup - for _, meta := range ls { - wg.Add(1) - // this is potentially a very slow operation, so do it in a goroutine - go func(meta volumeMetadata) { - defer wg.Done() - - var v volume.Volume - var err error - if meta.Driver != "" { - v, err = lookupVolume(ctx, s.drivers, meta.Driver, meta.Name) - if err != nil && err != errNoSuchVolume { - logrus.WithError(err).WithField("driver", meta.Driver).WithField("volume", meta.Name).Warn("Error restoring volume") - return - } - if v == nil { - // doesn't exist in the driver, remove it from the db - chRemove <- &meta - return - } - } else { - v, err = s.getVolume(ctx, meta.Name, meta.Driver) - if err != nil { - if err == errNoSuchVolume { - chRemove <- &meta - } - return - } - - meta.Driver = v.DriverName() - if err := s.setMeta(v.Name(), meta); err != nil { - logrus.WithError(err).WithField("driver", meta.Driver).WithField("volume", v.Name()).Warn("Error updating volume metadata on restore") - } - } - - // increment driver refcount - s.drivers.CreateDriver(meta.Driver) - - // cache the volume - s.globalLock.Lock() - s.options[v.Name()] = meta.Options - s.labels[v.Name()] = meta.Labels - s.names[v.Name()] = v - s.refs[v.Name()] = make(map[string]struct{}) - s.globalLock.Unlock() - }(meta) - } - - wg.Wait() - close(chRemove) - s.db.Update(func(tx *bolt.Tx) error { - for meta := range chRemove { - if err := removeMeta(tx, meta.Name); err != nil { - logrus.WithField("volume", meta.Name).Warnf("Error removing stale entry from volume db: %v", err) - } - } - return nil - }) -} diff --git a/vendor/github.com/docker/docker/volume/service/service.go b/vendor/github.com/docker/docker/volume/service/service.go deleted file mode 100644 index a62a32de5..000000000 --- a/vendor/github.com/docker/docker/volume/service/service.go +++ /dev/null @@ -1,243 +0,0 @@ -package service // import "github.com/docker/docker/volume/service" - -import ( - "context" - "sync/atomic" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/directory" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/plugingetter" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/volume" - "github.com/docker/docker/volume/drivers" - "github.com/docker/docker/volume/service/opts" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -type ds interface { - GetDriverList() []string -} - -type volumeEventLogger interface { - LogVolumeEvent(volumeID, action string, attributes map[string]string) -} - -// VolumesService manages access to volumes -type VolumesService struct { - vs *VolumeStore - ds ds - pruneRunning int32 - eventLogger volumeEventLogger -} - -// NewVolumeService creates a new volume service -func NewVolumeService(root string, pg plugingetter.PluginGetter, rootIDs idtools.IDPair, logger volumeEventLogger) (*VolumesService, error) { - ds := drivers.NewStore(pg) - if err := setupDefaultDriver(ds, root, rootIDs); err != nil { - return nil, err - } - - vs, err := NewStore(root, ds) - if err != nil { - return nil, err - } - return &VolumesService{vs: vs, ds: ds, eventLogger: logger}, nil -} - -// GetDriverList gets the list of registered volume drivers -func (s *VolumesService) GetDriverList() []string { - return 
s.ds.GetDriverList() -} - -// Create creates a volume -func (s *VolumesService) Create(ctx context.Context, name, driverName string, opts ...opts.CreateOption) (*types.Volume, error) { - if name == "" { - name = stringid.GenerateNonCryptoID() - } - v, err := s.vs.Create(ctx, name, driverName, opts...) - if err != nil { - return nil, err - } - - s.eventLogger.LogVolumeEvent(v.Name(), "create", map[string]string{"driver": v.DriverName()}) - apiV := volumeToAPIType(v) - return &apiV, nil -} - -// Get gets a volume -func (s *VolumesService) Get(ctx context.Context, name string, getOpts ...opts.GetOption) (*types.Volume, error) { - v, err := s.vs.Get(ctx, name, getOpts...) - if err != nil { - return nil, err - } - vol := volumeToAPIType(v) - - var cfg opts.GetConfig - for _, o := range getOpts { - o(&cfg) - } - - if cfg.ResolveStatus { - vol.Status = v.Status() - } - return &vol, nil -} - -// Mount mounts the volume -func (s *VolumesService) Mount(ctx context.Context, vol *types.Volume, ref string) (string, error) { - v, err := s.vs.Get(ctx, vol.Name, opts.WithGetDriver(vol.Driver)) - if err != nil { - if IsNotExist(err) { - err = errdefs.NotFound(err) - } - return "", err - } - return v.Mount(ref) -} - -// Unmount unmounts the volume. -// Note that depending on the implementation, the volume may still be mounted due to other resources using it. -func (s *VolumesService) Unmount(ctx context.Context, vol *types.Volume, ref string) error { - v, err := s.vs.Get(ctx, vol.Name, opts.WithGetDriver(vol.Driver)) - if err != nil { - if IsNotExist(err) { - err = errdefs.NotFound(err) - } - return err - } - return v.Unmount(ref) -} - -// Release releases a volume reference -func (s *VolumesService) Release(ctx context.Context, name string, ref string) error { - return s.vs.Release(ctx, name, ref) -} - -// Remove removes a volume -func (s *VolumesService) Remove(ctx context.Context, name string, rmOpts ...opts.RemoveOption) error { - var cfg opts.RemoveConfig - for _, o := range rmOpts { - o(&cfg) - } - - v, err := s.vs.Get(ctx, name) - if err != nil { - if IsNotExist(err) && cfg.PurgeOnError { - return nil - } - return err - } - - err = s.vs.Remove(ctx, v, rmOpts...) - if IsNotExist(err) { - err = nil - } else if IsInUse(err) { - err = errdefs.Conflict(err) - } else if IsNotExist(err) && cfg.PurgeOnError { - err = nil - } - - if err == nil { - s.eventLogger.LogVolumeEvent(v.Name(), "destroy", map[string]string{"driver": v.DriverName()}) - } - return err -} - -var acceptedPruneFilters = map[string]bool{ - "label": true, - "label!": true, -} - -var acceptedListFilters = map[string]bool{ - "dangling": true, - "name": true, - "driver": true, - "label": true, -} - -// LocalVolumesSize gets all local volumes and fetches their size on disk -// Note that this intentionally skips volumes which have mount options. Typically -// volumes with mount options are not really local even if they are using the -// local driver. -func (s *VolumesService) LocalVolumesSize(ctx context.Context) ([]*types.Volume, error) { - ls, _, err := s.vs.Find(ctx, And(ByDriver(volume.DefaultDriverName), CustomFilter(func(v volume.Volume) bool { - dv, ok := v.(volume.DetailedVolume) - return ok && len(dv.Options()) == 0 - }))) - if err != nil { - return nil, err - } - return s.volumesToAPI(ctx, ls, calcSize(true)), nil -} - -// Prune removes (local) volumes which match the past in filter arguments. -// Note that this intentionally skips volumes with mount options as there would -// be no space reclaimed in this case. 
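Stepping back, a hedged end-to-end sketch of the service surface above (Create, Mount, Unmount, Remove); the service handle, context, and reference ID are assumed names for illustration:

	vol, err := svc.Create(ctx, "data", volume.DefaultDriverName)
	if err != nil {
		return err
	}
	// The ref identifies the consumer; drivers may reference-count mounts.
	path, err := svc.Mount(ctx, vol, "container-123")
	if err != nil {
		return err
	}
	fmt.Println("mounted at", path)
	if err := svc.Unmount(ctx, vol, "container-123"); err != nil {
		return err
	}
	if err := svc.Remove(ctx, vol.Name); err != nil {
		// Removing an in-use volume surfaces as a conflict (see IsInUse above).
		return err
	}
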
-func (s *VolumesService) Prune(ctx context.Context, filter filters.Args) (*types.VolumesPruneReport, error) { - if !atomic.CompareAndSwapInt32(&s.pruneRunning, 0, 1) { - return nil, errdefs.Conflict(errors.New("a prune operation is already running")) - } - defer atomic.StoreInt32(&s.pruneRunning, 0) - - by, err := filtersToBy(filter, acceptedPruneFilters) - if err != nil { - return nil, err - } - ls, _, err := s.vs.Find(ctx, And(ByDriver(volume.DefaultDriverName), ByReferenced(false), by, CustomFilter(func(v volume.Volume) bool { - dv, ok := v.(volume.DetailedVolume) - return ok && len(dv.Options()) == 0 - }))) - if err != nil { - return nil, err - } - - rep := &types.VolumesPruneReport{VolumesDeleted: make([]string, 0, len(ls))} - for _, v := range ls { - select { - case <-ctx.Done(): - err := ctx.Err() - if err == context.Canceled { - err = nil - } - return rep, err - default: - } - - vSize, err := directory.Size(ctx, v.Path()) - if err != nil { - logrus.WithField("volume", v.Name()).WithError(err).Warn("could not determine size of volume") - } - if err := s.vs.Remove(ctx, v); err != nil { - logrus.WithError(err).WithField("volume", v.Name()).Warnf("Could not determine size of volume") - continue - } - rep.SpaceReclaimed += uint64(vSize) - rep.VolumesDeleted = append(rep.VolumesDeleted, v.Name()) - } - return rep, nil -} - -// List gets the list of volumes which match the past in filters -// If filters is nil or empty all volumes are returned. -func (s *VolumesService) List(ctx context.Context, filter filters.Args) (volumesOut []*types.Volume, warnings []string, err error) { - by, err := filtersToBy(filter, acceptedListFilters) - if err != nil { - return nil, nil, err - } - - volumes, warnings, err := s.vs.Find(ctx, by) - if err != nil { - return nil, nil, err - } - - return s.volumesToAPI(ctx, volumes, useCachedPath(true)), warnings, nil -} - -// Shutdown shuts down the image service and dependencies -func (s *VolumesService) Shutdown() error { - return s.vs.Shutdown() -} diff --git a/vendor/github.com/docker/docker/volume/service/store.go b/vendor/github.com/docker/docker/volume/service/store.go deleted file mode 100644 index e7e9d8a32..000000000 --- a/vendor/github.com/docker/docker/volume/service/store.go +++ /dev/null @@ -1,858 +0,0 @@ -package service // import "github.com/docker/docker/volume/service" - -import ( - "context" - "fmt" - "net" - "os" - "path/filepath" - "runtime" - "sync" - "time" - - "github.com/pkg/errors" - - "github.com/boltdb/bolt" - "github.com/docker/docker/errdefs" - "github.com/docker/docker/pkg/locker" - "github.com/docker/docker/volume" - "github.com/docker/docker/volume/drivers" - volumemounts "github.com/docker/docker/volume/mounts" - "github.com/docker/docker/volume/service/opts" - "github.com/sirupsen/logrus" -) - -const ( - volumeDataDir = "volumes" -) - -type volumeWrapper struct { - volume.Volume - labels map[string]string - scope string - options map[string]string -} - -func (v volumeWrapper) Options() map[string]string { - if v.options == nil { - return nil - } - options := make(map[string]string, len(v.options)) - for key, value := range v.options { - options[key] = value - } - return options -} - -func (v volumeWrapper) Labels() map[string]string { - if v.labels == nil { - return nil - } - - labels := make(map[string]string, len(v.labels)) - for key, value := range v.labels { - labels[key] = value - } - return labels -} - -func (v volumeWrapper) Scope() string { - return v.scope -} - -func (v volumeWrapper) CachedPath() string { - if vv, ok := 
v.Volume.(interface { - CachedPath() string - }); ok { - return vv.CachedPath() - } - return v.Volume.Path() -} - -// NewStore creates a new volume store at the given path -func NewStore(rootPath string, drivers *drivers.Store) (*VolumeStore, error) { - vs := &VolumeStore{ - locks: &locker.Locker{}, - names: make(map[string]volume.Volume), - refs: make(map[string]map[string]struct{}), - labels: make(map[string]map[string]string), - options: make(map[string]map[string]string), - drivers: drivers, - } - - if rootPath != "" { - // initialize metadata store - volPath := filepath.Join(rootPath, volumeDataDir) - if err := os.MkdirAll(volPath, 0750); err != nil { - return nil, err - } - - var err error - vs.db, err = bolt.Open(filepath.Join(volPath, "metadata.db"), 0600, &bolt.Options{Timeout: 1 * time.Second}) - if err != nil { - return nil, errors.Wrap(err, "error while opening volume store metadata database") - } - - // initialize volumes bucket - if err := vs.db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucketIfNotExists(volumeBucketName); err != nil { - return errors.Wrap(err, "error while setting up volume store metadata database") - } - return nil - }); err != nil { - return nil, err - } - } - - vs.restore() - - return vs, nil -} - -func (s *VolumeStore) getNamed(name string) (volume.Volume, bool) { - s.globalLock.RLock() - v, exists := s.names[name] - s.globalLock.RUnlock() - return v, exists -} - -func (s *VolumeStore) setNamed(v volume.Volume, ref string) { - name := v.Name() - - s.globalLock.Lock() - s.names[name] = v - if len(ref) > 0 { - if s.refs[name] == nil { - s.refs[name] = make(map[string]struct{}) - } - s.refs[name][ref] = struct{}{} - } - s.globalLock.Unlock() -} - -// hasRef returns true if the given name has at least one ref. -// Callers of this function are expected to hold the name lock. -func (s *VolumeStore) hasRef(name string) bool { - s.globalLock.RLock() - l := len(s.refs[name]) - s.globalLock.RUnlock() - return l > 0 -} - -// getRefs gets the list of refs for a given name -// Callers of this function are expected to hold the name lock. -func (s *VolumeStore) getRefs(name string) []string { - s.globalLock.RLock() - defer s.globalLock.RUnlock() - - refs := make([]string, 0, len(s.refs[name])) - for r := range s.refs[name] { - refs = append(refs, r) - } - - return refs -} - -// purge allows the cleanup of internal data on docker in case -// the internal data is out of sync with volumes driver plugins. -func (s *VolumeStore) purge(ctx context.Context, name string) error { - s.globalLock.Lock() - defer s.globalLock.Unlock() - - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - v, exists := s.names[name] - if exists { - driverName := v.DriverName() - if _, err := s.drivers.ReleaseDriver(driverName); err != nil { - logrus.WithError(err).WithField("driver", driverName).Error("Error releasing reference to volume driver") - } - } - if err := s.removeMeta(name); err != nil { - logrus.Errorf("Error removing volume metadata for volume %q: %v", name, err) - } - delete(s.names, name) - delete(s.refs, name) - delete(s.labels, name) - delete(s.options, name) - return nil -} - -// VolumeStore is a struct that stores the list of volumes available and keeps track of their usage counts -type VolumeStore struct { - // locks ensures that only one action is being performed on a particular volume at a time without locking the entire store - // since actions on volumes can be quite slow, this ensures the store is free to handle requests for other volumes. 
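-	// Lock ordering note: name-level locks are acquired before globalLock,
-	// and globalLock is held only for short map accesses, so slow per-volume
-	// work never serializes the whole store.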
- locks *locker.Locker - drivers *drivers.Store - // globalLock is used to protect access to mutable structures used by the store object - globalLock sync.RWMutex - // names stores the volume name -> volume relationship. - // This is used for making lookups faster so we don't have to probe all drivers - names map[string]volume.Volume - // refs stores the volume name and the list of things referencing it - refs map[string]map[string]struct{} - // labels stores volume labels for each volume - labels map[string]map[string]string - // options stores volume options for each volume - options map[string]map[string]string - db *bolt.DB -} - -func filterByDriver(names []string) filterFunc { - return func(v volume.Volume) bool { - for _, name := range names { - if name == v.DriverName() { - return true - } - } - return false - } -} - -func (s *VolumeStore) byReferenced(referenced bool) filterFunc { - return func(v volume.Volume) bool { - return s.hasRef(v.Name()) == referenced - } -} - -func (s *VolumeStore) filter(ctx context.Context, vols *[]volume.Volume, by By) (warnings []string, err error) { - // note that this specifically does not support the `FromList` By type. - switch f := by.(type) { - case nil: - if *vols == nil { - var ls []volume.Volume - ls, warnings, err = s.list(ctx) - if err != nil { - return warnings, err - } - *vols = ls - } - case byDriver: - if *vols != nil { - filter(vols, filterByDriver([]string(f))) - return nil, nil - } - var ls []volume.Volume - ls, warnings, err = s.list(ctx, []string(f)...) - if err != nil { - return nil, err - } - *vols = ls - case ByReferenced: - // TODO(@cpuguy83): It would be nice to optimize this by looking at the list - // of referenced volumes, however the locking strategy makes this difficult - // without either providing inconsistent data or deadlocks. - if *vols == nil { - var ls []volume.Volume - ls, warnings, err = s.list(ctx) - if err != nil { - return nil, err - } - *vols = ls - } - filter(vols, s.byReferenced(bool(f))) - case andCombinator: - for _, by := range f { - w, err := s.filter(ctx, vols, by) - if err != nil { - return warnings, err - } - warnings = append(warnings, w...) - } - case orCombinator: - for _, by := range f { - switch by.(type) { - case byDriver: - var ls []volume.Volume - w, err := s.filter(ctx, &ls, by) - if err != nil { - return warnings, err - } - warnings = append(warnings, w...) - default: - ls, w, err := s.list(ctx) - if err != nil { - return warnings, err - } - warnings = append(warnings, w...) - w, err = s.filter(ctx, &ls, by) - if err != nil { - return warnings, err - } - warnings = append(warnings, w...) - *vols = append(*vols, ls...) - } - } - unique(vols) - case CustomFilter: - if *vols == nil { - var ls []volume.Volume - ls, warnings, err = s.list(ctx) - if err != nil { - return nil, err - } - *vols = ls - } - filter(vols, filterFunc(f)) - default: - return nil, errdefs.InvalidParameter(errors.Errorf("unsupported filter: %T", f)) - } - return warnings, nil -} - -func unique(ls *[]volume.Volume) { - names := make(map[string]bool, len(*ls)) - filter(ls, func(v volume.Volume) bool { - if names[v.Name()] { - return false - } - names[v.Name()] = true - return true - }) -} - -// Find lists volumes filtered by the past in filter. -// If a driver returns a volume that has name which conflicts with another volume from a different driver, -// the first volume is chosen and the conflicting volume is dropped. 
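To make the dispatch below concrete, a sketch of scoping a query to an already-fetched list rather than querying the drivers (hedged: the slice contents and store handle are assumed):

	vols := []volume.Volume{ /* previously listed volumes */ }
	// FromList filters the given slice in place of asking each driver.
	local, _, err := store.Find(ctx, FromList(&vols, ByDriver("local")))
	if err != nil {
		return err
	}
	_ = local
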
-func (s *VolumeStore) Find(ctx context.Context, by By) (vols []volume.Volume, warnings []string, err error) { - logrus.WithField("ByType", fmt.Sprintf("%T", by)).WithField("ByValue", fmt.Sprintf("%+v", by)).Debug("VolumeStore.Find") - switch f := by.(type) { - case nil, orCombinator, andCombinator, byDriver, ByReferenced, CustomFilter: - warnings, err = s.filter(ctx, &vols, by) - case fromList: - warnings, err = s.filter(ctx, f.ls, f.by) - default: - // Really shouldn't be possible, but makes sure that any new By's are added to this check. - err = errdefs.InvalidParameter(errors.Errorf("unsupported filter type: %T", f)) - } - if err != nil { - return nil, nil, &OpErr{Err: err, Op: "list"} - } - - var out []volume.Volume - - for _, v := range vols { - name := normalizeVolumeName(v.Name()) - - s.locks.Lock(name) - storedV, exists := s.getNamed(name) - // Note: it's not safe to populate the cache here because the volume may have been - // deleted before we acquire a lock on its name - if exists && storedV.DriverName() != v.DriverName() { - logrus.Warnf("Volume name %s already exists for driver %s, not including volume returned by %s", v.Name(), storedV.DriverName(), v.DriverName()) - s.locks.Unlock(v.Name()) - continue - } - - out = append(out, v) - s.locks.Unlock(v.Name()) - } - return out, warnings, nil -} - -type filterFunc func(volume.Volume) bool - -func filter(vols *[]volume.Volume, fn filterFunc) { - var evict []int - for i, v := range *vols { - if !fn(v) { - evict = append(evict, i) - } - } - - for n, i := range evict { - copy((*vols)[i-n:], (*vols)[i-n+1:]) - (*vols)[len(*vols)-1] = nil - *vols = (*vols)[:len(*vols)-1] - } -} - -// list goes through each volume driver and asks for its list of volumes. -// TODO(@cpuguy83): plumb context through -func (s *VolumeStore) list(ctx context.Context, driverNames ...string) ([]volume.Volume, []string, error) { - var ( - ls = []volume.Volume{} // do not return a nil value as this affects filtering - warnings []string - ) - - var dls []volume.Driver - - all, err := s.drivers.GetAllDrivers() - if err != nil { - return nil, nil, err - } - if len(driverNames) == 0 { - dls = all - } else { - idx := make(map[string]bool, len(driverNames)) - for _, name := range driverNames { - idx[name] = true - } - for _, d := range all { - if idx[d.Name()] { - dls = append(dls, d) - } - } - } - - type vols struct { - vols []volume.Volume - err error - driverName string - } - chVols := make(chan vols, len(dls)) - - for _, vd := range dls { - go func(d volume.Driver) { - vs, err := d.List() - if err != nil { - chVols <- vols{driverName: d.Name(), err: &OpErr{Err: err, Name: d.Name(), Op: "list"}} - return - } - for i, v := range vs { - s.globalLock.RLock() - vs[i] = volumeWrapper{v, s.labels[v.Name()], d.Scope(), s.options[v.Name()]} - s.globalLock.RUnlock() - } - - chVols <- vols{vols: vs} - }(vd) - } - - badDrivers := make(map[string]struct{}) - for i := 0; i < len(dls); i++ { - vs := <-chVols - - if vs.err != nil { - warnings = append(warnings, vs.err.Error()) - badDrivers[vs.driverName] = struct{}{} - } - ls = append(ls, vs.vols...) 
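-		// Fan-in: every goroutine started above sends exactly one value on
-		// chVols, so len(dls) receives drain all senders; volumes from failed
-		// drivers are backfilled from the in-memory name cache below.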
- } - - if len(badDrivers) > 0 { - s.globalLock.RLock() - for _, v := range s.names { - if _, exists := badDrivers[v.DriverName()]; exists { - ls = append(ls, v) - } - } - s.globalLock.RUnlock() - } - return ls, warnings, nil -} - -// Create creates a volume with the given name and driver -// If the volume needs to be created with a reference to prevent race conditions -// with volume cleanup, make sure to use the `CreateWithReference` option. -func (s *VolumeStore) Create(ctx context.Context, name, driverName string, createOpts ...opts.CreateOption) (volume.Volume, error) { - var cfg opts.CreateConfig - for _, o := range createOpts { - o(&cfg) - } - - name = normalizeVolumeName(name) - s.locks.Lock(name) - defer s.locks.Unlock(name) - - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - - v, err := s.create(ctx, name, driverName, cfg.Options, cfg.Labels) - if err != nil { - if _, ok := err.(*OpErr); ok { - return nil, err - } - return nil, &OpErr{Err: err, Name: name, Op: "create"} - } - - s.setNamed(v, cfg.Reference) - return v, nil -} - -// checkConflict checks the local cache for name collisions with the passed in name, -// for existing volumes with the same name but in a different driver. -// This is used by `Create` as a best effort to prevent name collisions for volumes. -// If a matching volume is found that is not a conflict that is returned so the caller -// does not need to perform an additional lookup. -// When no matching volume is found, both returns will be nil -// -// Note: This does not probe all the drivers for name collisions because v1 plugins -// are very slow, particularly if the plugin is down, and cause other issues, -// particularly around locking the store. -// TODO(cpuguy83): With v2 plugins this shouldn't be a problem. Could also potentially -// use a connect timeout for this kind of check to ensure we aren't blocking for a -// long time. -func (s *VolumeStore) checkConflict(ctx context.Context, name, driverName string) (volume.Volume, error) { - // check the local cache - v, _ := s.getNamed(name) - if v == nil { - return nil, nil - } - - vDriverName := v.DriverName() - var conflict bool - if driverName != "" { - // Retrieve canonical driver name to avoid inconsistencies (for example - // "plugin" vs. "plugin:latest") - vd, err := s.drivers.GetDriver(driverName) - if err != nil { - return nil, err - } - - if vDriverName != vd.Name() { - conflict = true - } - } - - // let's check if the found volume ref - // is stale by checking with the driver if it still exists - exists, err := volumeExists(ctx, s.drivers, v) - if err != nil { - return nil, errors.Wrapf(errNameConflict, "found reference to volume '%s' in driver '%s', but got an error while checking the driver: %v", name, vDriverName, err) - } - - if exists { - if conflict { - return nil, errors.Wrapf(errNameConflict, "driver '%s' already has volume '%s'", vDriverName, name) - } - return v, nil - } - - if s.hasRef(v.Name()) { - // Containers are referencing this volume but it doesn't seem to exist anywhere. 
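-		// (the store still holds refs for the name even though the driver no longer reports the volume)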
- // Return a conflict error here, the user can fix this with `docker volume rm -f` - return nil, errors.Wrapf(errNameConflict, "found references to volume '%s' in driver '%s' but the volume was not found in the driver -- you may need to remove containers referencing this volume or force remove the volume to re-create it", name, vDriverName) - } - - // doesn't exist, so purge it from the cache - s.purge(ctx, name) - return nil, nil -} - -// volumeExists returns if the volume is still present in the driver. -// An error is returned if there was an issue communicating with the driver. -func volumeExists(ctx context.Context, store *drivers.Store, v volume.Volume) (bool, error) { - exists, err := lookupVolume(ctx, store, v.DriverName(), v.Name()) - if err != nil { - return false, err - } - return exists != nil, nil -} - -// create asks the given driver to create a volume with the name/opts. -// If a volume with the name is already known, it will ask the stored driver for the volume. -// If the passed in driver name does not match the driver name which is stored -// for the given volume name, an error is returned after checking if the reference is stale. -// If the reference is stale, it will be purged and this create can continue. -// It is expected that callers of this function hold any necessary locks. -func (s *VolumeStore) create(ctx context.Context, name, driverName string, opts, labels map[string]string) (volume.Volume, error) { - // Validate the name in a platform-specific manner - - // volume name validation is specific to the host os and not on container image - // windows/lcow should have an equivalent volumename validation logic so we create a parser for current host OS - parser := volumemounts.NewParser(runtime.GOOS) - err := parser.ValidateVolumeName(name) - if err != nil { - return nil, err - } - - v, err := s.checkConflict(ctx, name, driverName) - if err != nil { - return nil, err - } - - if v != nil { - // there is an existing volume, if we already have this stored locally, return it. 
- // TODO: there could be some inconsistent details such as labels here - if vv, _ := s.getNamed(v.Name()); vv != nil { - return vv, nil - } - } - - // Since there isn't a specified driver name, let's see if any of the existing drivers have this volume name - if driverName == "" { - v, _ = s.getVolume(ctx, name, "") - if v != nil { - return v, nil - } - } - - if driverName == "" { - driverName = volume.DefaultDriverName - } - vd, err := s.drivers.CreateDriver(driverName) - if err != nil { - return nil, &OpErr{Op: "create", Name: name, Err: err} - } - - logrus.Debugf("Registering new volume reference: driver %q, name %q", vd.Name(), name) - if v, _ = vd.Get(name); v == nil { - v, err = vd.Create(name, opts) - if err != nil { - if _, err := s.drivers.ReleaseDriver(driverName); err != nil { - logrus.WithError(err).WithField("driver", driverName).Error("Error releasing reference to volume driver") - } - return nil, err - } - } - - s.globalLock.Lock() - s.labels[name] = labels - s.options[name] = opts - s.refs[name] = make(map[string]struct{}) - s.globalLock.Unlock() - - metadata := volumeMetadata{ - Name: name, - Driver: vd.Name(), - Labels: labels, - Options: opts, - } - - if err := s.setMeta(name, metadata); err != nil { - return nil, err - } - return volumeWrapper{v, labels, vd.Scope(), opts}, nil -} - -// Get looks if a volume with the given name exists and returns it if so -func (s *VolumeStore) Get(ctx context.Context, name string, getOptions ...opts.GetOption) (volume.Volume, error) { - var cfg opts.GetConfig - for _, o := range getOptions { - o(&cfg) - } - name = normalizeVolumeName(name) - s.locks.Lock(name) - defer s.locks.Unlock(name) - - v, err := s.getVolume(ctx, name, cfg.Driver) - if err != nil { - return nil, &OpErr{Err: err, Name: name, Op: "get"} - } - if cfg.Driver != "" && v.DriverName() != cfg.Driver { - return nil, &OpErr{Name: name, Op: "get", Err: errdefs.Conflict(errors.New("found volume driver does not match passed in driver"))} - } - s.setNamed(v, cfg.Reference) - return v, nil -} - -// getVolume requests the volume, if the driver info is stored it just accesses that driver, -// if the driver is unknown it probes all drivers until it finds the first volume with that name. 
-// it is expected that callers of this function hold any necessary locks -func (s *VolumeStore) getVolume(ctx context.Context, name, driverName string) (volume.Volume, error) { - var meta volumeMetadata - meta, err := s.getMeta(name) - if err != nil { - return nil, err - } - - if driverName != "" { - if meta.Driver == "" { - meta.Driver = driverName - } - if driverName != meta.Driver { - return nil, errdefs.Conflict(errors.New("provided volume driver does not match stored driver")) - } - } - - if driverName == "" { - driverName = meta.Driver - } - if driverName == "" { - s.globalLock.RLock() - select { - case <-ctx.Done(): - s.globalLock.RUnlock() - return nil, ctx.Err() - default: - } - v, exists := s.names[name] - s.globalLock.RUnlock() - if exists { - meta.Driver = v.DriverName() - if err := s.setMeta(name, meta); err != nil { - return nil, err - } - } - } - - if meta.Driver != "" { - vol, err := lookupVolume(ctx, s.drivers, meta.Driver, name) - if err != nil { - return nil, err - } - if vol == nil { - s.purge(ctx, name) - return nil, errNoSuchVolume - } - - var scope string - vd, err := s.drivers.GetDriver(meta.Driver) - if err == nil { - scope = vd.Scope() - } - return volumeWrapper{vol, meta.Labels, scope, meta.Options}, nil - } - - logrus.Debugf("Probing all drivers for volume with name: %s", name) - drivers, err := s.drivers.GetAllDrivers() - if err != nil { - return nil, err - } - - for _, d := range drivers { - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - } - v, err := d.Get(name) - if err != nil || v == nil { - continue - } - meta.Driver = v.DriverName() - if err := s.setMeta(name, meta); err != nil { - return nil, err - } - return volumeWrapper{v, meta.Labels, d.Scope(), meta.Options}, nil - } - return nil, errNoSuchVolume -} - -// lookupVolume gets the specified volume from the specified driver. -// This will only return errors related to communications with the driver. -// If the driver returns an error that is not communication related the -// error is logged but not returned. -// If the volume is not found it will return `nil, nil`` -// TODO(@cpuguy83): plumb through the context to lower level components -func lookupVolume(ctx context.Context, store *drivers.Store, driverName, volumeName string) (volume.Volume, error) { - if driverName == "" { - driverName = volume.DefaultDriverName - } - vd, err := store.GetDriver(driverName) - if err != nil { - return nil, errors.Wrapf(err, "error while checking if volume %q exists in driver %q", volumeName, driverName) - } - v, err := vd.Get(volumeName) - if err != nil { - err = errors.Cause(err) - if _, ok := err.(net.Error); ok { - if v != nil { - volumeName = v.Name() - driverName = v.DriverName() - } - return nil, errors.Wrapf(err, "error while checking if volume %q exists in driver %q", volumeName, driverName) - } - - // At this point, the error could be anything from the driver, such as "no such volume" - // Let's not check an error here, and instead check if the driver returned a volume - logrus.WithError(err).WithField("driver", driverName).WithField("volume", volumeName).Debug("Error while looking up volume") - } - return v, nil -} - -// Remove removes the requested volume. 
A volume is not removed if it has any refs -func (s *VolumeStore) Remove(ctx context.Context, v volume.Volume, rmOpts ...opts.RemoveOption) error { - var cfg opts.RemoveConfig - for _, o := range rmOpts { - o(&cfg) - } - - name := v.Name() - s.locks.Lock(name) - defer s.locks.Unlock(name) - - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - if s.hasRef(name) { - return &OpErr{Err: errVolumeInUse, Name: name, Op: "remove", Refs: s.getRefs(name)} - } - - v, err := s.getVolume(ctx, name, v.DriverName()) - if err != nil { - return err - } - - vd, err := s.drivers.GetDriver(v.DriverName()) - if err != nil { - return &OpErr{Err: err, Name: v.DriverName(), Op: "remove"} - } - - logrus.Debugf("Removing volume reference: driver %s, name %s", v.DriverName(), name) - vol := unwrapVolume(v) - - err = vd.Remove(vol) - if err != nil { - err = &OpErr{Err: err, Name: name, Op: "remove"} - } - - if err == nil || cfg.PurgeOnError { - if e := s.purge(ctx, name); e != nil && err == nil { - err = e - } - } - return err -} - -// Release releases the specified reference to the volume -func (s *VolumeStore) Release(ctx context.Context, name string, ref string) error { - s.locks.Lock(name) - defer s.locks.Unlock(name) - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - s.globalLock.Lock() - defer s.globalLock.Unlock() - - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - if s.refs[name] != nil { - delete(s.refs[name], ref) - } - return nil -} - -// CountReferences gives a count of all references for a given volume. -func (s *VolumeStore) CountReferences(v volume.Volume) int { - name := normalizeVolumeName(v.Name()) - - s.locks.Lock(name) - defer s.locks.Unlock(name) - s.globalLock.Lock() - defer s.globalLock.Unlock() - - return len(s.refs[name]) -} - -func unwrapVolume(v volume.Volume) volume.Volume { - if vol, ok := v.(volumeWrapper); ok { - return vol.Volume - } - - return v -} - -// Shutdown releases all resources used by the volume store -// It does not make any changes to volumes, drivers, etc. -func (s *VolumeStore) Shutdown() error { - return s.db.Close() -} diff --git a/vendor/github.com/docker/docker/volume/service/store_unix.go b/vendor/github.com/docker/docker/volume/service/store_unix.go deleted file mode 100644 index 4ccc4b999..000000000 --- a/vendor/github.com/docker/docker/volume/service/store_unix.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build linux freebsd darwin - -package service // import "github.com/docker/docker/volume/service" - -// normalizeVolumeName is a platform specific function to normalize the name -// of a volume. This is a no-op on Unix-like platforms -func normalizeVolumeName(name string) string { - return name -} diff --git a/vendor/github.com/docker/docker/volume/service/store_windows.go b/vendor/github.com/docker/docker/volume/service/store_windows.go deleted file mode 100644 index bd46a6893..000000000 --- a/vendor/github.com/docker/docker/volume/service/store_windows.go +++ /dev/null @@ -1,12 +0,0 @@ -package service // import "github.com/docker/docker/volume/service" - -import "strings" - -// normalizeVolumeName is a platform specific function to normalize the name -// of a volume. On Windows, as NTFS is case insensitive, under -// c:\ProgramData\Docker\Volumes\, the folders John and john would be synonymous. -// Hence we can't allow the volume "John" and "john" to be created as separate -// volumes. 
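For illustration, on the Windows build both spellings collapse to the same store key (a trivial sketch):

	fmt.Println(normalizeVolumeName("John") == normalizeVolumeName("john")) // prints true
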
-func normalizeVolumeName(name string) string { - return strings.ToLower(name) -} diff --git a/vendor/github.com/docker/docker/volume/testutils/testutils.go b/vendor/github.com/docker/docker/volume/testutils/testutils.go deleted file mode 100644 index 5bb38e3f3..000000000 --- a/vendor/github.com/docker/docker/volume/testutils/testutils.go +++ /dev/null @@ -1,227 +0,0 @@ -package testutils // import "github.com/docker/docker/volume/testutils" - -import ( - "encoding/json" - "errors" - "fmt" - "net" - "net/http" - "time" - - "github.com/docker/docker/pkg/plugingetter" - "github.com/docker/docker/pkg/plugins" - "github.com/docker/docker/volume" -) - -// NoopVolume is a volume that doesn't perform any operation -type NoopVolume struct{} - -// Name is the name of the volume -func (NoopVolume) Name() string { return "noop" } - -// DriverName is the name of the driver -func (NoopVolume) DriverName() string { return "noop" } - -// Path is the filesystem path to the volume -func (NoopVolume) Path() string { return "noop" } - -// Mount mounts the volume in the container -func (NoopVolume) Mount(_ string) (string, error) { return "noop", nil } - -// Unmount unmounts the volume from the container -func (NoopVolume) Unmount(_ string) error { return nil } - -// Status provides low-level details about the volume -func (NoopVolume) Status() map[string]interface{} { return nil } - -// CreatedAt provides the time the volume (directory) was created at -func (NoopVolume) CreatedAt() (time.Time, error) { return time.Now(), nil } - -// FakeVolume is a fake volume with a random name -type FakeVolume struct { - name string - driverName string -} - -// NewFakeVolume creates a new fake volume for testing -func NewFakeVolume(name string, driverName string) volume.Volume { - return FakeVolume{name: name, driverName: driverName} -} - -// Name is the name of the volume -func (f FakeVolume) Name() string { return f.name } - -// DriverName is the name of the driver -func (f FakeVolume) DriverName() string { return f.driverName } - -// Path is the filesystem path to the volume -func (FakeVolume) Path() string { return "fake" } - -// Mount mounts the volume in the container -func (FakeVolume) Mount(_ string) (string, error) { return "fake", nil } - -// Unmount unmounts the volume from the container -func (FakeVolume) Unmount(_ string) error { return nil } - -// Status provides low-level details about the volume -func (FakeVolume) Status() map[string]interface{} { - return map[string]interface{}{"datakey": "datavalue"} -} - -// CreatedAt provides the time the volume (directory) was created at -func (FakeVolume) CreatedAt() (time.Time, error) { return time.Now(), nil } - -// FakeDriver is a driver that generates fake volumes -type FakeDriver struct { - name string - vols map[string]volume.Volume -} - -// NewFakeDriver creates a new FakeDriver with the specified name -func NewFakeDriver(name string) volume.Driver { - return &FakeDriver{ - name: name, - vols: make(map[string]volume.Volume), - } -} - -// Name is the name of the driver -func (d *FakeDriver) Name() string { return d.name } - -// Create initializes a fake volume. -// It returns an error if the options include an "error" key with a message -func (d *FakeDriver) Create(name string, opts map[string]string) (volume.Volume, error) { - if opts != nil && opts["error"] != "" { - return nil, fmt.Errorf(opts["error"]) - } - v := NewFakeVolume(name, d.name) - d.vols[name] = v - return v, nil -} - -// Remove deletes a volume. 
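The fixtures above are intended for store tests; a hedged sketch of typical usage (the test name and volume name are illustrative):

	func TestFakeDriverCreateGet(t *testing.T) {
		d := testutils.NewFakeDriver("fake")
		// Create registers the volume in the driver's in-memory map.
		v, err := d.Create("vol1", nil)
		if err != nil {
			t.Fatal(err)
		}
		// Get should return the same volume by name.
		got, err := d.Get("vol1")
		if err != nil || got.Name() != v.Name() {
			t.Fatalf("expected to get back %q, got %v (err %v)", v.Name(), got, err)
		}
	}
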
-func (d *FakeDriver) Remove(v volume.Volume) error { - if _, exists := d.vols[v.Name()]; !exists { - return fmt.Errorf("no such volume") - } - delete(d.vols, v.Name()) - return nil -} - -// List lists the volumes -func (d *FakeDriver) List() ([]volume.Volume, error) { - var vols []volume.Volume - for _, v := range d.vols { - vols = append(vols, v) - } - return vols, nil -} - -// Get gets the volume -func (d *FakeDriver) Get(name string) (volume.Volume, error) { - if v, exists := d.vols[name]; exists { - return v, nil - } - return nil, fmt.Errorf("no such volume") -} - -// Scope returns the local scope -func (*FakeDriver) Scope() string { - return "local" -} - -type fakePlugin struct { - client *plugins.Client - name string - refs int -} - -// MakeFakePlugin creates a fake plugin from the passed in driver -// Note: currently only "Create" is implemented because that's all that's needed -// so far. If you need it to test something else, add it here, but probably you -// shouldn't need to use this except for very specific cases with v2 plugin handling. -func MakeFakePlugin(d volume.Driver, l net.Listener) (plugingetter.CompatPlugin, error) { - c, err := plugins.NewClient(l.Addr().Network()+"://"+l.Addr().String(), nil) - if err != nil { - return nil, err - } - mux := http.NewServeMux() - - mux.HandleFunc("/VolumeDriver.Create", func(w http.ResponseWriter, r *http.Request) { - createReq := struct { - Name string - Opts map[string]string - }{} - if err := json.NewDecoder(r.Body).Decode(&createReq); err != nil { - fmt.Fprintf(w, `{"Err": "%s"}`, err.Error()) - return - } - _, err := d.Create(createReq.Name, createReq.Opts) - if err != nil { - fmt.Fprintf(w, `{"Err": "%s"}`, err.Error()) - return - } - w.Write([]byte("{}")) - }) - - go http.Serve(l, mux) - return &fakePlugin{client: c, name: d.Name()}, nil -} - -func (p *fakePlugin) Client() *plugins.Client { - return p.client -} - -func (p *fakePlugin) Name() string { - return p.name -} - -func (p *fakePlugin) IsV1() bool { - return false -} - -func (p *fakePlugin) ScopedPath(s string) string { - return s -} - -type fakePluginGetter struct { - plugins map[string]plugingetter.CompatPlugin -} - -// NewFakePluginGetter returns a plugin getter for fake plugins -func NewFakePluginGetter(pls ...plugingetter.CompatPlugin) plugingetter.PluginGetter { - idx := make(map[string]plugingetter.CompatPlugin, len(pls)) - for _, p := range pls { - idx[p.Name()] = p - } - return &fakePluginGetter{plugins: idx} -} - -// This ignores the second argument since we only care about volume drivers here, -// there shouldn't be any other kind of plugin in here -func (g *fakePluginGetter) Get(name, _ string, mode int) (plugingetter.CompatPlugin, error) { - p, ok := g.plugins[name] - if !ok { - return nil, errors.New("not found") - } - p.(*fakePlugin).refs += mode - return p, nil -} - -func (g *fakePluginGetter) GetAllByCap(capability string) ([]plugingetter.CompatPlugin, error) { - panic("GetAllByCap shouldn't be called") -} - -func (g *fakePluginGetter) GetAllManagedPluginsByCap(capability string) []plugingetter.CompatPlugin { - panic("GetAllManagedPluginsByCap should not be called") -} - -func (g *fakePluginGetter) Handle(capability string, callback func(string, *plugins.Client)) { - panic("Handle should not be called") -} - -// FakeRefs checks ref count on a fake plugin. 
-func FakeRefs(p plugingetter.CompatPlugin) int { - // this should panic if something other than a `*fakePlugin` is passed in - return p.(*fakePlugin).refs -} diff --git a/vendor/github.com/docker/docker/volume/volume.go b/vendor/github.com/docker/docker/volume/volume.go deleted file mode 100644 index 61c824397..000000000 --- a/vendor/github.com/docker/docker/volume/volume.go +++ /dev/null @@ -1,69 +0,0 @@ -package volume // import "github.com/docker/docker/volume" - -import ( - "time" -) - -// DefaultDriverName is the driver name used for the driver -// implemented in the local package. -const DefaultDriverName = "local" - -// Scopes define if a volume has is cluster-wide (global) or local only. -// Scopes are returned by the volume driver when it is queried for capabilities and then set on a volume -const ( - LocalScope = "local" - GlobalScope = "global" -) - -// Driver is for creating and removing volumes. -type Driver interface { - // Name returns the name of the volume driver. - Name() string - // Create makes a new volume with the given name. - Create(name string, opts map[string]string) (Volume, error) - // Remove deletes the volume. - Remove(vol Volume) (err error) - // List lists all the volumes the driver has - List() ([]Volume, error) - // Get retrieves the volume with the requested name - Get(name string) (Volume, error) - // Scope returns the scope of the driver (e.g. `global` or `local`). - // Scope determines how the driver is handled at a cluster level - Scope() string -} - -// Capability defines a set of capabilities that a driver is able to handle. -type Capability struct { - // Scope is the scope of the driver, `global` or `local` - // A `global` scope indicates that the driver manages volumes across the cluster - // A `local` scope indicates that the driver only manages volumes resources local to the host - // Scope is declared by the driver - Scope string -} - -// Volume is a place to store data. It is backed by a specific driver, and can be mounted. -type Volume interface { - // Name returns the name of the volume - Name() string - // DriverName returns the name of the driver which owns this volume. - DriverName() string - // Path returns the absolute path to the volume. - Path() string - // Mount mounts the volume and returns the absolute path to - // where it can be consumed. - Mount(id string) (string, error) - // Unmount unmounts the volume when it is no longer in use. 
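-	// Implementations may reference-count mounts, so the volume can remain
-	// mounted after Unmount if other consumers still hold it.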
- Unmount(id string) error - // CreatedAt returns Volume Creation time - CreatedAt() (time.Time, error) - // Status returns low-level status information about a volume - Status() map[string]interface{} -} - -// DetailedVolume wraps a Volume with user-defined labels, options, and cluster scope (e.g., `local` or `global`) -type DetailedVolume interface { - Labels() map[string]string - Options() map[string]string - Scope() string - Volume -} diff --git a/vendor/github.com/emicklei/go-restful/examples/.goconvey b/vendor/github.com/emicklei/go-restful/examples/.goconvey deleted file mode 100644 index 8485e986e..000000000 --- a/vendor/github.com/emicklei/go-restful/examples/.goconvey +++ /dev/null @@ -1 +0,0 @@ -ignore \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/examples/google_app_engine/.goconvey b/vendor/github.com/emicklei/go-restful/examples/google_app_engine/.goconvey deleted file mode 100644 index 8485e986e..000000000 --- a/vendor/github.com/emicklei/go-restful/examples/google_app_engine/.goconvey +++ /dev/null @@ -1 +0,0 @@ -ignore \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/examples/google_app_engine/datastore/.goconvey b/vendor/github.com/emicklei/go-restful/examples/google_app_engine/datastore/.goconvey deleted file mode 100644 index 8485e986e..000000000 --- a/vendor/github.com/emicklei/go-restful/examples/google_app_engine/datastore/.goconvey +++ /dev/null @@ -1 +0,0 @@ -ignore \ No newline at end of file diff --git a/vendor/github.com/emicklei/go-restful/examples/google_app_engine/datastore/main.go b/vendor/github.com/emicklei/go-restful/examples/google_app_engine/datastore/main.go deleted file mode 100644 index 33e5b2ea6..000000000 --- a/vendor/github.com/emicklei/go-restful/examples/google_app_engine/datastore/main.go +++ /dev/null @@ -1,267 +0,0 @@ -package main - -import ( - "net/http" - "time" - - "github.com/emicklei/go-restful" - "github.com/emicklei/go-restful-swagger12" - "google.golang.org/appengine" - "google.golang.org/appengine/datastore" - "google.golang.org/appengine/user" -) - -// This example demonstrates a reasonably complete suite of RESTful operations backed -// by DataStore on Google App Engine. - -// Our simple example struct. -type Profile struct { - LastModified time.Time `json:"-" xml:"-"` - Email string `json:"-" xml:"-"` - FirstName string `json:"first_name" xml:"first-name"` - NickName string `json:"nick_name" xml:"nick-name"` - LastName string `json:"last_name" xml:"last-name"` -} - -type ProfileApi struct { - Path string -} - -func gaeUrl() string { - if appengine.IsDevAppServer() { - return "http://localhost:8080" - } else { - // Include your URL on App Engine here. - // I found no way to get AppID without appengine.Context and this always - // based on a http.Request. - return "http://federatedservices.appspot.com" - } -} - -func init() { - u := ProfileApi{Path: "/profiles"} - u.register() - - // Optionally, you can install the Swagger Service which provides a nice Web UI on your REST API - // You need to download the Swagger HTML5 assets and change the FilePath location in the config below. - // Open .appspot.com/apidocs and enter - // Place the Swagger UI files into a folder called static/swagger if you wish to use Swagger - // http://.appspot.com/apidocs.json in the api input field. 
-	// For testing, you can use http://localhost:8080/apidocs.json
-	config := swagger.Config{
-		// You control what services are visible
-		WebServices:    restful.RegisteredWebServices(),
-		WebServicesUrl: gaeUrl(),
-		ApiPath:        "/apidocs.json",
-
-		// Optionally, specify where the UI is located
-		SwaggerPath: "/apidocs/",
-
-		// GAE supports static content, which is configured in your app.yaml.
-		// This example expects the swagger-ui in static/swagger so you should place it there :)
-		SwaggerFilePath: "static/swagger"}
-	swagger.InstallSwaggerService(config)
-}
-
-func (u ProfileApi) register() {
-	ws := new(restful.WebService)
-
-	ws.
-		Path(u.Path).
-		// You can specify consumes and produces per route as well.
-		Consumes(restful.MIME_JSON, restful.MIME_XML).
-		Produces(restful.MIME_JSON, restful.MIME_XML)
-
-	ws.Route(ws.POST("").To(u.insert).
-		// Swagger documentation.
-		Doc("insert a new profile").
-		Param(ws.BodyParameter("Profile", "representation of a profile").DataType("main.Profile")).
-		Reads(Profile{}))
-
-	ws.Route(ws.GET("/{profile-id}").To(u.read).
-		// Swagger documentation.
-		Doc("read a profile").
-		Param(ws.PathParameter("profile-id", "identifier for a profile").DataType("string")).
-		Writes(Profile{}))
-
-	ws.Route(ws.PUT("/{profile-id}").To(u.update).
-		// Swagger documentation.
-		Doc("update an existing profile").
-		Param(ws.PathParameter("profile-id", "identifier for a profile").DataType("string")).
-		Param(ws.BodyParameter("Profile", "representation of a profile").DataType("main.Profile")).
-		Reads(Profile{}))
-
-	ws.Route(ws.DELETE("/{profile-id}").To(u.remove).
-		// Swagger documentation.
-		Doc("remove a profile").
-		Param(ws.PathParameter("profile-id", "identifier for a profile").DataType("string")))
-
-	restful.Add(ws)
-}
-
-// POST http://localhost:8080/profiles
-// {"first_name": "Ivan", "nick_name": "Socks", "last_name": "Hawkes"}
-//
-func (u *ProfileApi) insert(r *restful.Request, w *restful.Response) {
-	c := appengine.NewContext(r.Request)
-
-	// Unmarshal the entity from the request into a struct.
-	p := new(Profile)
-	err := r.ReadEntity(&p)
-	if err != nil {
-		w.WriteError(http.StatusNotAcceptable, err)
-		return
-	}
-
-	// Ensure we start with a sensible value for this field.
-	p.LastModified = time.Now()
-
-	// The profile belongs to this user.
-	p.Email = user.Current(c).String()
-
-	k, err := datastore.Put(c, datastore.NewIncompleteKey(c, "profiles", nil), p)
-	if err != nil {
-		http.Error(w, err.Error(), http.StatusInternalServerError)
-		return
-	}
-
-	// Let them know the location of the newly created resource.
-	// TODO: Use a safe Url path append function.
-	w.AddHeader("Location", u.Path+"/"+k.Encode())
-
-	// Return the resultant entity.
-	w.WriteHeader(http.StatusCreated)
-	w.WriteEntity(p)
-}
-
-// GET http://localhost:8080/profiles/ahdkZXZ-ZmVkZXJhdGlvbi1zZXJ2aWNlc3IVCxIIcHJvZmlsZXMYgICAgICAgAoM
-//
-func (u ProfileApi) read(r *restful.Request, w *restful.Response) {
-	c := appengine.NewContext(r.Request)
-
-	// Decode the request parameter to determine the key for the entity.
-	k, err := datastore.DecodeKey(r.PathParameter("profile-id"))
-	if err != nil {
-		http.Error(w, err.Error(), http.StatusBadRequest)
-		return
-	}
-
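One remark on the lookups that follow: the handlers compare `err.Error()` against the literal string "datastore: no such entity", which breaks silently if the library ever rewords its message. The datastore package exports the sentinel `datastore.ErrNoSuchEntity`, so a sturdier check is possible. A hypothetical helper (Profile as defined in this example; the function name is invented):

```go
package main

import (
	"net/http"

	"golang.org/x/net/context"
	"google.golang.org/appengine/datastore"

	"github.com/emicklei/go-restful"
)

// getProfile fetches the entity behind k, writing the appropriate HTTP error
// on failure. Comparing against datastore.ErrNoSuchEntity is sturdier than
// matching the error string, as the handlers below do.
func getProfile(c context.Context, k *datastore.Key, w *restful.Response) (*Profile, bool) {
	p := new(Profile)
	if err := datastore.Get(c, k, p); err != nil {
		if err == datastore.ErrNoSuchEntity {
			http.Error(w, err.Error(), http.StatusNotFound)
		} else {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
		return nil, false
	}
	return p, true
}
```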
-	// Retrieve the entity from the datastore.
-	p := Profile{}
-	if err := datastore.Get(c, k, &p); err != nil {
-		if err.Error() == "datastore: no such entity" {
-			http.Error(w, err.Error(), http.StatusNotFound)
-		} else {
-			http.Error(w, err.Error(), http.StatusInternalServerError)
-		}
-		return
-	}
-
-	// Check that we own the profile before allowing them to view it.
-	// Optionally, return a 404 instead to help prevent guessing ids.
-	// TODO: Allow admins access.
-	if p.Email != user.Current(c).String() {
-		http.Error(w, "You do not have access to this resource", http.StatusForbidden)
-		return
-	}
-
-	w.WriteEntity(p)
-}
-
-// PUT http://localhost:8080/profiles/ahdkZXZ-ZmVkZXJhdGlvbi1zZXJ2aWNlc3IVCxIIcHJvZmlsZXMYgICAgICAgAoM
-// {"first_name": "Ivan", "nick_name": "Socks", "last_name": "Hawkes"}
-//
-func (u *ProfileApi) update(r *restful.Request, w *restful.Response) {
-	c := appengine.NewContext(r.Request)
-
-	// Decode the request parameter to determine the key for the entity.
-	k, err := datastore.DecodeKey(r.PathParameter("profile-id"))
-	if err != nil {
-		http.Error(w, err.Error(), http.StatusBadRequest)
-		return
-	}
-
-	// Unmarshal the entity from the request into a struct.
-	p := new(Profile)
-	err = r.ReadEntity(&p)
-	if err != nil {
-		w.WriteError(http.StatusNotAcceptable, err)
-		return
-	}
-
-	// Retrieve the old entity from the datastore.
-	old := Profile{}
-	if err := datastore.Get(c, k, &old); err != nil {
-		if err.Error() == "datastore: no such entity" {
-			http.Error(w, err.Error(), http.StatusNotFound)
-		} else {
-			http.Error(w, err.Error(), http.StatusInternalServerError)
-		}
-		return
-	}
-
-	// Check that we own the profile before allowing them to update it.
-	// Optionally, return a 404 instead to help prevent guessing ids.
-	// TODO: Allow admins access.
-	if old.Email != user.Current(c).String() {
-		http.Error(w, "You do not have access to this resource", http.StatusForbidden)
-		return
-	}
-
-	// Since the whole entity is rewritten, we need to assign any invariant fields again,
-	// e.g. the owner of the entity.
-	p.Email = user.Current(c).String()
-
-	// Keep track of the last modification date.
-	p.LastModified = time.Now()
-
-	// Attempt to overwrite the old entity.
-	_, err = datastore.Put(c, k, p)
-	if err != nil {
-		http.Error(w, err.Error(), http.StatusInternalServerError)
-		return
-	}
-
-	// Let them know it succeeded.
-	w.WriteHeader(http.StatusNoContent)
-}
-
-// DELETE http://localhost:8080/profiles/ahdkZXZ-ZmVkZXJhdGlvbi1zZXJ2aWNlc3IVCxIIcHJvZmlsZXMYgICAgICAgAoM
-//
-func (u *ProfileApi) remove(r *restful.Request, w *restful.Response) {
-	c := appengine.NewContext(r.Request)
-
-	// Decode the request parameter to determine the key for the entity.
-	k, err := datastore.DecodeKey(r.PathParameter("profile-id"))
-	if err != nil {
-		http.Error(w, err.Error(), http.StatusBadRequest)
-		return
-	}
-
-	// Retrieve the old entity from the datastore.
-	old := Profile{}
-	if err := datastore.Get(c, k, &old); err != nil {
-		if err.Error() == "datastore: no such entity" {
-			http.Error(w, err.Error(), http.StatusNotFound)
-		} else {
-			http.Error(w, err.Error(), http.StatusInternalServerError)
-		}
-		return
-	}
-
-	// Check that we own the profile before allowing them to delete it.
-	// Optionally, return a 404 instead to help prevent guessing ids.
-	// TODO: Allow admins access.
-	if old.Email != user.Current(c).String() {
-		http.Error(w, "You do not have access to this resource", http.StatusForbidden)
-		return
-	}
-
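Another aside: the update and remove handlers above use a read-check-write sequence that can interleave under concurrent requests. App Engine's datastore supports transactions, so the sequence can be made atomic. A hypothetical sketch (Profile as defined in this example; the helper name is invented):

```go
package main

import (
	"time"

	"golang.org/x/net/context"
	"google.golang.org/appengine/datastore"
)

// updateProfileTx is a transactional variant of the update handler's
// read-modify-write, so concurrent updates cannot interleave.
func updateProfileTx(c context.Context, k *datastore.Key, p *Profile) error {
	return datastore.RunInTransaction(c, func(tc context.Context) error {
		var old Profile
		if err := datastore.Get(tc, k, &old); err != nil {
			return err
		}
		p.Email = old.Email // reassign invariant fields, as the example notes
		p.LastModified = time.Now()
		_, err := datastore.Put(tc, k, p)
		return err
	}, nil)
}
```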
-	// Delete the entity.
-	if err := datastore.Delete(c, k); err != nil {
-		http.Error(w, err.Error(), http.StatusInternalServerError)
-	}
-
-	// Success notification.
-	w.WriteHeader(http.StatusNoContent)
-}
diff --git a/vendor/github.com/emicklei/go-restful/examples/google_app_engine/restful-appstats-integration.go b/vendor/github.com/emicklei/go-restful/examples/google_app_engine/restful-appstats-integration.go
deleted file mode 100644
index a871133b0..000000000
--- a/vendor/github.com/emicklei/go-restful/examples/google_app_engine/restful-appstats-integration.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package main
-
-import (
-	"github.com/emicklei/go-restful"
-	"github.com/mjibson/appstats"
-)
-
-func stats(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
-	c := appstats.NewContext(req.Request)
-	chain.ProcessFilter(req, resp)
-	c.Stats.Status = resp.StatusCode()
-	c.Save()
-}
diff --git a/vendor/github.com/emicklei/go-restful/examples/google_app_engine/restful-user-service.go b/vendor/github.com/emicklei/go-restful/examples/google_app_engine/restful-user-service.go
deleted file mode 100644
index e1b462c3e..000000000
--- a/vendor/github.com/emicklei/go-restful/examples/google_app_engine/restful-user-service.go
+++ /dev/null
@@ -1,162 +0,0 @@
-package main
-
-import (
-	"net/http"
-
-	"github.com/emicklei/go-restful"
-	"github.com/emicklei/go-restful-swagger12"
-	"google.golang.org/appengine"
-	"google.golang.org/appengine/memcache"
-)
-
-// This example is functionally the same as ../restful-user-service.go
-// but it's supposed to run on Google App Engine (GAE)
-//
-// contributed by ivanhawkes
-
-type User struct {
-	Id, Name string
-}
-
-type UserService struct {
-	// normally one would use DAO (data access object)
-	// but in this example we simply use memcache.
-}
-
-func (u UserService) Register() {
-	ws := new(restful.WebService)
-
-	ws.
-		Path("/users").
-		Consumes(restful.MIME_XML, restful.MIME_JSON).
-		Produces(restful.MIME_JSON, restful.MIME_XML) // you can specify this per route as well
-
-	ws.Route(ws.GET("/{user-id}").To(u.findUser).
-		// docs
-		Doc("get a user").
-		Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
-		Writes(User{})) // on the response
-
-	ws.Route(ws.PATCH("").To(u.updateUser).
-		// docs
-		Doc("update a user").
-		Reads(User{})) // from the request
-
-	ws.Route(ws.PUT("/{user-id}").To(u.createUser).
-		// docs
-		Doc("create a user").
-		Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
-		Reads(User{})) // from the request
-
-	ws.Route(ws.DELETE("/{user-id}").To(u.removeUser).
-		// docs
-		Doc("delete a user").
-		Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")))
-
-	restful.Add(ws)
-}
-
-// GET http://localhost:8080/users/1
-//
-func (u UserService) findUser(request *restful.Request, response *restful.Response) {
-	c := appengine.NewContext(request.Request)
-	id := request.PathParameter("user-id")
-	usr := new(User)
-	_, err := memcache.Gob.Get(c, id, &usr)
-	if err != nil || len(usr.Id) == 0 {
-		response.WriteErrorString(http.StatusNotFound, "User could not be found.")
-	} else {
-		response.WriteEntity(usr)
-	}
-}
-
-// PATCH http://localhost:8080/users
-// <User><Id>1</Id><Name>Melissa Raspberry</Name></User>
-//
-func (u *UserService) updateUser(request *restful.Request, response *restful.Response) {
-	c := appengine.NewContext(request.Request)
-	usr := new(User)
-	err := request.ReadEntity(&usr)
-	if err == nil {
-		item := &memcache.Item{
-			Key:    usr.Id,
-			Object: &usr,
-		}
-		err = memcache.Gob.Set(c, item)
-		if err != nil {
-			response.WriteError(http.StatusInternalServerError, err)
-			return
-		}
-		response.WriteEntity(usr)
-	} else {
-		response.WriteError(http.StatusInternalServerError, err)
-	}
-}
-
-// PUT http://localhost:8080/users/1
-// <User><Id>1</Id><Name>Melissa</Name></User>
-//
-func (u *UserService) createUser(request *restful.Request, response *restful.Response) {
-	c := appengine.NewContext(request.Request)
-	usr := User{Id: request.PathParameter("user-id")}
-	err := request.ReadEntity(&usr)
-	if err == nil {
-		item := &memcache.Item{
-			Key:    usr.Id,
-			Object: &usr,
-		}
-		err = memcache.Gob.Add(c, item)
-		if err != nil {
-			response.WriteError(http.StatusInternalServerError, err)
-			return
-		}
-		response.WriteHeader(http.StatusCreated)
-		response.WriteEntity(usr)
-	} else {
-		response.WriteError(http.StatusInternalServerError, err)
-	}
-}
-
-// DELETE http://localhost:8080/users/1
-//
-func (u *UserService) removeUser(request *restful.Request, response *restful.Response) {
-	c := appengine.NewContext(request.Request)
-	id := request.PathParameter("user-id")
-	err := memcache.Delete(c, id)
-	if err != nil {
-		response.WriteError(http.StatusInternalServerError, err)
-	}
-}
-
-func getGaeURL() string {
-	if appengine.IsDevAppServer() {
-		return "http://localhost:8080"
-	} else {
-		/**
-		 * Include your URL on App Engine here.
-		 * I found no way to get AppID without appengine.Context and this is
-		 * always based on an http.Request.
-		 */
-		return "http://.appspot.com"
-	}
-}
-
-func init() {
-	u := UserService{}
-	u.Register()
-
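A note on the memcache usage above: the handlers pass `&usr` where `usr` is already a `*User`, so the Gob codec decodes through an extra level of indirection (which gob tolerates); the conventional form passes the pointer directly. A small hypothetical pair of helpers (User as in this example; function names invented):

```go
package main

import (
	"golang.org/x/net/context"
	"google.golang.org/appengine/memcache"
)

// saveUser stores any gob-encodable value under the user's id.
func saveUser(c context.Context, u *User) error {
	return memcache.Gob.Set(c, &memcache.Item{Key: u.Id, Object: u})
}

// loadUser decodes the cached value straight into a *User.
func loadUser(c context.Context, id string) (*User, error) {
	u := new(User)
	if _, err := memcache.Gob.Get(c, id, u); err != nil {
		return nil, err
	}
	return u, nil
}
```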
-	// Optionally, you can install the Swagger Service which provides a nice Web UI on your REST API.
-	// You need to download the Swagger HTML5 assets and change the FilePath location in the config below.
-	// Open .appspot.com/apidocs and enter http://.appspot.com/apidocs.json in the api input field.
-	config := swagger.Config{
-		WebServices:    restful.RegisteredWebServices(), // you control what services are visible
-		WebServicesUrl: getGaeURL(),
-		ApiPath:        "/apidocs.json",
-
-		// Optionally, specify where the UI is located
-		SwaggerPath: "/apidocs/",
-		// GAE supports static content, which is configured in your app.yaml.
-		// This example expects the swagger-ui in static/swagger so you should place it there :)
-		SwaggerFilePath: "static/swagger"}
-	swagger.InstallSwaggerService(config)
-}
diff --git a/vendor/github.com/emicklei/go-restful/examples/msgpack/msgpack_entity.go b/vendor/github.com/emicklei/go-restful/examples/msgpack/msgpack_entity.go
deleted file mode 100644
index 330e45896..000000000
--- a/vendor/github.com/emicklei/go-restful/examples/msgpack/msgpack_entity.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package restPack
-
-import (
-	restful "github.com/emicklei/go-restful"
-	"gopkg.in/vmihailenco/msgpack.v2"
-)
-
-const MIME_MSGPACK = "application/x-msgpack" // Accept or Content-Type used in Consumes() and/or Produces()
-
-// NewEntityAccessorMsgPack returns a new EntityReaderWriter for accessing MessagePack content.
-// This package is not initialized with such an accessor using the MIME_MSGPACK contentType.
-func NewEntityAccessorMsgPack() restful.EntityReaderWriter {
-	return entityMsgPackAccess{}
-}
-
-// entityMsgPackAccess is an EntityReaderWriter for MessagePack encoding
-type entityMsgPackAccess struct {
-}
-
-// Read unmarshals the value from the request body using msgpack.
-func (e entityMsgPackAccess) Read(req *restful.Request, v interface{}) error {
-	return msgpack.NewDecoder(req.Request.Body).Decode(v)
-}
-
-// Write marshals the value to a byte slice and sets the Content-Type header.
-func (e entityMsgPackAccess) Write(resp *restful.Response, status int, v interface{}) error {
-	if v == nil {
-		resp.WriteHeader(status)
-		// do not write a nil representation
-		return nil
-	}
-	resp.WriteHeader(status)
-	return msgpack.NewEncoder(resp).Encode(v)
-}
diff --git a/vendor/github.com/emicklei/go-restful/examples/restful-CORS-filter.go b/vendor/github.com/emicklei/go-restful/examples/restful-CORS-filter.go
deleted file mode 100644
index d682d43e9..000000000
--- a/vendor/github.com/emicklei/go-restful/examples/restful-CORS-filter.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package main
-
-import (
-	"io"
-	"log"
-	"net/http"
-
-	"github.com/emicklei/go-restful"
-)
-
-// Cross-origin resource sharing (CORS) is a mechanism that allows JavaScript on a web page
-// to make XMLHttpRequests to another domain, not the domain the JavaScript originated from.
-//
-// http://en.wikipedia.org/wiki/Cross-origin_resource_sharing
-// http://enable-cors.org/server.html
-//
-// GET http://localhost:8080/users
-//
-// GET http://localhost:8080/users/1
-//
-// PUT http://localhost:8080/users/1
-//
-// DELETE http://localhost:8080/users/1
-//
-// OPTIONS http://localhost:8080/users/1 with Header "Origin" set to some domain and
-
-type UserResource struct{}
-
-func (u UserResource) RegisterTo(container *restful.Container) {
-	ws := new(restful.WebService)
-	ws.
-		Path("/users").
-		Consumes("*/*").
- Produces("*/*") - - ws.Route(ws.GET("/{user-id}").To(u.nop)) - ws.Route(ws.POST("").To(u.nop)) - ws.Route(ws.PUT("/{user-id}").To(u.nop)) - ws.Route(ws.DELETE("/{user-id}").To(u.nop)) - - container.Add(ws) -} - -func (u UserResource) nop(request *restful.Request, response *restful.Response) { - io.WriteString(response.ResponseWriter, "this would be a normal response") -} - -func main() { - wsContainer := restful.NewContainer() - u := UserResource{} - u.RegisterTo(wsContainer) - - // Add container filter to enable CORS - cors := restful.CrossOriginResourceSharing{ - ExposeHeaders: []string{"X-My-Header"}, - AllowedHeaders: []string{"Content-Type", "Accept"}, - AllowedMethods: []string{"GET", "POST"}, - CookiesAllowed: false, - Container: wsContainer} - wsContainer.Filter(cors.Filter) - - // Add container filter to respond to OPTIONS - wsContainer.Filter(wsContainer.OPTIONSFilter) - - log.Print("start listening on localhost:8080") - server := &http.Server{Addr: ":8080", Handler: wsContainer} - log.Fatal(server.ListenAndServe()) -} diff --git a/vendor/github.com/emicklei/go-restful/examples/restful-NCSA-logging.go b/vendor/github.com/emicklei/go-restful/examples/restful-NCSA-logging.go deleted file mode 100644 index 7066b96d6..000000000 --- a/vendor/github.com/emicklei/go-restful/examples/restful-NCSA-logging.go +++ /dev/null @@ -1,54 +0,0 @@ -package main - -import ( - "github.com/emicklei/go-restful" - "io" - "log" - "net/http" - "os" - "strings" - "time" -) - -// This example shows how to create a filter that produces log lines -// according to the Common Log Format, also known as the NCSA standard. -// -// kindly contributed by leehambley -// -// GET http://localhost:8080/ping - -var logger *log.Logger = log.New(os.Stdout, "", 0) - -func NCSACommonLogFormatLogger() restful.FilterFunction { - return func(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) { - var username = "-" - if req.Request.URL.User != nil { - if name := req.Request.URL.User.Username(); name != "" { - username = name - } - } - chain.ProcessFilter(req, resp) - logger.Printf("%s - %s [%s] \"%s %s %s\" %d %d", - strings.Split(req.Request.RemoteAddr, ":")[0], - username, - time.Now().Format("02/Jan/2006:15:04:05 -0700"), - req.Request.Method, - req.Request.URL.RequestURI(), - req.Request.Proto, - resp.StatusCode(), - resp.ContentLength(), - ) - } -} - -func main() { - ws := new(restful.WebService) - ws.Filter(NCSACommonLogFormatLogger()) - ws.Route(ws.GET("/ping").To(hello)) - restful.Add(ws) - log.Fatal(http.ListenAndServe(":8080", nil)) -} - -func hello(req *restful.Request, resp *restful.Response) { - io.WriteString(resp, "pong") -} diff --git a/vendor/github.com/emicklei/go-restful/examples/restful-basic-authentication.go b/vendor/github.com/emicklei/go-restful/examples/restful-basic-authentication.go deleted file mode 100644 index f4fd5ce18..000000000 --- a/vendor/github.com/emicklei/go-restful/examples/restful-basic-authentication.go +++ /dev/null @@ -1,35 +0,0 @@ -package main - -import ( - "github.com/emicklei/go-restful" - "io" - "log" - "net/http" -) - -// This example shows how to create a (Route) Filter that performs Basic Authentication on the Http request. 
-// -// GET http://localhost:8080/secret -// and use admin,admin for the credentials - -func main() { - ws := new(restful.WebService) - ws.Route(ws.GET("/secret").Filter(basicAuthenticate).To(secret)) - restful.Add(ws) - log.Fatal(http.ListenAndServe(":8080", nil)) -} - -func basicAuthenticate(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) { - // usr/pwd = admin/admin - u, p, ok := req.Request.BasicAuth() - if !ok || u != "admin" || p != "admin" { - resp.AddHeader("WWW-Authenticate", "Basic realm=Protected Area") - resp.WriteErrorString(401, "401: Not Authorized") - return - } - chain.ProcessFilter(req, resp) -} - -func secret(req *restful.Request, resp *restful.Response) { - io.WriteString(resp, "42") -} diff --git a/vendor/github.com/emicklei/go-restful/examples/restful-cpuprofiler-service.go b/vendor/github.com/emicklei/go-restful/examples/restful-cpuprofiler-service.go deleted file mode 100644 index 9148213cf..000000000 --- a/vendor/github.com/emicklei/go-restful/examples/restful-cpuprofiler-service.go +++ /dev/null @@ -1,65 +0,0 @@ -package main - -import ( - "github.com/emicklei/go-restful" - "io" - "log" - "os" - "runtime/pprof" -) - -// ProfilingService is a WebService that can start/stop a CPU profile and write results to a file -// GET /{rootPath}/start will activate CPU profiling -// GET /{rootPath}/stop will stop profiling -// -// NewProfileService("/profiler", "ace.prof").AddWebServiceTo(restful.DefaultContainer) -// -type ProfilingService struct { - rootPath string // the base (root) of the service, e.g. /profiler - cpuprofile string // the output filename to write profile results, e.g. myservice.prof - cpufile *os.File // if not nil, then profiling is active -} - -func NewProfileService(rootPath string, outputFilename string) *ProfilingService { - ps := new(ProfilingService) - ps.rootPath = rootPath - ps.cpuprofile = outputFilename - return ps -} - -// Add this ProfileService to a restful Container -func (p ProfilingService) AddWebServiceTo(container *restful.Container) { - ws := new(restful.WebService) - ws.Path(p.rootPath).Consumes("*/*").Produces(restful.MIME_JSON) - ws.Route(ws.GET("/start").To(p.startProfiler)) - ws.Route(ws.GET("/stop").To(p.stopProfiler)) - container.Add(ws) -} - -func (p *ProfilingService) startProfiler(req *restful.Request, resp *restful.Response) { - if p.cpufile != nil { - io.WriteString(resp.ResponseWriter, "[restful] CPU profiling already running") - return // error? - } - cpufile, err := os.Create(p.cpuprofile) - if err != nil { - log.Fatal(err) - } - // remember for close - p.cpufile = cpufile - pprof.StartCPUProfile(cpufile) - io.WriteString(resp.ResponseWriter, "[restful] CPU profiling started, writing on:"+p.cpuprofile) -} - -func (p *ProfilingService) stopProfiler(req *restful.Request, resp *restful.Response) { - if p.cpufile == nil { - io.WriteString(resp.ResponseWriter, "[restful] CPU profiling not active") - return // error? 
-	}
-	pprof.StopCPUProfile()
-	p.cpufile.Close()
-	p.cpufile = nil
-	io.WriteString(resp.ResponseWriter, "[restful] CPU profiling stopped, closing:"+p.cpuprofile)
-}
-
-func main() {} // exists for example compilation only
diff --git a/vendor/github.com/emicklei/go-restful/examples/restful-curly-router.go b/vendor/github.com/emicklei/go-restful/examples/restful-curly-router.go
deleted file mode 100644
index 1bddb34af..000000000
--- a/vendor/github.com/emicklei/go-restful/examples/restful-curly-router.go
+++ /dev/null
@@ -1,107 +0,0 @@
-package main
-
-import (
-	"log"
-	"net/http"
-
-	"github.com/emicklei/go-restful"
-)
-
-// This example has the same service definition as restful-user-resource
-// but uses a different router (CurlyRouter) that does not use regular expressions
-//
-// POST http://localhost:8080/users
-// <User><Id>1</Id><Name>Melissa Raspberry</Name></User>
-//
-// GET http://localhost:8080/users/1
-//
-// PUT http://localhost:8080/users/1
-// <User><Id>1</Id><Name>Melissa</Name></User>
-//
-// DELETE http://localhost:8080/users/1
-//
-
-type User struct {
-	Id, Name string
-}
-
-type UserResource struct {
-	// normally one would use DAO (data access object)
-	users map[string]User
-}
-
-func (u UserResource) Register(container *restful.Container) {
-	ws := new(restful.WebService)
-	ws.
-		Path("/users").
-		Consumes(restful.MIME_XML, restful.MIME_JSON).
-		Produces(restful.MIME_JSON, restful.MIME_XML) // you can specify this per route as well
-
-	ws.Route(ws.GET("/{user-id}").To(u.findUser))
-	ws.Route(ws.POST("").To(u.updateUser))
-	ws.Route(ws.PUT("/{user-id}").To(u.createUser))
-	ws.Route(ws.DELETE("/{user-id}").To(u.removeUser))
-
-	container.Add(ws)
-}
-
-// GET http://localhost:8080/users/1
-//
-func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
-	id := request.PathParameter("user-id")
-	usr := u.users[id]
-	if len(usr.Id) == 0 {
-		response.AddHeader("Content-Type", "text/plain")
-		response.WriteErrorString(http.StatusNotFound, "User could not be found.")
-	} else {
-		response.WriteEntity(usr)
-	}
-}
-
-// POST http://localhost:8080/users
-// <User><Id>1</Id><Name>Melissa Raspberry</Name></User>
-//
-func (u *UserResource) updateUser(request *restful.Request, response *restful.Response) {
-	usr := new(User)
-	err := request.ReadEntity(&usr)
-	if err == nil {
-		u.users[usr.Id] = *usr
-		response.WriteEntity(usr)
-	} else {
-		response.AddHeader("Content-Type", "text/plain")
-		response.WriteErrorString(http.StatusInternalServerError, err.Error())
-	}
-}
-
-// PUT http://localhost:8080/users/1
-// <User><Id>1</Id><Name>Melissa</Name></User>
-//
-func (u *UserResource) createUser(request *restful.Request, response *restful.Response) {
-	usr := User{Id: request.PathParameter("user-id")}
-	err := request.ReadEntity(&usr)
-	if err == nil {
-		u.users[usr.Id] = usr
-		response.WriteHeaderAndEntity(http.StatusCreated, usr)
-	} else {
-		response.AddHeader("Content-Type", "text/plain")
-		response.WriteErrorString(http.StatusInternalServerError, err.Error())
-	}
-}
-
-// DELETE http://localhost:8080/users/1
-//
-func (u *UserResource) removeUser(request *restful.Request, response *restful.Response) {
-	id := request.PathParameter("user-id")
-	delete(u.users, id)
-}
-
-func main() {
-	wsContainer := restful.NewContainer()
-	wsContainer.Router(restful.CurlyRouter{})
-	u := UserResource{map[string]User{}}
-	u.Register(wsContainer)
-
-	log.Print("start listening on localhost:8080")
-	server := &http.Server{Addr: ":8080", Handler: wsContainer}
-	log.Fatal(server.ListenAndServe())
-}
diff --git a/vendor/github.com/emicklei/go-restful/examples/restful-encoding-filter.go b/vendor/github.com/emicklei/go-restful/examples/restful-encoding-filter.go
deleted file mode 100644
index 177d5a994..000000000
--- a/vendor/github.com/emicklei/go-restful/examples/restful-encoding-filter.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package main
-
-import (
-	"github.com/emicklei/go-restful"
-	"log"
-	"net/http"
-)
-
-type User struct {
-	Id, Name string
-}
-
-type UserList struct {
-	Users []User
-}
-
-//
-// This example shows how to use the CompressingResponseWriter by a Filter
-// such that encoding can be enabled per WebService or per Route (instead of per container)
-// Using restful.DefaultContainer.EnableContentEncoding(true) will encode all responses served by WebServices in the DefaultContainer.
-//
-// Set Accept-Encoding to gzip or deflate
-// GET http://localhost:8080/users/42
-// and look at the response headers
-
-func main() {
-	restful.Add(NewUserService())
-	log.Print("start listening on localhost:8080")
-	log.Fatal(http.ListenAndServe(":8080", nil))
-}
-
-func NewUserService() *restful.WebService {
-	ws := new(restful.WebService)
-	ws.
-		Path("/users").
-		Consumes(restful.MIME_XML, restful.MIME_JSON).
-		Produces(restful.MIME_JSON, restful.MIME_XML)
-
-	// install a response encoding filter
-	ws.Route(ws.GET("/{user-id}").Filter(encodingFilter).To(findUser))
-	return ws
-}
-
-// Route Filter (defines FilterFunction)
-func encodingFilter(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
-	log.Printf("[encoding-filter] %s,%s\n", req.Request.Method, req.Request.URL)
-	// wrap responseWriter into a compressing one
-	compress, _ := restful.NewCompressingResponseWriter(resp.ResponseWriter, restful.ENCODING_GZIP)
-	resp.ResponseWriter = compress
-	defer func() {
-		compress.Close()
-	}()
-	chain.ProcessFilter(req, resp)
-}
-
-// GET http://localhost:8080/users/42
-//
-func findUser(request *restful.Request, response *restful.Response) {
-	log.Print("findUser")
-	response.WriteEntity(User{"42", "Gandalf"})
-}
diff --git a/vendor/github.com/emicklei/go-restful/examples/restful-filters.go b/vendor/github.com/emicklei/go-restful/examples/restful-filters.go
deleted file mode 100644
index 0d2434b3c..000000000
--- a/vendor/github.com/emicklei/go-restful/examples/restful-filters.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package main
-
-import (
-	"github.com/emicklei/go-restful"
-	"log"
-	"net/http"
-	"time"
-)
-
-type User struct {
-	Id, Name string
-}
-
-type UserList struct {
-	Users []User
-}
-
-// This example shows how to create and use the three different Filters (Container, WebService and Route).
-// When applied to the restful.DefaultContainer, we refer to them as a global filter.
-//
-// GET http://localhost:8080/users/42
-// and see the logging per filter (try repeating this request)
-
-func main() {
-	// install a global (=DefaultContainer) filter (processed before any webservice in the DefaultContainer)
-	restful.Filter(globalLogging)
-
-	restful.Add(NewUserService())
-	log.Print("start listening on localhost:8080")
-	log.Fatal(http.ListenAndServe(":8080", nil))
-}
-
-func NewUserService() *restful.WebService {
-	ws := new(restful.WebService)
-	ws.
-		Path("/users").
-		Consumes(restful.MIME_XML, restful.MIME_JSON).
-		Produces(restful.MIME_JSON, restful.MIME_XML)
-
-	// install a webservice filter (processed before any route)
-	ws.Filter(webserviceLogging).Filter(measureTime)
-
-	// install a counter filter
-	ws.Route(ws.GET("").Filter(NewCountFilter().routeCounter).To(getAllUsers))
-
-	// install 2 chained route filters (processed before calling findUser)
-	ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser))
-	return ws
-}
-
-// Global Filter
-func globalLogging(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
-	log.Printf("[global-filter (logger)] %s,%s\n", req.Request.Method, req.Request.URL)
-	chain.ProcessFilter(req, resp)
-}
-
-// WebService Filter
-func webserviceLogging(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
-	log.Printf("[webservice-filter (logger)] %s,%s\n", req.Request.Method, req.Request.URL)
-	chain.ProcessFilter(req, resp)
-}
-
-// WebService (post-process) Filter (as a struct that defines a FilterFunction)
-func measureTime(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
-	now := time.Now()
-	chain.ProcessFilter(req, resp)
-	log.Printf("[webservice-filter (timer)] %v\n", time.Now().Sub(now))
-}
-
-// Route Filter (defines FilterFunction)
-func routeLogging(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
-	log.Printf("[route-filter (logger)] %s,%s\n", req.Request.Method, req.Request.URL)
-	chain.ProcessFilter(req, resp)
-}
-
-// Route Filter (as a struct that defines a FilterFunction)
-// CountFilter implements a FilterFunction for counting requests.
-type CountFilter struct {
-	count   int
-	counter chan int // for go-routine safe count increments
-}
-
-// NewCountFilter creates and initializes a new CountFilter.
-func NewCountFilter() *CountFilter {
-	c := new(CountFilter)
-	c.counter = make(chan int)
-	go func() {
-		for {
-			c.count += <-c.counter
-		}
-	}()
-	return c
-}
-
-// routeCounter increments the count of the filter (through a channel)
-func (c *CountFilter) routeCounter(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
-	c.counter <- 1
-	log.Printf("[route-filter (counter)] count:%d", c.count)
-	chain.ProcessFilter(req, resp)
-}
-
-// GET http://localhost:8080/users
-//
-func getAllUsers(request *restful.Request, response *restful.Response) {
-	log.Print("getAllUsers")
-	response.WriteEntity(UserList{[]User{{"42", "Gandalf"}, {"3.14", "Pi"}}})
-}
-
-// GET http://localhost:8080/users/42
-//
-func findUser(request *restful.Request, response *restful.Response) {
-	log.Print("findUser")
-	response.WriteEntity(User{"42", "Gandalf"})
-}
diff --git a/vendor/github.com/emicklei/go-restful/examples/restful-form-handling.go b/vendor/github.com/emicklei/go-restful/examples/restful-form-handling.go
deleted file mode 100644
index e85608c9b..000000000
--- a/vendor/github.com/emicklei/go-restful/examples/restful-form-handling.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package main
-
-import (
-	"fmt"
-	"github.com/emicklei/go-restful"
-	"github.com/gorilla/schema"
-	"io"
-	"log"
-	"net/http"
-)
-
-// This example shows how to handle a POST of an HTML form that uses the standard x-www-form-urlencoded content-type.
-// It uses the gorilla web toolkit schema package to decode the form data into a struct.
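The gorilla/schema decoding used below also works standalone, which makes the mapping easier to see: `url.Values` (the type of `req.Request.PostForm`) is a `map[string][]string`, and the decoder maps its keys onto struct fields. A minimal sketch with made-up values:

```go
package main

import (
	"fmt"
	"log"
	"net/url"

	"github.com/gorilla/schema"
)

type Profile struct {
	Name string
	Age  int
}

func main() {
	// Simulated form data, as ParseForm would produce it.
	values := url.Values{"Name": {"Melissa"}, "Age": {"42"}}

	p := new(Profile)
	if err := schema.NewDecoder().Decode(p, values); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("Name=%s, Age=%d\n", p.Name, p.Age)
}
```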
-//
-// GET http://localhost:8080/profiles
-
-type Profile struct {
-	Name string
-	Age  int
-}
-
-var decoder *schema.Decoder
-
-func main() {
-	decoder = schema.NewDecoder()
-	ws := new(restful.WebService)
-	ws.Route(ws.POST("/profiles").Consumes("application/x-www-form-urlencoded").To(postAddress))
-	ws.Route(ws.GET("/profiles").To(addressForm))
-	restful.Add(ws)
-	log.Fatal(http.ListenAndServe(":8080", nil))
-}
-
-func postAddress(req *restful.Request, resp *restful.Response) {
-	err := req.Request.ParseForm()
-	if err != nil {
-		resp.WriteErrorString(http.StatusBadRequest, err.Error())
-		return
-	}
-	p := new(Profile)
-	err = decoder.Decode(p, req.Request.PostForm)
-	if err != nil {
-		resp.WriteErrorString(http.StatusBadRequest, err.Error())
-		return
-	}
-	io.WriteString(resp.ResponseWriter, fmt.Sprintf("Name=%s, Age=%d", p.Name, p.Age))
-}
-
-func addressForm(req *restful.Request, resp *restful.Response) {
-	io.WriteString(resp.ResponseWriter,
-		`<html>
-		<body>

-		<h1>Enter Profile</h1>
-		<form method="post">
-			<label>Name:</label>
-			<input type="text" name="Name"/>
-			<label>Age:</label>
-			<input type="text" name="Age"/>
-			<input type="submit" value="Add"/>
-		</form>
-		</body>
-		</html>`)
-}
diff --git a/vendor/github.com/emicklei/go-restful/examples/restful-hello-world.go b/vendor/github.com/emicklei/go-restful/examples/restful-hello-world.go
deleted file mode 100644
index bf987b805..000000000
--- a/vendor/github.com/emicklei/go-restful/examples/restful-hello-world.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package main
-
-import (
-	"github.com/emicklei/go-restful"
-	"io"
-	"log"
-	"net/http"
-)
-
-// This example shows the minimal code needed to get a restful.WebService working.
-//
-// GET http://localhost:8080/hello
-
-func main() {
-	ws := new(restful.WebService)
-	ws.Route(ws.GET("/hello").To(hello))
-	restful.Add(ws)
-	log.Fatal(http.ListenAndServe(":8080", nil))
-}
-
-func hello(req *restful.Request, resp *restful.Response) {
-	io.WriteString(resp, "world")
-}
diff --git a/vendor/github.com/emicklei/go-restful/examples/restful-html-template.go b/vendor/github.com/emicklei/go-restful/examples/restful-html-template.go
deleted file mode 100644
index d76d9d1e4..000000000
--- a/vendor/github.com/emicklei/go-restful/examples/restful-html-template.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package main
-
-import (
-	"log"
-	"net/http"
-	"text/template"
-
-	"github.com/emicklei/go-restful"
-)
-
-// This example shows how to serve an HTML page using the standard Go template engine.
-//
-// GET http://localhost:8080/
-
-func main() {
-	ws := new(restful.WebService)
-	ws.Route(ws.GET("/").To(home))
-	restful.Add(ws)
-	print("open browser on http://localhost:8080/\n")
-	log.Fatal(http.ListenAndServe(":8080", nil))
-}
-
-type Message struct {
-	Text string
-}
-
-func home(req *restful.Request, resp *restful.Response) {
-	p := &Message{"restful-html-template demo"}
-	// you might want to cache compiled templates
-	t, err := template.ParseFiles("home.html")
-	if err != nil {
-		log.Fatalf("Template gave: %s", err)
-	}
-	t.Execute(resp.ResponseWriter, p)
-}
diff --git a/vendor/github.com/emicklei/go-restful/examples/restful-multi-containers.go b/vendor/github.com/emicklei/go-restful/examples/restful-multi-containers.go
deleted file mode 100644
index 3056d3ea2..000000000
--- a/vendor/github.com/emicklei/go-restful/examples/restful-multi-containers.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package main
-
-import (
-	"github.com/emicklei/go-restful"
-	"io"
-	"log"
-	"net/http"
-)
-
-// This example shows how to have a program with two WebService containers,
-// each with an http server listening on its own port.
-//
-// The first "hello" is added to the restful.DefaultContainer (and uses DefaultServeMux).
-// For the second "hello", a new container and ServeMux are created,
-// which requires a new http.Server with the container being the Handler.
-// The first server is spawned in its own goroutine so that the program can proceed to create the second.
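On the template example's hint above about caching compiled templates: parsing once at start-up avoids re-reading home.html on every request, and `template.Must` turns a missing or broken template into an immediate start-up failure instead of a per-request one. A hypothetical variant of that handler (Message as defined in the example):

```go
package main

import (
	"log"
	"text/template"

	"github.com/emicklei/go-restful"
)

// Parsed once; template.Must panics at start-up if home.html is absent.
var homeTmpl = template.Must(template.ParseFiles("home.html"))

func home(req *restful.Request, resp *restful.Response) {
	if err := homeTmpl.Execute(resp.ResponseWriter, &Message{"restful-html-template demo"}); err != nil {
		log.Printf("template execution failed: %s", err)
	}
}
```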
-//
-// GET http://localhost:8080/hello
-// GET http://localhost:8081/hello
-
-func main() {
-	ws := new(restful.WebService)
-	ws.Route(ws.GET("/hello").To(hello))
-	restful.Add(ws)
-	go func() {
-		log.Fatal(http.ListenAndServe(":8080", nil))
-	}()
-
-	container2 := restful.NewContainer()
-	ws2 := new(restful.WebService)
-	ws2.Route(ws2.GET("/hello").To(hello2))
-	container2.Add(ws2)
-	server := &http.Server{Addr: ":8081", Handler: container2}
-	log.Fatal(server.ListenAndServe())
-}
-
-func hello(req *restful.Request, resp *restful.Response) {
-	io.WriteString(resp, "default world")
-}
-
-func hello2(req *restful.Request, resp *restful.Response) {
-	io.WriteString(resp, "second world")
-}
diff --git a/vendor/github.com/emicklei/go-restful/examples/restful-no-cache-filter.go b/vendor/github.com/emicklei/go-restful/examples/restful-no-cache-filter.go
deleted file mode 100644
index 8e4540f46..000000000
--- a/vendor/github.com/emicklei/go-restful/examples/restful-no-cache-filter.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package main
-
-import (
-	"io"
-	"log"
-	"net/http"
-
-	"github.com/emicklei/go-restful"
-)
-
-// This example shows how to use a WebService filter that sets the HTTP headers that disable browser caching.
-//
-// GET http://localhost:8080/hello
-
-func main() {
-	ws := new(restful.WebService)
-	ws.Filter(restful.NoBrowserCacheFilter)
-	ws.Route(ws.GET("/hello").To(hello))
-	restful.Add(ws)
-	log.Fatal(http.ListenAndServe(":8080", nil))
-}
-
-func hello(req *restful.Request, resp *restful.Response) {
-	io.WriteString(resp, "world")
-}
diff --git a/vendor/github.com/emicklei/go-restful/examples/restful-options-filter.go b/vendor/github.com/emicklei/go-restful/examples/restful-options-filter.go
deleted file mode 100644
index 79ccce558..000000000
--- a/vendor/github.com/emicklei/go-restful/examples/restful-options-filter.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package main
-
-import (
-	"github.com/emicklei/go-restful"
-	"io"
-	"log"
-	"net/http"
-)
-
-// This example shows how to use the OPTIONSFilter on a Container
-//
-// OPTIONS http://localhost:8080/users
-//
-// OPTIONS http://localhost:8080/users/1
-
-type UserResource struct{}
-
-func (u UserResource) RegisterTo(container *restful.Container) {
-	ws := new(restful.WebService)
-	ws.
-		Path("/users").
-		Consumes("*/*").
-		Produces("*/*")
-
-	ws.Route(ws.GET("/{user-id}").To(u.nop))
-	ws.Route(ws.POST("").To(u.nop))
-	ws.Route(ws.PUT("/{user-id}").To(u.nop))
-	ws.Route(ws.DELETE("/{user-id}").To(u.nop))
-
-	container.Add(ws)
-}
-
-func (u UserResource) nop(request *restful.Request, response *restful.Response) {
-	io.WriteString(response.ResponseWriter, "this would be a normal response")
-}
-
-func main() {
-	wsContainer := restful.NewContainer()
-	u := UserResource{}
-	u.RegisterTo(wsContainer)
-
-	// Add container filter to respond to OPTIONS
-	wsContainer.Filter(wsContainer.OPTIONSFilter)
-
-	// For use on the default container, you can write
-	// restful.Filter(restful.OPTIONSFilter())
-
-	log.Print("start listening on localhost:8080")
-	server := &http.Server{Addr: ":8080", Handler: wsContainer}
-	log.Fatal(server.ListenAndServe())
-}
diff --git a/vendor/github.com/emicklei/go-restful/examples/restful-path-tail.go b/vendor/github.com/emicklei/go-restful/examples/restful-path-tail.go
deleted file mode 100644
index f30d6716a..000000000
--- a/vendor/github.com/emicklei/go-restful/examples/restful-path-tail.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package main
-
-import (
-	.
"github.com/emicklei/go-restful" - "io" - "log" - "net/http" -) - -// This example shows how to create a Route matching the "tail" of a path. -// Requires the use of a CurlyRouter and the star "*" path parameter pattern. -// -// GET http://localhost:8080/basepath/some/other/location/test.xml - -func main() { - DefaultContainer.Router(CurlyRouter{}) - ws := new(WebService) - ws.Route(ws.GET("/basepath/{resource:*}").To(staticFromPathParam)) - Add(ws) - - println("[go-restful] serve path tails from http://localhost:8080/basepath") - log.Fatal(http.ListenAndServe(":8080", nil)) -} - -func staticFromPathParam(req *Request, resp *Response) { - io.WriteString(resp, "Tail="+req.PathParameter("resource")) -} diff --git a/vendor/github.com/emicklei/go-restful/examples/restful-pre-post-filters.go b/vendor/github.com/emicklei/go-restful/examples/restful-pre-post-filters.go deleted file mode 100644 index 0b55f1493..000000000 --- a/vendor/github.com/emicklei/go-restful/examples/restful-pre-post-filters.go +++ /dev/null @@ -1,98 +0,0 @@ -package main - -import ( - "github.com/emicklei/go-restful" - "io" - "log" - "net/http" -) - -// This example shows how the different types of filters are called in the request-response flow. -// The call chain is logged on the console when sending an http request. -// -// GET http://localhost:8080/1 -// GET http://localhost:8080/2 - -var indentLevel int - -func container_filter_A(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) { - log.Printf("url path:%v\n", req.Request.URL) - trace("container_filter_A: before", 1) - chain.ProcessFilter(req, resp) - trace("container_filter_A: after", -1) -} - -func container_filter_B(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) { - trace("container_filter_B: before", 1) - chain.ProcessFilter(req, resp) - trace("container_filter_B: after", -1) -} - -func service_filter_A(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) { - trace("service_filter_A: before", 1) - chain.ProcessFilter(req, resp) - trace("service_filter_A: after", -1) -} - -func service_filter_B(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) { - trace("service_filter_B: before", 1) - chain.ProcessFilter(req, resp) - trace("service_filter_B: after", -1) -} - -func route_filter_A(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) { - trace("route_filter_A: before", 1) - chain.ProcessFilter(req, resp) - trace("route_filter_A: after", -1) -} - -func route_filter_B(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) { - trace("route_filter_B: before", 1) - chain.ProcessFilter(req, resp) - trace("route_filter_B: after", -1) -} - -func trace(what string, delta int) { - indented := what - if delta < 0 { - indentLevel += delta - } - for t := 0; t < indentLevel; t++ { - indented = "." 
+ indented
-	}
-	log.Printf("%s", indented)
-	if delta > 0 {
-		indentLevel += delta
-	}
-}
-
-func main() {
-	restful.Filter(container_filter_A)
-	restful.Filter(container_filter_B)
-
-	ws1 := new(restful.WebService)
-	ws1.Path("/1")
-	ws1.Filter(service_filter_A)
-	ws1.Filter(service_filter_B)
-	ws1.Route(ws1.GET("").To(doit1).Filter(route_filter_A).Filter(route_filter_B))
-
-	ws2 := new(restful.WebService)
-	ws2.Path("/2")
-	ws2.Filter(service_filter_A)
-	ws2.Filter(service_filter_B)
-	ws2.Route(ws2.GET("").To(doit2).Filter(route_filter_A).Filter(route_filter_B))
-
-	restful.Add(ws1)
-	restful.Add(ws2)
-
-	log.Print("go-restful example listening on http://localhost:8080/1 and http://localhost:8080/2")
-	log.Fatal(http.ListenAndServe(":8080", nil))
-}
-
-func doit1(req *restful.Request, resp *restful.Response) {
-	io.WriteString(resp, "nothing to see in 1")
-}
-
-func doit2(req *restful.Request, resp *restful.Response) {
-	io.WriteString(resp, "nothing to see in 2")
-}
diff --git a/vendor/github.com/emicklei/go-restful/examples/restful-resource-functions.go b/vendor/github.com/emicklei/go-restful/examples/restful-resource-functions.go
deleted file mode 100644
index 09e6e5663..000000000
--- a/vendor/github.com/emicklei/go-restful/examples/restful-resource-functions.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package main
-
-import (
-	"github.com/emicklei/go-restful"
-	"log"
-	"net/http"
-)
-
-// This example shows how to use methods as RouteFunctions for WebServices.
-// The ProductResource has a Register() method that creates and initializes
-// a WebService to expose its methods as REST operations.
-// The WebService is added to the restful.DefaultContainer.
-// A ProductResource is typically created using some data access object.
-//
-// GET http://localhost:8080/products/1
-// POST http://localhost:8080/products
-// <Product><Id>1</Id><Title>The First</Title></Product>
-
-type Product struct {
-	Id, Title string
-}
-
-type ProductResource struct {
-	// typically reference a DAO (data-access-object)
-}
-
-func (p ProductResource) getOne(req *restful.Request, resp *restful.Response) {
-	id := req.PathParameter("id")
-	log.Println("getting product with id:" + id)
-	resp.WriteEntity(Product{Id: id, Title: "test"})
-}
-
-func (p ProductResource) postOne(req *restful.Request, resp *restful.Response) {
-	updatedProduct := new(Product)
-	err := req.ReadEntity(updatedProduct)
-	if err != nil { // bad request
-		resp.WriteErrorString(http.StatusBadRequest, err.Error())
-		return
-	}
-	log.Println("updating product with id:" + updatedProduct.Id)
-}
-
-func (p ProductResource) Register() {
-	ws := new(restful.WebService)
-	ws.Path("/products")
-	ws.Consumes(restful.MIME_XML)
-	ws.Produces(restful.MIME_XML)
-
-	ws.Route(ws.GET("/{id}").To(p.getOne).
-		Doc("get the product by its id").
-		Param(ws.PathParameter("id", "identifier of the product").DataType("string")))
-
-	ws.Route(ws.POST("").To(p.postOne).
-		Doc("update or create a product").
-		Param(ws.BodyParameter("Product", "a Product (XML)").DataType("main.Product")))
-
-	restful.Add(ws)
-}
-
-func main() {
-	ProductResource{}.Register()
-	log.Fatal(http.ListenAndServe(":8080", nil))
-}
diff --git a/vendor/github.com/emicklei/go-restful/examples/restful-serve-static.go b/vendor/github.com/emicklei/go-restful/examples/restful-serve-static.go
deleted file mode 100644
index 34faf6078..000000000
--- a/vendor/github.com/emicklei/go-restful/examples/restful-serve-static.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package main
-
-import (
-	"fmt"
-	"log"
-	"net/http"
-	"path"
-
-	"github.com/emicklei/go-restful"
-)
-
-// This example shows how to define methods that serve static files.
-// It uses the standard http.ServeFile method.
-//
-// GET http://localhost:8080/static/test.xml
-// GET http://localhost:8080/static/
-//
-// GET http://localhost:8080/static?resource=subdir/test.xml
-
-var rootdir = "/tmp"
-
-func main() {
-	restful.DefaultContainer.Router(restful.CurlyRouter{})
-
-	ws := new(restful.WebService)
-	ws.Route(ws.GET("/static/{subpath:*}").To(staticFromPathParam))
-	ws.Route(ws.GET("/static").To(staticFromQueryParam))
-	restful.Add(ws)
-
-	println("[go-restful] serving files on http://localhost:8080/static from local /tmp")
-	log.Fatal(http.ListenAndServe(":8080", nil))
-}
-
-func staticFromPathParam(req *restful.Request, resp *restful.Response) {
-	actual := path.Join(rootdir, req.PathParameter("subpath"))
-	fmt.Printf("serving %s ... (from %s)\n", actual, req.PathParameter("subpath"))
-	http.ServeFile(
-		resp.ResponseWriter,
-		req.Request,
-		actual)
-}
-
-func staticFromQueryParam(req *restful.Request, resp *restful.Response) {
-	http.ServeFile(
-		resp.ResponseWriter,
-		req.Request,
-		path.Join(rootdir, req.QueryParameter("resource")))
-}
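Worth noting about the static-file handlers above: `path.Join` cleans the joined result, but a parameter containing `../` segments can still climb out of rootdir (Join("/tmp", "../etc") cleans to "/etc"). Cleaning the parameter as an absolute path first pins the result under rootdir. A hypothetical hardened variant (the function name is invented):

```go
package main

import (
	"net/http"
	"path"

	"github.com/emicklei/go-restful"
)

var rootdir = "/tmp"

// staticFromPathParamSafe refuses to escape rootdir: Clean("/"+sub) collapses
// any leading "../" segments before the join.
func staticFromPathParamSafe(req *restful.Request, resp *restful.Response) {
	sub := path.Clean("/" + req.PathParameter("subpath"))
	http.ServeFile(resp.ResponseWriter, req.Request, path.Join(rootdir, sub))
}
```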
diff --git a/vendor/github.com/emicklei/go-restful/examples/restful-swagger.go b/vendor/github.com/emicklei/go-restful/examples/restful-swagger.go
deleted file mode 100644
index ecbd71b20..000000000
--- a/vendor/github.com/emicklei/go-restful/examples/restful-swagger.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package main
-
-import (
-	"log"
-	"net/http"
-
-	"github.com/emicklei/go-restful"
-	"github.com/emicklei/go-restful-swagger12"
-)
-
-type Book struct {
-	Title  string
-	Author string
-}
-
-func main() {
-	ws := new(restful.WebService)
-	ws.Path("/books")
-	ws.Consumes(restful.MIME_JSON, restful.MIME_XML)
-	ws.Produces(restful.MIME_JSON, restful.MIME_XML)
-	restful.Add(ws)
-
-	ws.Route(ws.GET("/{medium}").To(noop).
-		Doc("Search all books").
-		Param(ws.PathParameter("medium", "digital or paperback").DataType("string")).
-		Param(ws.QueryParameter("language", "en,nl,de").DataType("string")).
-		Param(ws.HeaderParameter("If-Modified-Since", "last known timestamp").DataType("datetime")).
-		Do(returns200, returns500))
-
-	ws.Route(ws.PUT("/{medium}").To(noop).
-		Doc("Add a new book").
-		Param(ws.PathParameter("medium", "digital or paperback").DataType("string")).
-		Reads(Book{}))
-
-	// You can install the Swagger Service which provides a nice Web UI on your REST API.
-	// You need to download the Swagger HTML5 assets and change the FilePath location in the config below.
-	// Open http://localhost:8080/apidocs and enter http://localhost:8080/apidocs.json in the api input field.
-	config := swagger.Config{
-		WebServices:    restful.DefaultContainer.RegisteredWebServices(), // you control what services are visible
-		WebServicesUrl: "http://localhost:8080",
-		ApiPath:        "/apidocs.json",
-
-		// Optionally, specify where the UI is located
-		SwaggerPath:     "/apidocs/",
-		SwaggerFilePath: "/Users/emicklei/xProjects/swagger-ui/dist"}
-	swagger.RegisterSwaggerService(config, restful.DefaultContainer)
-
-	log.Print("start listening on localhost:8080")
-	server := &http.Server{Addr: ":8080", Handler: restful.DefaultContainer}
-	log.Fatal(server.ListenAndServe())
-}
-
-func noop(req *restful.Request, resp *restful.Response) {}
-
-func returns200(b *restful.RouteBuilder) {
-	b.Returns(http.StatusOK, "OK", Book{})
-}
-
-func returns500(b *restful.RouteBuilder) {
-	b.Returns(http.StatusInternalServerError, "Bummer, something went wrong", nil)
-}
diff --git a/vendor/github.com/emicklei/go-restful/examples/restful-user-resource.go b/vendor/github.com/emicklei/go-restful/examples/restful-user-resource.go
deleted file mode 100644
index a1efc46cc..000000000
--- a/vendor/github.com/emicklei/go-restful/examples/restful-user-resource.go
+++ /dev/null
@@ -1,152 +0,0 @@
-package main
-
-import (
-	"log"
-	"net/http"
-	"strconv"
-
-	"github.com/emicklei/go-restful"
-	"github.com/emicklei/go-restful-swagger12"
-)
-
-// This example shows a complete (GET,PUT,POST,DELETE) conventional example of
-// a REST Resource including documentation to be served by e.g. a Swagger UI.
-// It is recommended to create a Resource struct (UserResource) that can encapsulate
-// an object that provides domain access (a DAO).
-// It has a Register method including the complete Route mapping to methods together
-// with all the appropriate documentation.
-//
-// POST http://localhost:8080/users
-// <User><Id>1</Id><Name>Melissa Raspberry</Name></User>
-//
-// GET http://localhost:8080/users/1
-//
-// PUT http://localhost:8080/users/1
-// <User><Id>1</Id><Name>Melissa</Name></User>
-//
-// DELETE http://localhost:8080/users/1
-//
-
-type User struct {
-	Id, Name string
-}
-
-type UserResource struct {
-	// normally one would use DAO (data access object)
-	users map[string]User
-}
-
-func (u UserResource) Register(container *restful.Container) {
-	ws := new(restful.WebService)
-	ws.
-		Path("/users").
-		Doc("Manage Users").
-		Consumes(restful.MIME_XML, restful.MIME_JSON).
-		Produces(restful.MIME_JSON, restful.MIME_XML) // you can specify this per route as well
-
-	ws.Route(ws.GET("/{user-id}").To(u.findUser).
-		// docs
-		Doc("get a user").
-		Operation("findUser").
-		Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
-		Writes(User{})) // on the response
-
-	ws.Route(ws.PUT("/{user-id}").To(u.updateUser).
-		// docs
-		Doc("update a user").
-		Operation("updateUser").
-		Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
-		ReturnsError(409, "duplicate user-id", nil).
-		Reads(User{})) // from the request
-
-	ws.Route(ws.POST("").To(u.createUser).
-		// docs
-		Doc("create a user").
-		Operation("createUser").
-		Reads(User{})) // from the request
-
-	ws.Route(ws.DELETE("/{user-id}").To(u.removeUser).
-		// docs
-		Doc("delete a user").
-		Operation("removeUser").
-		Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")))
-
-	container.Add(ws)
-}
-
-// GET http://localhost:8080/users/1
-//
-func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
-	id := request.PathParameter("user-id")
-	usr := u.users[id]
-	if len(usr.Id) == 0 {
-		response.AddHeader("Content-Type", "text/plain")
-		response.WriteErrorString(http.StatusNotFound, "404: User could not be found.")
-		return
-	}
-	response.WriteEntity(usr)
-}
-
-// POST http://localhost:8080/users
-// <User><Name>Melissa</Name></User>
-//
-func (u *UserResource) createUser(request *restful.Request, response *restful.Response) {
-	usr := new(User)
-	err := request.ReadEntity(usr)
-	if err != nil {
-		response.AddHeader("Content-Type", "text/plain")
-		response.WriteErrorString(http.StatusInternalServerError, err.Error())
-		return
-	}
-	usr.Id = strconv.Itoa(len(u.users) + 1) // simple id generation
-	u.users[usr.Id] = *usr
-	response.WriteHeaderAndEntity(http.StatusCreated, usr)
-}
-
-// PUT http://localhost:8080/users/1
-// <User><Id>1</Id><Name>Melissa Raspberry</Name></User>
-//
-func (u *UserResource) updateUser(request *restful.Request, response *restful.Response) {
-	usr := new(User)
-	err := request.ReadEntity(&usr)
-	if err != nil {
-		response.AddHeader("Content-Type", "text/plain")
-		response.WriteErrorString(http.StatusInternalServerError, err.Error())
-		return
-	}
-	u.users[usr.Id] = *usr
-	response.WriteEntity(usr)
-}
-
-// DELETE http://localhost:8080/users/1
-//
-func (u *UserResource) removeUser(request *restful.Request, response *restful.Response) {
-	id := request.PathParameter("user-id")
-	delete(u.users, id)
-}
-
-func main() {
-	// to see what happens in the package, uncomment the following
-	//restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile))
-
-	wsContainer := restful.NewContainer()
-	u := UserResource{map[string]User{}}
-	u.Register(wsContainer)
-
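One caveat about the handlers above: the plain `map[string]User` is read and written by concurrent requests without synchronization, and the `len(u.users)+1` id generation can also race. A mutex-guarded store is a hypothetical fix (User as in this example; all names invented):

```go
package main

import (
	"strconv"
	"sync"
)

// userStore guards the map and the id counter with a single mutex.
type userStore struct {
	mu    sync.Mutex
	users map[string]User
	next  int
}

func newUserStore() *userStore {
	return &userStore{users: map[string]User{}}
}

// create assigns a fresh id and stores the user atomically.
func (s *userStore) create(u User) User {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.next++
	u.Id = strconv.Itoa(s.next)
	s.users[u.Id] = u
	return u
}

func (s *userStore) get(id string) (User, bool) {
	s.mu.Lock()
	defer s.mu.Unlock()
	u, ok := s.users[id]
	return u, ok
}
```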
-	// Optionally, you can install the Swagger Service which provides a nice Web UI on your REST API.
-	// You need to download the Swagger HTML5 assets and change the FilePath location in the config below.
-	// Open http://localhost:8080/apidocs and enter http://localhost:8080/apidocs.json in the api input field.
-	config := swagger.Config{
-		WebServices:    wsContainer.RegisteredWebServices(), // you control what services are visible
-		WebServicesUrl: "http://localhost:8080",
-		ApiPath:        "/apidocs.json",
-
-		// Optionally, specify where the UI is located
-		SwaggerPath:     "/apidocs/",
-		SwaggerFilePath: "/Users/emicklei/xProjects/swagger-ui/dist"}
-	swagger.RegisterSwaggerService(config, wsContainer)
-
-	log.Print("start listening on localhost:8080")
-	server := &http.Server{Addr: ":8080", Handler: wsContainer}
-	log.Fatal(server.ListenAndServe())
-}
diff --git a/vendor/github.com/emicklei/go-restful/examples/restful-user-service.go b/vendor/github.com/emicklei/go-restful/examples/restful-user-service.go
deleted file mode 100644
index 46f31d896..000000000
--- a/vendor/github.com/emicklei/go-restful/examples/restful-user-service.go
+++ /dev/null
@@ -1,143 +0,0 @@
-package main
-
-import (
-	"log"
-	"net/http"
-
-	"github.com/emicklei/go-restful"
-	"github.com/emicklei/go-restful-swagger12"
-)
-
-// This example is functionally the same as the example in restful-user-resource.go
-// with the only difference being that it is served using the restful.DefaultContainer
-
-type User struct {
-	Id, Name string
-}
-
-type UserService struct {
-	// normally one would use DAO (data access object)
-	users map[string]User
-}
-
-func (u UserService) Register() {
-	ws := new(restful.WebService)
-	ws.
-		Path("/users").
-		Consumes(restful.MIME_XML, restful.MIME_JSON).
-		Produces(restful.MIME_JSON, restful.MIME_XML) // you can specify this per route as well
-
-	ws.Route(ws.GET("/").To(u.findAllUsers).
-		// docs
-		Doc("get all users").
-		Operation("findAllUsers").
-		Writes([]User{}).
-		Returns(200, "OK", nil))
-
-	ws.Route(ws.GET("/{user-id}").To(u.findUser).
-		// docs
-		Doc("get a user").
-		Operation("findUser").
-		Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
-		Writes(User{}). // on the response
-		Returns(404, "Not Found", nil))
-
-	ws.Route(ws.PUT("/{user-id}").To(u.updateUser).
-		// docs
-		Doc("update a user").
-		Operation("updateUser").
-		Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
-		Reads(User{})) // from the request
-
-	ws.Route(ws.PUT("").To(u.createUser).
-		// docs
-		Doc("create a user").
-		Operation("createUser").
-		Reads(User{})) // from the request
-
-	ws.Route(ws.DELETE("/{user-id}").To(u.removeUser).
-		// docs
-		Doc("delete a user").
-		Operation("removeUser").
-		Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")))
-
-	restful.Add(ws)
-}
-
-// GET http://localhost:8080/users
-//
-func (u UserService) findAllUsers(request *restful.Request, response *restful.Response) {
-	list := []User{}
-	for _, each := range u.users {
-		list = append(list, each)
-	}
-	response.WriteEntity(list)
-}
-
-// GET http://localhost:8080/users/1
-//
-func (u UserService) findUser(request *restful.Request, response *restful.Response) {
-	id := request.PathParameter("user-id")
-	usr := u.users[id]
-	if len(usr.Id) == 0 {
-		response.WriteErrorString(http.StatusNotFound, "User could not be found.")
-	} else {
-		response.WriteEntity(usr)
-	}
-}
-
-// PUT http://localhost:8080/users/1
-// <User><Id>1</Id><Name>Melissa Raspberry</Name></User>
-//
-func (u *UserService) updateUser(request *restful.Request, response *restful.Response) {
-	usr := new(User)
-	err := request.ReadEntity(&usr)
-	if err == nil {
-		u.users[usr.Id] = *usr
-		response.WriteEntity(usr)
-	} else {
-		response.WriteError(http.StatusInternalServerError, err)
-	}
-}
-
-// PUT http://localhost:8080/users/1
-// <User><Id>1</Id><Name>Melissa</Name></User>
-//
-func (u *UserService) createUser(request *restful.Request, response *restful.Response) {
-	usr := User{Id: request.PathParameter("user-id")}
-	err := request.ReadEntity(&usr)
-	if err == nil {
-		u.users[usr.Id] = usr
-		response.WriteHeaderAndEntity(http.StatusCreated, usr)
-	} else {
-		response.WriteError(http.StatusInternalServerError, err)
-	}
-}
-
-// DELETE http://localhost:8080/users/1
-//
-func (u *UserService) removeUser(request *restful.Request, response *restful.Response) {
-	id := request.PathParameter("user-id")
-	delete(u.users, id)
-}
-
-func main() {
-	u := UserService{map[string]User{}}
-	u.Register()
-
-	// Optionally, you can install the Swagger Service which provides a nice Web UI on your REST API.
-	// You need to download the Swagger HTML5 assets and change the FilePath location in the config below.
-	// Open http://localhost:8080/apidocs and enter http://localhost:8080/apidocs.json in the api input field.
-	config := swagger.Config{
-		WebServices:    restful.RegisteredWebServices(), // you control what services are visible
-		WebServicesUrl: "http://localhost:8080",
-		ApiPath:        "/apidocs.json",
-
-		// Optionally, specify where the UI is located
-		SwaggerPath:     "/apidocs/",
-		SwaggerFilePath: "/Users/emicklei/Projects/swagger-ui/dist"}
-	swagger.InstallSwaggerService(config)
-
-	log.Print("start listening on localhost:8080")
-	log.Fatal(http.ListenAndServe(":8080", nil))
-}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/examples/testproto/test.pb.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/examples/testproto/test.pb.go
deleted file mode 100644
index ec4664904..000000000
--- a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/examples/testproto/test.pb.go
+++ /dev/null
@@ -1,329 +0,0 @@
-// Code generated by protoc-gen-go.
-// source: test.proto
-// DO NOT EDIT!
-
-/*
-Package mwitkow_testproto is a generated protocol buffer package.
-
-It is generated from these files:
-	test.proto
-
-It has these top-level messages:
-	Empty
-	PingRequest
-	PingResponse
-*/
-package mwitkow_testproto
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-import (
-	context "golang.org/x/net/context"
-	grpc "google.golang.org/grpc"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Empty struct { -} - -func (m *Empty) Reset() { *m = Empty{} } -func (m *Empty) String() string { return proto.CompactTextString(m) } -func (*Empty) ProtoMessage() {} -func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -type PingRequest struct { - Value string `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"` - SleepTimeMs int32 `protobuf:"varint,2,opt,name=sleep_time_ms,json=sleepTimeMs" json:"sleep_time_ms,omitempty"` - ErrorCodeReturned uint32 `protobuf:"varint,3,opt,name=error_code_returned,json=errorCodeReturned" json:"error_code_returned,omitempty"` -} - -func (m *PingRequest) Reset() { *m = PingRequest{} } -func (m *PingRequest) String() string { return proto.CompactTextString(m) } -func (*PingRequest) ProtoMessage() {} -func (*PingRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -func (m *PingRequest) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -func (m *PingRequest) GetSleepTimeMs() int32 { - if m != nil { - return m.SleepTimeMs - } - return 0 -} - -func (m *PingRequest) GetErrorCodeReturned() uint32 { - if m != nil { - return m.ErrorCodeReturned - } - return 0 -} - -type PingResponse struct { - Value string `protobuf:"bytes,1,opt,name=Value,json=value" json:"Value,omitempty"` - Counter int32 `protobuf:"varint,2,opt,name=counter" json:"counter,omitempty"` -} - -func (m *PingResponse) Reset() { *m = PingResponse{} } -func (m *PingResponse) String() string { return proto.CompactTextString(m) } -func (*PingResponse) ProtoMessage() {} -func (*PingResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -func (m *PingResponse) GetValue() string { - if m != nil { - return m.Value - } - return "" -} - -func (m *PingResponse) GetCounter() int32 { - if m != nil { - return m.Counter - } - return 0 -} - -func init() { - proto.RegisterType((*Empty)(nil), "mwitkow.testproto.Empty") - proto.RegisterType((*PingRequest)(nil), "mwitkow.testproto.PingRequest") - proto.RegisterType((*PingResponse)(nil), "mwitkow.testproto.PingResponse") -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. 
-const _ = grpc.SupportPackageIsVersion4 - -// Client API for TestService service - -type TestServiceClient interface { - PingEmpty(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*PingResponse, error) - Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error) - PingError(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*Empty, error) - PingList(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (TestService_PingListClient, error) -} - -type testServiceClient struct { - cc *grpc.ClientConn -} - -func NewTestServiceClient(cc *grpc.ClientConn) TestServiceClient { - return &testServiceClient{cc} -} - -func (c *testServiceClient) PingEmpty(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*PingResponse, error) { - out := new(PingResponse) - err := grpc.Invoke(ctx, "/mwitkow.testproto.TestService/PingEmpty", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *testServiceClient) Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error) { - out := new(PingResponse) - err := grpc.Invoke(ctx, "/mwitkow.testproto.TestService/Ping", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *testServiceClient) PingError(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*Empty, error) { - out := new(Empty) - err := grpc.Invoke(ctx, "/mwitkow.testproto.TestService/PingError", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *testServiceClient) PingList(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (TestService_PingListClient, error) { - stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[0], c.cc, "/mwitkow.testproto.TestService/PingList", opts...) 
- if err != nil { - return nil, err - } - x := &testServicePingListClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type TestService_PingListClient interface { - Recv() (*PingResponse, error) - grpc.ClientStream -} - -type testServicePingListClient struct { - grpc.ClientStream -} - -func (x *testServicePingListClient) Recv() (*PingResponse, error) { - m := new(PingResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// Server API for TestService service - -type TestServiceServer interface { - PingEmpty(context.Context, *Empty) (*PingResponse, error) - Ping(context.Context, *PingRequest) (*PingResponse, error) - PingError(context.Context, *PingRequest) (*Empty, error) - PingList(*PingRequest, TestService_PingListServer) error -} - -func RegisterTestServiceServer(s *grpc.Server, srv TestServiceServer) { - s.RegisterService(&_TestService_serviceDesc, srv) -} - -func _TestService_PingEmpty_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TestServiceServer).PingEmpty(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/mwitkow.testproto.TestService/PingEmpty", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TestServiceServer).PingEmpty(ctx, req.(*Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _TestService_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PingRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TestServiceServer).Ping(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/mwitkow.testproto.TestService/Ping", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TestServiceServer).Ping(ctx, req.(*PingRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _TestService_PingError_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PingRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TestServiceServer).PingError(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/mwitkow.testproto.TestService/PingError", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TestServiceServer).PingError(ctx, req.(*PingRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _TestService_PingList_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(PingRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(TestServiceServer).PingList(m, &testServicePingListServer{stream}) -} - -type TestService_PingListServer interface { - Send(*PingResponse) error - grpc.ServerStream -} - -type testServicePingListServer struct { - grpc.ServerStream -} - -func (x *testServicePingListServer) Send(m *PingResponse) error { - return x.ServerStream.SendMsg(m) -} - -var _TestService_serviceDesc = grpc.ServiceDesc{ - ServiceName: 
"mwitkow.testproto.TestService", - HandlerType: (*TestServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "PingEmpty", - Handler: _TestService_PingEmpty_Handler, - }, - { - MethodName: "Ping", - Handler: _TestService_Ping_Handler, - }, - { - MethodName: "PingError", - Handler: _TestService_PingError_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "PingList", - Handler: _TestService_PingList_Handler, - ServerStreams: true, - }, - }, - Metadata: "test.proto", -} - -func init() { proto.RegisterFile("test.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 273 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xac, 0x90, 0xcf, 0x4b, 0xc3, 0x30, - 0x14, 0xc7, 0xd7, 0x69, 0x9d, 0x7b, 0x75, 0x87, 0x45, 0x0f, 0xc5, 0x83, 0x96, 0x9c, 0x7a, 0x0a, - 0xa2, 0x77, 0x2f, 0x22, 0x2a, 0x28, 0x4a, 0x1c, 0x5e, 0x8b, 0xb6, 0x0f, 0x09, 0x2e, 0x4d, 0x4d, - 0x5e, 0x57, 0xfc, 0xdf, 0xfc, 0xe3, 0x24, 0x59, 0x05, 0x61, 0x0e, 0x3d, 0xec, 0x98, 0xcf, 0xf7, - 0xf1, 0xfd, 0x11, 0x00, 0x42, 0x47, 0xa2, 0xb1, 0x86, 0x0c, 0x9b, 0xea, 0x4e, 0xd1, 0x9b, 0xe9, - 0x84, 0x67, 0x01, 0xf1, 0x11, 0xc4, 0x97, 0xba, 0xa1, 0x0f, 0xde, 0x41, 0xf2, 0xa0, 0xea, 0x57, - 0x89, 0xef, 0x2d, 0x3a, 0x62, 0x07, 0x10, 0x2f, 0x9e, 0xe7, 0x2d, 0xa6, 0x51, 0x16, 0xe5, 0x63, - 0xb9, 0x7c, 0x30, 0x0e, 0x13, 0x37, 0x47, 0x6c, 0x0a, 0x52, 0x1a, 0x0b, 0xed, 0xd2, 0x61, 0x16, - 0xe5, 0xb1, 0x4c, 0x02, 0x9c, 0x29, 0x8d, 0x77, 0x8e, 0x09, 0xd8, 0x47, 0x6b, 0x8d, 0x2d, 0x4a, - 0x53, 0x61, 0x61, 0x91, 0x5a, 0x5b, 0x63, 0x95, 0x6e, 0x65, 0x51, 0x3e, 0x91, 0xd3, 0x20, 0x5d, - 0x98, 0x0a, 0x65, 0x2f, 0xf0, 0x73, 0xd8, 0x5b, 0x06, 0xbb, 0xc6, 0xd4, 0x0e, 0x7d, 0xf2, 0xd3, - 0x6a, 0x72, 0x0a, 0xa3, 0xd2, 0xb4, 0x35, 0xa1, 0xed, 0x33, 0xbf, 0x9f, 0xa7, 0x9f, 0x43, 0x48, - 0x66, 0xe8, 0xe8, 0x11, 0xed, 0x42, 0x95, 0xc8, 0xae, 0x61, 0xec, 0xfd, 0xc2, 0x2a, 0x96, 0x8a, - 0x95, 0xc9, 0x22, 0x28, 0x87, 0xc7, 0xbf, 0x28, 0x3f, 0x7b, 0xf0, 0x01, 0xbb, 0x81, 0x6d, 0x4f, - 0xd8, 0xd1, 0xda, 0xd3, 0xf0, 0x57, 0xff, 0xb1, 0xba, 0xea, 0x4b, 0xf9, 0xf5, 0x7f, 0xfa, 0xad, - 0x2d, 0xcd, 0x07, 0xec, 0x1e, 0x76, 0xfd, 0xe9, 0xad, 0x72, 0xb4, 0x81, 0x5e, 0x27, 0xd1, 0xcb, - 0x4e, 0xe0, 0x67, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x38, 0x3e, 0x02, 0xe9, 0x28, 0x02, 0x00, - 0x00, -} diff --git a/vendor/github.com/stretchr/testify/.travis.gofmt.sh b/vendor/github.com/stretchr/testify/.travis.gofmt.sh deleted file mode 100755 index bfffdca8b..000000000 --- a/vendor/github.com/stretchr/testify/.travis.gofmt.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -if [ -n "$(gofmt -l .)" ]; then - echo "Go code is not formatted:" - gofmt -d . - exit 1 -fi diff --git a/vendor/github.com/stretchr/testify/.travis.gogenerate.sh b/vendor/github.com/stretchr/testify/.travis.gogenerate.sh deleted file mode 100755 index 161b449cd..000000000 --- a/vendor/github.com/stretchr/testify/.travis.gogenerate.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -if [[ "$TRAVIS_GO_VERSION" =~ ^1\.[45](\..*)?$ ]]; then - exit 0 -fi - -go get github.com/ernesto-jimenez/gogen/imports -go generate ./... -if [ -n "$(git diff)" ]; then - echo "Go generate had not been run" - git diff - exit 1 -fi diff --git a/vendor/github.com/stretchr/testify/.travis.govet.sh b/vendor/github.com/stretchr/testify/.travis.govet.sh deleted file mode 100755 index f8fbba7a1..000000000 --- a/vendor/github.com/stretchr/testify/.travis.govet.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -cd "$(dirname $0)" -DIRS=". 
assert require mock _codegen" -set -e -for subdir in $DIRS; do - pushd $subdir - go vet - popd -done diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml deleted file mode 100644 index 22bf25004..000000000 --- a/vendor/google.golang.org/grpc/.travis.yml +++ /dev/null @@ -1,20 +0,0 @@ -language: go - -go: - - 1.7.x - - 1.8.x - - 1.9.x - -matrix: - include: - - go: 1.9.x - env: ARCH=386 - -go_import_path: google.golang.org/grpc - -before_install: - - if [[ "$TRAVIS_GO_VERSION" = 1.9* && "$ARCH" != "386" ]]; then ./vet.sh -install || exit 1; fi - -script: - - if [[ "$TRAVIS_GO_VERSION" = 1.9* && "$ARCH" != "386" ]]; then ./vet.sh || exit 1; fi - - make test testrace diff --git a/vendor/google.golang.org/grpc/benchmark/benchmain/main.go b/vendor/google.golang.org/grpc/benchmark/benchmain/main.go deleted file mode 100644 index 0cc1f25e6..000000000 --- a/vendor/google.golang.org/grpc/benchmark/benchmain/main.go +++ /dev/null @@ -1,499 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/* -Package main provides benchmark with setting flags. - -An example to run some benchmarks with profiling enabled: - -go run benchmark/benchmain/main.go -benchtime=10s -workloads=all \ - -compression=on -maxConcurrentCalls=1 -trace=off \ - -reqSizeBytes=1,1048576 -respSizeBytes=1,1048576 -networkMode=Local \ - -cpuProfile=cpuProf -memProfile=memProf -memProfileRate=10000 -resultFile=result - -As a suggestion, when creating a branch, you can run this benchmark and save the result -file "-resultFile=basePerf", and later when you at the middle of the work or finish the -work, you can get the benchmark result and compare it with the base anytime. - -Assume there are two result files names as "basePerf" and "curPerf" created by adding --resultFile=basePerf and -resultFile=curPerf. 
- To format the curPerf, run: - go run benchmark/benchresult/main.go curPerf - To observe how the performance changes based on a base result, run: - go run benchmark/benchresult/main.go basePerf curPerf -*/ -package main - -import ( - "encoding/gob" - "errors" - "flag" - "fmt" - "io" - "io/ioutil" - "log" - "net" - "os" - "reflect" - "runtime" - "runtime/pprof" - "strconv" - "strings" - "sync" - "sync/atomic" - "testing" - "time" - - "golang.org/x/net/context" - "google.golang.org/grpc" - bm "google.golang.org/grpc/benchmark" - testpb "google.golang.org/grpc/benchmark/grpc_testing" - "google.golang.org/grpc/benchmark/latency" - "google.golang.org/grpc/benchmark/stats" - "google.golang.org/grpc/grpclog" -) - -const ( - modeOn = "on" - modeOff = "off" - modeBoth = "both" -) - -var allCompressionModes = []string{modeOn, modeOff, modeBoth} -var allTraceModes = []string{modeOn, modeOff, modeBoth} - -const ( - workloadsUnary = "unary" - workloadsStreaming = "streaming" - workloadsAll = "all" -) - -var allWorkloads = []string{workloadsUnary, workloadsStreaming, workloadsAll} - -var ( - runMode = []bool{true, true} // {runUnary, runStream} - // When set the latency to 0 (no delay), the result is slower than the real result with no delay - // because latency simulation section has extra operations - ltc = []time.Duration{0, 40 * time.Millisecond} // if non-positive, no delay. - kbps = []int{0, 10240} // if non-positive, infinite - mtu = []int{0} // if non-positive, infinite - maxConcurrentCalls = []int{1, 8, 64, 512} - reqSizeBytes = []int{1, 1024, 1024 * 1024} - respSizeBytes = []int{1, 1024, 1024 * 1024} - enableTrace []bool - benchtime time.Duration - memProfile, cpuProfile string - memProfileRate int - enableCompressor []bool - networkMode string - benchmarkResultFile string - networks = map[string]latency.Network{ - "Local": latency.Local, - "LAN": latency.LAN, - "WAN": latency.WAN, - "Longhaul": latency.Longhaul, - } -) - -func unaryBenchmark(startTimer func(), stopTimer func(int32), benchFeatures stats.Features, benchtime time.Duration, s *stats.Stats) { - caller, close := makeFuncUnary(benchFeatures) - defer close() - runBenchmark(caller, startTimer, stopTimer, benchFeatures, benchtime, s) -} - -func streamBenchmark(startTimer func(), stopTimer func(int32), benchFeatures stats.Features, benchtime time.Duration, s *stats.Stats) { - caller, close := makeFuncStream(benchFeatures) - defer close() - runBenchmark(caller, startTimer, stopTimer, benchFeatures, benchtime, s) -} - -func makeFuncUnary(benchFeatures stats.Features) (func(int), func()) { - nw := &latency.Network{Kbps: benchFeatures.Kbps, Latency: benchFeatures.Latency, MTU: benchFeatures.Mtu} - opts := []grpc.DialOption{} - sopts := []grpc.ServerOption{} - if benchFeatures.EnableCompressor { - sopts = append(sopts, - grpc.RPCCompressor(nopCompressor{}), - grpc.RPCDecompressor(nopDecompressor{}), - ) - opts = append(opts, - grpc.WithCompressor(nopCompressor{}), - grpc.WithDecompressor(nopDecompressor{}), - ) - } - sopts = append(sopts, grpc.MaxConcurrentStreams(uint32(benchFeatures.MaxConcurrentCalls+1))) - opts = append(opts, grpc.WithDialer(func(address string, timeout time.Duration) (net.Conn, error) { - return nw.TimeoutDialer(net.DialTimeout)("tcp", address, timeout) - })) - opts = append(opts, grpc.WithInsecure()) - - target, stopper := bm.StartServer(bm.ServerInfo{Addr: "localhost:0", Type: "protobuf", Network: nw}, sopts...) - conn := bm.NewClientConn(target, opts...) 
- tc := testpb.NewBenchmarkServiceClient(conn) - return func(int) { - unaryCaller(tc, benchFeatures.ReqSizeBytes, benchFeatures.RespSizeBytes) - }, func() { - conn.Close() - stopper() - } -} - -func makeFuncStream(benchFeatures stats.Features) (func(int), func()) { - nw := &latency.Network{Kbps: benchFeatures.Kbps, Latency: benchFeatures.Latency, MTU: benchFeatures.Mtu} - opts := []grpc.DialOption{} - sopts := []grpc.ServerOption{} - if benchFeatures.EnableCompressor { - sopts = append(sopts, - grpc.RPCCompressor(grpc.NewGZIPCompressor()), - grpc.RPCDecompressor(grpc.NewGZIPDecompressor()), - ) - opts = append(opts, - grpc.WithCompressor(grpc.NewGZIPCompressor()), - grpc.WithDecompressor(grpc.NewGZIPDecompressor()), - ) - } - sopts = append(sopts, grpc.MaxConcurrentStreams(uint32(benchFeatures.MaxConcurrentCalls+1))) - opts = append(opts, grpc.WithDialer(func(address string, timeout time.Duration) (net.Conn, error) { - return nw.TimeoutDialer(net.DialTimeout)("tcp", address, timeout) - })) - opts = append(opts, grpc.WithInsecure()) - - target, stopper := bm.StartServer(bm.ServerInfo{Addr: "localhost:0", Type: "protobuf", Network: nw}, sopts...) - conn := bm.NewClientConn(target, opts...) - tc := testpb.NewBenchmarkServiceClient(conn) - streams := make([]testpb.BenchmarkService_StreamingCallClient, benchFeatures.MaxConcurrentCalls) - for i := 0; i < benchFeatures.MaxConcurrentCalls; i++ { - stream, err := tc.StreamingCall(context.Background()) - if err != nil { - grpclog.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) - } - streams[i] = stream - } - return func(pos int) { - streamCaller(streams[pos], benchFeatures.ReqSizeBytes, benchFeatures.RespSizeBytes) - }, func() { - conn.Close() - stopper() - } -} - -func unaryCaller(client testpb.BenchmarkServiceClient, reqSize, respSize int) { - if err := bm.DoUnaryCall(client, reqSize, respSize); err != nil { - grpclog.Fatalf("DoUnaryCall failed: %v", err) - } -} - -func streamCaller(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) { - if err := bm.DoStreamingRoundTrip(stream, reqSize, respSize); err != nil { - grpclog.Fatalf("DoStreamingRoundTrip failed: %v", err) - } -} - -func runBenchmark(caller func(int), startTimer func(), stopTimer func(int32), benchFeatures stats.Features, benchtime time.Duration, s *stats.Stats) { - // Warm up connection. - for i := 0; i < 10; i++ { - caller(0) - } - // Run benchmark. - startTimer() - var ( - mu sync.Mutex - wg sync.WaitGroup - ) - wg.Add(benchFeatures.MaxConcurrentCalls) - bmEnd := time.Now().Add(benchtime) - var count int32 - for i := 0; i < benchFeatures.MaxConcurrentCalls; i++ { - go func(pos int) { - for { - t := time.Now() - if t.After(bmEnd) { - break - } - start := time.Now() - caller(pos) - elapse := time.Since(start) - atomic.AddInt32(&count, 1) - mu.Lock() - s.Add(elapse) - mu.Unlock() - } - wg.Done() - }(i) - } - wg.Wait() - stopTimer(count) -} - -// Initiate main function to get settings of features. 
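The comma-separated list flags registered in init below are parsed through the standard flag.Value interface, which this file implements further down as intSliceType. A condensed sketch of the pattern, using the illustrative name csvInts (requires the flag, fmt, strconv, and strings imports):

	type csvInts []int

	func (v *csvInts) String() string { return fmt.Sprintf("%v", *v) }

	func (v *csvInts) Set(s string) error {
		for _, part := range strings.Split(s, ",") {
			n, err := strconv.Atoi(part)
			if err != nil {
				return err
			}
			*v = append(*v, n)
		}
		return nil
	}

	// Registered like any other flag:
	// var sizes csvInts
	// flag.Var(&sizes, "reqSizeBytes", "request sizes, comma-separated")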
-func init() { - var ( - workloads, traceMode, compressorMode, readLatency string - readKbps, readMtu, readMaxConcurrentCalls intSliceType - readReqSizeBytes, readRespSizeBytes intSliceType - ) - flag.StringVar(&workloads, "workloads", workloadsAll, - fmt.Sprintf("Workloads to execute - One of: %v", strings.Join(allWorkloads, ", "))) - flag.StringVar(&traceMode, "trace", modeOff, - fmt.Sprintf("Trace mode - One of: %v", strings.Join(allTraceModes, ", "))) - flag.StringVar(&readLatency, "latency", "", "Simulated one-way network latency - may be a comma-separated list") - flag.DurationVar(&benchtime, "benchtime", time.Second, "Configures the amount of time to run each benchmark") - flag.Var(&readKbps, "kbps", "Simulated network throughput (in kbps) - may be a comma-separated list") - flag.Var(&readMtu, "mtu", "Simulated network MTU (Maximum Transmission Unit) - may be a comma-separated list") - flag.Var(&readMaxConcurrentCalls, "maxConcurrentCalls", "Number of concurrent RPCs during benchmarks") - flag.Var(&readReqSizeBytes, "reqSizeBytes", "Request size in bytes - may be a comma-separated list") - flag.Var(&readRespSizeBytes, "respSizeBytes", "Response size in bytes - may be a comma-separated list") - flag.StringVar(&memProfile, "memProfile", "", "Enables memory profiling output to the filename provided.") - flag.IntVar(&memProfileRate, "memProfileRate", 512*1024, "Configures the memory profiling rate. \n"+ - "memProfile should be set before setting profile rate. To include every allocated block in the profile, "+ - "set MemProfileRate to 1. To turn off profiling entirely, set MemProfileRate to 0. 512 * 1024 by default.") - flag.StringVar(&cpuProfile, "cpuProfile", "", "Enables CPU profiling output to the filename provided") - flag.StringVar(&compressorMode, "compression", modeOff, - fmt.Sprintf("Compression mode - One of: %v", strings.Join(allCompressionModes, ", "))) - flag.StringVar(&benchmarkResultFile, "resultFile", "", "Save the benchmark result into a binary file") - flag.StringVar(&networkMode, "networkMode", "", "Network mode includes LAN, WAN, Local and Longhaul") - flag.Parse() - if flag.NArg() != 0 { - log.Fatal("Error: unparsed arguments: ", flag.Args()) - } - switch workloads { - case workloadsUnary: - runMode[0] = true - runMode[1] = false - case workloadsStreaming: - runMode[0] = false - runMode[1] = true - case workloadsAll: - runMode[0] = true - runMode[1] = true - default: - log.Fatalf("Unknown workloads setting: %v (want one of: %v)", - workloads, strings.Join(allWorkloads, ", ")) - } - enableCompressor = setMode(compressorMode) - enableTrace = setMode(traceMode) - // Time input formats as (time + unit). - readTimeFromInput(<c, readLatency) - readIntFromIntSlice(&kbps, readKbps) - readIntFromIntSlice(&mtu, readMtu) - readIntFromIntSlice(&maxConcurrentCalls, readMaxConcurrentCalls) - readIntFromIntSlice(&reqSizeBytes, readReqSizeBytes) - readIntFromIntSlice(&respSizeBytes, readRespSizeBytes) - // Re-write latency, kpbs and mtu if network mode is set. 
- if network, ok := networks[networkMode]; ok { - ltc = []time.Duration{network.Latency} - kbps = []int{network.Kbps} - mtu = []int{network.MTU} - } -} - -func setMode(name string) []bool { - switch name { - case modeOn: - return []bool{true} - case modeOff: - return []bool{false} - case modeBoth: - return []bool{false, true} - default: - log.Fatalf("Unknown %s setting: %v (want one of: %v)", - name, name, strings.Join(allCompressionModes, ", ")) - return []bool{} - } -} - -type intSliceType []int - -func (intSlice *intSliceType) String() string { - return fmt.Sprintf("%v", *intSlice) -} - -func (intSlice *intSliceType) Set(value string) error { - if len(*intSlice) > 0 { - return errors.New("interval flag already set") - } - for _, num := range strings.Split(value, ",") { - next, err := strconv.Atoi(num) - if err != nil { - return err - } - *intSlice = append(*intSlice, next) - } - return nil -} - -func readIntFromIntSlice(values *[]int, replace intSliceType) { - // If not set replace in the flag, just return to run the default settings. - if len(replace) == 0 { - return - } - *values = replace -} - -func readTimeFromInput(values *[]time.Duration, replace string) { - if strings.Compare(replace, "") != 0 { - *values = []time.Duration{} - for _, ltc := range strings.Split(replace, ",") { - duration, err := time.ParseDuration(ltc) - if err != nil { - log.Fatal(err.Error()) - } - *values = append(*values, duration) - } - } -} - -func main() { - before() - featuresPos := make([]int, 8) - // 0:enableTracing 1:ltc 2:kbps 3:mtu 4:maxC 5:reqSize 6:respSize - featuresNum := []int{len(enableTrace), len(ltc), len(kbps), len(mtu), - len(maxConcurrentCalls), len(reqSizeBytes), len(respSizeBytes), len(enableCompressor)} - initalPos := make([]int, len(featuresPos)) - s := stats.NewStats(10) - s.SortLatency() - var memStats runtime.MemStats - var results testing.BenchmarkResult - var startAllocs, startBytes uint64 - var startTime time.Time - start := true - var startTimer = func() { - runtime.ReadMemStats(&memStats) - startAllocs = memStats.Mallocs - startBytes = memStats.TotalAlloc - startTime = time.Now() - } - var stopTimer = func(count int32) { - runtime.ReadMemStats(&memStats) - results = testing.BenchmarkResult{N: int(count), T: time.Now().Sub(startTime), - Bytes: 0, MemAllocs: memStats.Mallocs - startAllocs, MemBytes: memStats.TotalAlloc - startBytes} - } - sharedPos := make([]bool, len(featuresPos)) - for i := 0; i < len(featuresPos); i++ { - if featuresNum[i] <= 1 { - sharedPos[i] = true - } - } - - // Run benchmarks - resultSlice := []stats.BenchResults{} - for !reflect.DeepEqual(featuresPos, initalPos) || start { - start = false - benchFeature := stats.Features{ - NetworkMode: networkMode, - EnableTrace: enableTrace[featuresPos[0]], - Latency: ltc[featuresPos[1]], - Kbps: kbps[featuresPos[2]], - Mtu: mtu[featuresPos[3]], - MaxConcurrentCalls: maxConcurrentCalls[featuresPos[4]], - ReqSizeBytes: reqSizeBytes[featuresPos[5]], - RespSizeBytes: respSizeBytes[featuresPos[6]], - EnableCompressor: enableCompressor[featuresPos[7]], - } - - grpc.EnableTracing = enableTrace[featuresPos[0]] - if runMode[0] { - unaryBenchmark(startTimer, stopTimer, benchFeature, benchtime, s) - s.SetBenchmarkResult("Unary", benchFeature, results.N, - results.AllocedBytesPerOp(), results.AllocsPerOp(), sharedPos) - fmt.Println(s.BenchString()) - fmt.Println(s.String()) - resultSlice = append(resultSlice, s.GetBenchmarkResults()) - s.Clear() - } - if runMode[1] { - streamBenchmark(startTimer, stopTimer, benchFeature, 
benchtime, s) - s.SetBenchmarkResult("Stream", benchFeature, results.N, - results.AllocedBytesPerOp(), results.AllocsPerOp(), sharedPos) - fmt.Println(s.BenchString()) - fmt.Println(s.String()) - resultSlice = append(resultSlice, s.GetBenchmarkResults()) - s.Clear() - } - bm.AddOne(featuresPos, featuresNum) - } - after(resultSlice) -} - -func before() { - if memProfile != "" { - runtime.MemProfileRate = memProfileRate - } - if cpuProfile != "" { - f, err := os.Create(cpuProfile) - if err != nil { - fmt.Fprintf(os.Stderr, "testing: %s\n", err) - return - } - if err := pprof.StartCPUProfile(f); err != nil { - fmt.Fprintf(os.Stderr, "testing: can't start cpu profile: %s\n", err) - f.Close() - return - } - } -} - -func after(data []stats.BenchResults) { - if cpuProfile != "" { - pprof.StopCPUProfile() // flushes profile to disk - } - if memProfile != "" { - f, err := os.Create(memProfile) - if err != nil { - fmt.Fprintf(os.Stderr, "testing: %s\n", err) - os.Exit(2) - } - runtime.GC() // materialize all statistics - if err = pprof.WriteHeapProfile(f); err != nil { - fmt.Fprintf(os.Stderr, "testing: can't write heap profile %s: %s\n", memProfile, err) - os.Exit(2) - } - f.Close() - } - if benchmarkResultFile != "" { - f, err := os.Create(benchmarkResultFile) - if err != nil { - log.Fatalf("testing: can't write benchmark result %s: %s\n", benchmarkResultFile, err) - } - dataEncoder := gob.NewEncoder(f) - dataEncoder.Encode(data) - f.Close() - } -} - -// nopCompressor is a compressor that just copies data. -type nopCompressor struct{} - -func (nopCompressor) Do(w io.Writer, p []byte) error { - n, err := w.Write(p) - if err != nil { - return err - } - if n != len(p) { - return fmt.Errorf("nopCompressor.Write: wrote %v bytes; want %v", n, len(p)) - } - return nil -} - -func (nopCompressor) Type() string { return "nop" } - -// nopDecompressor is a decompressor that just copies data. -type nopDecompressor struct{} - -func (nopDecompressor) Do(r io.Reader) ([]byte, error) { return ioutil.ReadAll(r) } -func (nopDecompressor) Type() string { return "nop" } diff --git a/vendor/google.golang.org/grpc/benchmark/benchmark.go b/vendor/google.golang.org/grpc/benchmark/benchmark.go deleted file mode 100644 index f09fa4543..000000000 --- a/vendor/google.golang.org/grpc/benchmark/benchmark.go +++ /dev/null @@ -1,364 +0,0 @@ -/* - * - * Copyright 2014 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -//go:generate protoc -I grpc_testing --go_out=plugins=grpc:grpc_testing grpc_testing/control.proto grpc_testing/messages.proto grpc_testing/payloads.proto grpc_testing/services.proto grpc_testing/stats.proto - -/* -Package benchmark implements the building blocks to setup end-to-end gRPC benchmarks. 
-*/ -package benchmark - -import ( - "fmt" - "io" - "net" - "sync" - "testing" - "time" - - "golang.org/x/net/context" - "google.golang.org/grpc" - testpb "google.golang.org/grpc/benchmark/grpc_testing" - "google.golang.org/grpc/benchmark/latency" - "google.golang.org/grpc/benchmark/stats" - "google.golang.org/grpc/grpclog" -) - -// AddOne add 1 to the features slice -func AddOne(features []int, featuresMaxPosition []int) { - for i := len(features) - 1; i >= 0; i-- { - features[i] = (features[i] + 1) - if features[i]/featuresMaxPosition[i] == 0 { - break - } - features[i] = features[i] % featuresMaxPosition[i] - } -} - -// Allows reuse of the same testpb.Payload object. -func setPayload(p *testpb.Payload, t testpb.PayloadType, size int) { - if size < 0 { - grpclog.Fatalf("Requested a response with invalid length %d", size) - } - body := make([]byte, size) - switch t { - case testpb.PayloadType_COMPRESSABLE: - case testpb.PayloadType_UNCOMPRESSABLE: - grpclog.Fatalf("PayloadType UNCOMPRESSABLE is not supported") - default: - grpclog.Fatalf("Unsupported payload type: %d", t) - } - p.Type = t - p.Body = body - return -} - -func newPayload(t testpb.PayloadType, size int) *testpb.Payload { - p := new(testpb.Payload) - setPayload(p, t, size) - return p -} - -type testServer struct { -} - -func (s *testServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { - return &testpb.SimpleResponse{ - Payload: newPayload(in.ResponseType, int(in.ResponseSize)), - }, nil -} - -func (s *testServer) StreamingCall(stream testpb.BenchmarkService_StreamingCallServer) error { - response := &testpb.SimpleResponse{ - Payload: new(testpb.Payload), - } - in := new(testpb.SimpleRequest) - for { - // use ServerStream directly to reuse the same testpb.SimpleRequest object - err := stream.(grpc.ServerStream).RecvMsg(in) - if err == io.EOF { - // read done. - return nil - } - if err != nil { - return err - } - setPayload(response.Payload, in.ResponseType, int(in.ResponseSize)) - if err := stream.Send(response); err != nil { - return err - } - } -} - -// byteBufServer is a gRPC server that sends and receives byte buffer. -// The purpose is to benchmark the gRPC performance without protobuf serialization/deserialization overhead. -type byteBufServer struct { - respSize int32 -} - -// UnaryCall is an empty function and is not used for benchmark. -// If bytebuf UnaryCall benchmark is needed later, the function body needs to be updated. -func (s *byteBufServer) UnaryCall(ctx context.Context, in *testpb.SimpleRequest) (*testpb.SimpleResponse, error) { - return &testpb.SimpleResponse{}, nil -} - -func (s *byteBufServer) StreamingCall(stream testpb.BenchmarkService_StreamingCallServer) error { - for { - var in []byte - err := stream.(grpc.ServerStream).RecvMsg(&in) - if err == io.EOF { - return nil - } - if err != nil { - return err - } - out := make([]byte, s.respSize) - if err := stream.(grpc.ServerStream).SendMsg(&out); err != nil { - return err - } - } -} - -// ServerInfo contains the information to create a gRPC benchmark server. -type ServerInfo struct { - // Addr is the address of the server. - Addr string - - // Type is the type of the server. - // It should be "protobuf" or "bytebuf". - Type string - - // Metadata is an optional configuration. - // For "protobuf", it's ignored. - // For "bytebuf", it should be an int representing response size. 
- Metadata interface{} - - // Network can simulate latency - Network *latency.Network -} - -// StartServer starts a gRPC server serving a benchmark service according to info. -// It returns its listen address and a function to stop the server. -func StartServer(info ServerInfo, opts ...grpc.ServerOption) (string, func()) { - lis, err := net.Listen("tcp", info.Addr) - if err != nil { - grpclog.Fatalf("Failed to listen: %v", err) - } - nw := info.Network - if nw != nil { - lis = nw.Listener(lis) - } - opts = append(opts, grpc.WriteBufferSize(128*1024)) - opts = append(opts, grpc.ReadBufferSize(128*1024)) - s := grpc.NewServer(opts...) - switch info.Type { - case "protobuf": - testpb.RegisterBenchmarkServiceServer(s, &testServer{}) - case "bytebuf": - respSize, ok := info.Metadata.(int32) - if !ok { - grpclog.Fatalf("failed to StartServer, invalid metadata: %v, for Type: %v", info.Metadata, info.Type) - } - testpb.RegisterBenchmarkServiceServer(s, &byteBufServer{respSize: respSize}) - default: - grpclog.Fatalf("failed to StartServer, unknown Type: %v", info.Type) - } - go s.Serve(lis) - return lis.Addr().String(), func() { - s.Stop() - } -} - -// DoUnaryCall performs an unary RPC with given stub and request and response sizes. -func DoUnaryCall(tc testpb.BenchmarkServiceClient, reqSize, respSize int) error { - pl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSize) - req := &testpb.SimpleRequest{ - ResponseType: pl.Type, - ResponseSize: int32(respSize), - Payload: pl, - } - if _, err := tc.UnaryCall(context.Background(), req); err != nil { - return fmt.Errorf("/BenchmarkService/UnaryCall(_, _) = _, %v, want _, ", err) - } - return nil -} - -// DoStreamingRoundTrip performs a round trip for a single streaming rpc. -func DoStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) error { - pl := newPayload(testpb.PayloadType_COMPRESSABLE, reqSize) - req := &testpb.SimpleRequest{ - ResponseType: pl.Type, - ResponseSize: int32(respSize), - Payload: pl, - } - if err := stream.Send(req); err != nil { - return fmt.Errorf("/BenchmarkService/StreamingCall.Send(_) = %v, want ", err) - } - if _, err := stream.Recv(); err != nil { - // EOF is a valid error here. - if err == io.EOF { - return nil - } - return fmt.Errorf("/BenchmarkService/StreamingCall.Recv(_) = %v, want ", err) - } - return nil -} - -// DoByteBufStreamingRoundTrip performs a round trip for a single streaming rpc, using a custom codec for byte buffer. -func DoByteBufStreamingRoundTrip(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) error { - out := make([]byte, reqSize) - if err := stream.(grpc.ClientStream).SendMsg(&out); err != nil { - return fmt.Errorf("/BenchmarkService/StreamingCall.(ClientStream).SendMsg(_) = %v, want ", err) - } - var in []byte - if err := stream.(grpc.ClientStream).RecvMsg(&in); err != nil { - // EOF is a valid error here. - if err == io.EOF { - return nil - } - return fmt.Errorf("/BenchmarkService/StreamingCall.(ClientStream).RecvMsg(_) = %v, want ", err) - } - return nil -} - -// NewClientConn creates a gRPC client connection to addr. -func NewClientConn(addr string, opts ...grpc.DialOption) *grpc.ClientConn { - opts = append(opts, grpc.WithWriteBufferSize(128*1024)) - opts = append(opts, grpc.WithReadBufferSize(128*1024)) - conn, err := grpc.Dial(addr, opts...) 
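	// grpc.Dial is non-blocking by default, so the error checked below only
	// covers bad addresses or options; actual connection failures surface on
	// the first RPC. Adding grpc.WithBlock() to opts would make Dial wait
	// for the transport to come up before returning.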
- if err != nil { - grpclog.Fatalf("NewClientConn(%q) failed to create a ClientConn %v", addr, err) - } - return conn -} - -func runUnary(b *testing.B, benchFeatures stats.Features) { - s := stats.AddStats(b, 38) - nw := &latency.Network{Kbps: benchFeatures.Kbps, Latency: benchFeatures.Latency, MTU: benchFeatures.Mtu} - target, stopper := StartServer(ServerInfo{Addr: "localhost:0", Type: "protobuf", Network: nw}, grpc.MaxConcurrentStreams(uint32(benchFeatures.MaxConcurrentCalls+1))) - defer stopper() - conn := NewClientConn( - target, grpc.WithInsecure(), - grpc.WithDialer(func(address string, timeout time.Duration) (net.Conn, error) { - return nw.TimeoutDialer(net.DialTimeout)("tcp", address, timeout) - }), - ) - tc := testpb.NewBenchmarkServiceClient(conn) - - // Warm up connection. - for i := 0; i < 10; i++ { - unaryCaller(tc, benchFeatures.ReqSizeBytes, benchFeatures.RespSizeBytes) - } - ch := make(chan int, benchFeatures.MaxConcurrentCalls*4) - var ( - mu sync.Mutex - wg sync.WaitGroup - ) - wg.Add(benchFeatures.MaxConcurrentCalls) - - // Distribute the b.N calls over maxConcurrentCalls workers. - for i := 0; i < benchFeatures.MaxConcurrentCalls; i++ { - go func() { - for range ch { - start := time.Now() - unaryCaller(tc, benchFeatures.ReqSizeBytes, benchFeatures.RespSizeBytes) - elapse := time.Since(start) - mu.Lock() - s.Add(elapse) - mu.Unlock() - } - wg.Done() - }() - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - ch <- i - } - close(ch) - wg.Wait() - b.StopTimer() - conn.Close() -} - -func runStream(b *testing.B, benchFeatures stats.Features) { - s := stats.AddStats(b, 38) - nw := &latency.Network{Kbps: benchFeatures.Kbps, Latency: benchFeatures.Latency, MTU: benchFeatures.Mtu} - target, stopper := StartServer(ServerInfo{Addr: "localhost:0", Type: "protobuf", Network: nw}, grpc.MaxConcurrentStreams(uint32(benchFeatures.MaxConcurrentCalls+1))) - defer stopper() - conn := NewClientConn( - target, grpc.WithInsecure(), - grpc.WithDialer(func(address string, timeout time.Duration) (net.Conn, error) { - return nw.TimeoutDialer(net.DialTimeout)("tcp", address, timeout) - }), - ) - tc := testpb.NewBenchmarkServiceClient(conn) - - // Warm up connection. - stream, err := tc.StreamingCall(context.Background()) - if err != nil { - b.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) - } - for i := 0; i < 10; i++ { - streamCaller(stream, benchFeatures.ReqSizeBytes, benchFeatures.RespSizeBytes) - } - - ch := make(chan struct{}, benchFeatures.MaxConcurrentCalls*4) - var ( - mu sync.Mutex - wg sync.WaitGroup - ) - wg.Add(benchFeatures.MaxConcurrentCalls) - - // Distribute the b.N calls over maxConcurrentCalls workers. 
- for i := 0; i < benchFeatures.MaxConcurrentCalls; i++ { - stream, err := tc.StreamingCall(context.Background()) - if err != nil { - b.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) - } - go func() { - for range ch { - start := time.Now() - streamCaller(stream, benchFeatures.ReqSizeBytes, benchFeatures.RespSizeBytes) - elapse := time.Since(start) - mu.Lock() - s.Add(elapse) - mu.Unlock() - } - wg.Done() - }() - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - ch <- struct{}{} - } - close(ch) - wg.Wait() - b.StopTimer() - conn.Close() -} -func unaryCaller(client testpb.BenchmarkServiceClient, reqSize, respSize int) { - if err := DoUnaryCall(client, reqSize, respSize); err != nil { - grpclog.Fatalf("DoUnaryCall failed: %v", err) - } -} - -func streamCaller(stream testpb.BenchmarkService_StreamingCallClient, reqSize, respSize int) { - if err := DoStreamingRoundTrip(stream, reqSize, respSize); err != nil { - grpclog.Fatalf("DoStreamingRoundTrip failed: %v", err) - } -} diff --git a/vendor/google.golang.org/grpc/benchmark/benchresult/main.go b/vendor/google.golang.org/grpc/benchmark/benchresult/main.go deleted file mode 100644 index 40226cff1..000000000 --- a/vendor/google.golang.org/grpc/benchmark/benchresult/main.go +++ /dev/null @@ -1,133 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/* -To format the benchmark result: - go run benchmark/benchresult/main.go resultfile - -To see the performance change based on a old result: - go run benchmark/benchresult/main.go resultfile_old resultfile -It will print the comparison result of intersection benchmarks between two files. 
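Each result file is a plain encoding/gob dump of a []stats.BenchResults slice, written by benchmain's -resultFile flag and read back by createMap below. A minimal sketch of that round trip (the file name and the results variable are stand-ins):

	// Writing, as benchmain's after() does (given results []stats.BenchResults):
	f, err := os.Create("basePerf")
	if err != nil {
		log.Fatal(err)
	}
	if err := gob.NewEncoder(f).Encode(results); err != nil {
		log.Fatal(err)
	}
	f.Close()

	// Reading it back, as createMap does:
	rf, err := os.Open("basePerf")
	if err != nil {
		log.Fatal(err)
	}
	var data []stats.BenchResults
	if err := gob.NewDecoder(rf).Decode(&data); err != nil {
		log.Fatal(err)
	}
	rf.Close()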
- -*/ -package main - -import ( - "encoding/gob" - "fmt" - "log" - "os" - "strconv" - "strings" - "time" - - "google.golang.org/grpc/benchmark/stats" -) - -func createMap(fileName string, m map[string]stats.BenchResults) { - f, err := os.Open(fileName) - if err != nil { - log.Fatalf("Read file %s error: %s\n", fileName, err) - } - defer f.Close() - var data []stats.BenchResults - decoder := gob.NewDecoder(f) - if err = decoder.Decode(&data); err != nil { - log.Fatalf("Decode file %s error: %s\n", fileName, err) - } - for _, d := range data { - m[d.RunMode+"-"+d.Features.String()] = d - } -} - -func intChange(title string, val1, val2 int64) string { - return fmt.Sprintf("%10s %12s %12s %8.2f%%\n", title, strconv.FormatInt(val1, 10), - strconv.FormatInt(val2, 10), float64(val2-val1)*100/float64(val1)) -} - -func timeChange(title int, val1, val2 time.Duration) string { - return fmt.Sprintf("%10s %12s %12s %8.2f%%\n", strconv.Itoa(title)+" latency", val1.String(), - val2.String(), float64(val2-val1)*100/float64(val1)) -} - -func compareTwoMap(m1, m2 map[string]stats.BenchResults) { - for k2, v2 := range m2 { - if v1, ok := m1[k2]; ok { - changes := k2 + "\n" - changes += fmt.Sprintf("%10s %12s %12s %8s\n", "Title", "Before", "After", "Percentage") - changes += intChange("Bytes/op", v1.AllocedBytesPerOp, v2.AllocedBytesPerOp) - changes += intChange("Allocs/op", v1.AllocsPerOp, v2.AllocsPerOp) - changes += timeChange(v1.Latency[1].Percent, v1.Latency[1].Value, v2.Latency[1].Value) - changes += timeChange(v1.Latency[2].Percent, v1.Latency[2].Value, v2.Latency[2].Value) - fmt.Printf("%s\n", changes) - } - } -} - -func compareBenchmark(file1, file2 string) { - var BenchValueFile1 map[string]stats.BenchResults - var BenchValueFile2 map[string]stats.BenchResults - BenchValueFile1 = make(map[string]stats.BenchResults) - BenchValueFile2 = make(map[string]stats.BenchResults) - - createMap(file1, BenchValueFile1) - createMap(file2, BenchValueFile2) - - compareTwoMap(BenchValueFile1, BenchValueFile2) -} - -func printline(benchName, ltc50, ltc90, allocByte, allocsOp interface{}) { - fmt.Printf("%-80v%12v%12v%12v%12v\n", benchName, ltc50, ltc90, allocByte, allocsOp) -} - -func formatBenchmark(fileName string) { - f, err := os.Open(fileName) - if err != nil { - log.Fatalf("Read file %s error: %s\n", fileName, err) - } - defer f.Close() - var data []stats.BenchResults - decoder := gob.NewDecoder(f) - if err = decoder.Decode(&data); err != nil { - log.Fatalf("Decode file %s error: %s\n", fileName, err) - } - if len(data) == 0 { - log.Fatalf("No data in file %s\n", fileName) - } - printPos := data[0].SharedPosion - fmt.Println("\nShared features:\n" + strings.Repeat("-", 20)) - fmt.Print(stats.PartialPrintString(printPos, data[0].Features, true)) - fmt.Println(strings.Repeat("-", 35)) - for i := 0; i < len(data[0].SharedPosion); i++ { - printPos[i] = !printPos[i] - } - printline("Name", "latency-50", "latency-90", "Alloc (B)", "Alloc (#)") - for _, d := range data { - name := d.RunMode + stats.PartialPrintString(printPos, d.Features, false) - printline(name, d.Latency[1].Value.String(), d.Latency[2].Value.String(), - d.AllocedBytesPerOp, d.AllocsPerOp) - } -} - -func main() { - if len(os.Args) == 2 { - formatBenchmark(os.Args[1]) - } else { - compareBenchmark(os.Args[1], os.Args[2]) - } -} diff --git a/vendor/google.golang.org/grpc/benchmark/client/main.go b/vendor/google.golang.org/grpc/benchmark/client/main.go deleted file mode 100644 index 9aa587e3a..000000000 --- 
a/vendor/google.golang.org/grpc/benchmark/client/main.go +++ /dev/null @@ -1,180 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package main - -import ( - "flag" - "math" - "net" - "net/http" - _ "net/http/pprof" - "sync" - "time" - - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/benchmark" - testpb "google.golang.org/grpc/benchmark/grpc_testing" - "google.golang.org/grpc/benchmark/stats" - "google.golang.org/grpc/grpclog" -) - -var ( - server = flag.String("server", "", "The server address") - maxConcurrentRPCs = flag.Int("max_concurrent_rpcs", 1, "The max number of concurrent RPCs") - duration = flag.Int("duration", math.MaxInt32, "The duration in seconds to run the benchmark client") - trace = flag.Bool("trace", true, "Whether tracing is on") - rpcType = flag.Int("rpc_type", 0, - `Configure different client rpc type. Valid options are: - 0 : unary call; - 1 : streaming call.`) -) - -func unaryCaller(client testpb.BenchmarkServiceClient) { - benchmark.DoUnaryCall(client, 1, 1) -} - -func streamCaller(stream testpb.BenchmarkService_StreamingCallClient) { - benchmark.DoStreamingRoundTrip(stream, 1, 1) -} - -func buildConnection() (s *stats.Stats, conn *grpc.ClientConn, tc testpb.BenchmarkServiceClient) { - s = stats.NewStats(256) - conn = benchmark.NewClientConn(*server) - tc = testpb.NewBenchmarkServiceClient(conn) - return s, conn, tc -} - -func closeLoopUnary() { - s, conn, tc := buildConnection() - - for i := 0; i < 100; i++ { - unaryCaller(tc) - } - ch := make(chan int, *maxConcurrentRPCs*4) - var ( - mu sync.Mutex - wg sync.WaitGroup - ) - wg.Add(*maxConcurrentRPCs) - - for i := 0; i < *maxConcurrentRPCs; i++ { - go func() { - for range ch { - start := time.Now() - unaryCaller(tc) - elapse := time.Since(start) - mu.Lock() - s.Add(elapse) - mu.Unlock() - } - wg.Done() - }() - } - // Stop the client when time is up. - done := make(chan struct{}) - go func() { - <-time.After(time.Duration(*duration) * time.Second) - close(done) - }() - ok := true - for ok { - select { - case ch <- 0: - case <-done: - ok = false - } - } - close(ch) - wg.Wait() - conn.Close() - grpclog.Println(s.String()) - -} - -func closeLoopStream() { - s, conn, tc := buildConnection() - ch := make(chan int, *maxConcurrentRPCs*4) - var ( - mu sync.Mutex - wg sync.WaitGroup - ) - wg.Add(*maxConcurrentRPCs) - // Distribute RPCs over maxConcurrentCalls workers. - for i := 0; i < *maxConcurrentRPCs; i++ { - go func() { - stream, err := tc.StreamingCall(context.Background()) - if err != nil { - grpclog.Fatalf("%v.StreamingCall(_) = _, %v", tc, err) - } - // Do some warm up. - for i := 0; i < 100; i++ { - streamCaller(stream) - } - for range ch { - start := time.Now() - streamCaller(stream) - elapse := time.Since(start) - mu.Lock() - s.Add(elapse) - mu.Unlock() - } - wg.Done() - }() - } - // Stop the client when time is up. 
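	// The done channel below turns the wall-clock deadline into a select
	// case: the feeder keeps offering work on ch until done closes, and
	// closing ch afterwards lets each worker's range loop terminate so
	// wg.Wait() can return.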
- done := make(chan struct{}) - go func() { - <-time.After(time.Duration(*duration) * time.Second) - close(done) - }() - ok := true - for ok { - select { - case ch <- 0: - case <-done: - ok = false - } - } - close(ch) - wg.Wait() - conn.Close() - grpclog.Println(s.String()) -} - -func main() { - flag.Parse() - grpc.EnableTracing = *trace - go func() { - lis, err := net.Listen("tcp", ":0") - if err != nil { - grpclog.Fatalf("Failed to listen: %v", err) - } - grpclog.Println("Client profiling address: ", lis.Addr().String()) - if err := http.Serve(lis, nil); err != nil { - grpclog.Fatalf("Failed to serve: %v", err) - } - }() - switch *rpcType { - case 0: - closeLoopUnary() - case 1: - closeLoopStream() - } -} diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/control.pb.go b/vendor/google.golang.org/grpc/benchmark/grpc_testing/control.pb.go deleted file mode 100644 index b88832f75..000000000 --- a/vendor/google.golang.org/grpc/benchmark/grpc_testing/control.pb.go +++ /dev/null @@ -1,1194 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: control.proto - -/* -Package grpc_testing is a generated protocol buffer package. - -It is generated from these files: - control.proto - messages.proto - payloads.proto - services.proto - stats.proto - -It has these top-level messages: - PoissonParams - UniformParams - DeterministicParams - ParetoParams - ClosedLoopParams - LoadParams - SecurityParams - ClientConfig - ClientStatus - Mark - ClientArgs - ServerConfig - ServerArgs - ServerStatus - CoreRequest - CoreResponse - Void - Scenario - Scenarios - Payload - EchoStatus - SimpleRequest - SimpleResponse - StreamingInputCallRequest - StreamingInputCallResponse - ResponseParameters - StreamingOutputCallRequest - StreamingOutputCallResponse - ReconnectParams - ReconnectInfo - ByteBufferParams - SimpleProtoParams - ComplexProtoParams - PayloadConfig - ServerStats - HistogramParams - HistogramData - ClientStats -*/ -package grpc_testing - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type ClientType int32 - -const ( - ClientType_SYNC_CLIENT ClientType = 0 - ClientType_ASYNC_CLIENT ClientType = 1 -) - -var ClientType_name = map[int32]string{ - 0: "SYNC_CLIENT", - 1: "ASYNC_CLIENT", -} -var ClientType_value = map[string]int32{ - "SYNC_CLIENT": 0, - "ASYNC_CLIENT": 1, -} - -func (x ClientType) String() string { - return proto.EnumName(ClientType_name, int32(x)) -} -func (ClientType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -type ServerType int32 - -const ( - ServerType_SYNC_SERVER ServerType = 0 - ServerType_ASYNC_SERVER ServerType = 1 - ServerType_ASYNC_GENERIC_SERVER ServerType = 2 -) - -var ServerType_name = map[int32]string{ - 0: "SYNC_SERVER", - 1: "ASYNC_SERVER", - 2: "ASYNC_GENERIC_SERVER", -} -var ServerType_value = map[string]int32{ - "SYNC_SERVER": 0, - "ASYNC_SERVER": 1, - "ASYNC_GENERIC_SERVER": 2, -} - -func (x ServerType) String() string { - return proto.EnumName(ServerType_name, int32(x)) -} -func (ServerType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -type RpcType int32 - -const ( - RpcType_UNARY RpcType = 0 - RpcType_STREAMING RpcType = 1 -) - -var RpcType_name = map[int32]string{ - 0: "UNARY", - 1: "STREAMING", -} -var RpcType_value = map[string]int32{ - "UNARY": 0, - "STREAMING": 1, -} - -func (x RpcType) String() string { - return proto.EnumName(RpcType_name, int32(x)) -} -func (RpcType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -// Parameters of poisson process distribution, which is a good representation -// of activity coming in from independent identical stationary sources. -type PoissonParams struct { - // The rate of arrivals (a.k.a. lambda parameter of the exp distribution). 
- OfferedLoad float64 `protobuf:"fixed64,1,opt,name=offered_load,json=offeredLoad" json:"offered_load,omitempty"` -} - -func (m *PoissonParams) Reset() { *m = PoissonParams{} } -func (m *PoissonParams) String() string { return proto.CompactTextString(m) } -func (*PoissonParams) ProtoMessage() {} -func (*PoissonParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -func (m *PoissonParams) GetOfferedLoad() float64 { - if m != nil { - return m.OfferedLoad - } - return 0 -} - -type UniformParams struct { - InterarrivalLo float64 `protobuf:"fixed64,1,opt,name=interarrival_lo,json=interarrivalLo" json:"interarrival_lo,omitempty"` - InterarrivalHi float64 `protobuf:"fixed64,2,opt,name=interarrival_hi,json=interarrivalHi" json:"interarrival_hi,omitempty"` -} - -func (m *UniformParams) Reset() { *m = UniformParams{} } -func (m *UniformParams) String() string { return proto.CompactTextString(m) } -func (*UniformParams) ProtoMessage() {} -func (*UniformParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -func (m *UniformParams) GetInterarrivalLo() float64 { - if m != nil { - return m.InterarrivalLo - } - return 0 -} - -func (m *UniformParams) GetInterarrivalHi() float64 { - if m != nil { - return m.InterarrivalHi - } - return 0 -} - -type DeterministicParams struct { - OfferedLoad float64 `protobuf:"fixed64,1,opt,name=offered_load,json=offeredLoad" json:"offered_load,omitempty"` -} - -func (m *DeterministicParams) Reset() { *m = DeterministicParams{} } -func (m *DeterministicParams) String() string { return proto.CompactTextString(m) } -func (*DeterministicParams) ProtoMessage() {} -func (*DeterministicParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -func (m *DeterministicParams) GetOfferedLoad() float64 { - if m != nil { - return m.OfferedLoad - } - return 0 -} - -type ParetoParams struct { - InterarrivalBase float64 `protobuf:"fixed64,1,opt,name=interarrival_base,json=interarrivalBase" json:"interarrival_base,omitempty"` - Alpha float64 `protobuf:"fixed64,2,opt,name=alpha" json:"alpha,omitempty"` -} - -func (m *ParetoParams) Reset() { *m = ParetoParams{} } -func (m *ParetoParams) String() string { return proto.CompactTextString(m) } -func (*ParetoParams) ProtoMessage() {} -func (*ParetoParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } - -func (m *ParetoParams) GetInterarrivalBase() float64 { - if m != nil { - return m.InterarrivalBase - } - return 0 -} - -func (m *ParetoParams) GetAlpha() float64 { - if m != nil { - return m.Alpha - } - return 0 -} - -// Once an RPC finishes, immediately start a new one. -// No configuration parameters needed. 
-type ClosedLoopParams struct { -} - -func (m *ClosedLoopParams) Reset() { *m = ClosedLoopParams{} } -func (m *ClosedLoopParams) String() string { return proto.CompactTextString(m) } -func (*ClosedLoopParams) ProtoMessage() {} -func (*ClosedLoopParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } - -type LoadParams struct { - // Types that are valid to be assigned to Load: - // *LoadParams_ClosedLoop - // *LoadParams_Poisson - // *LoadParams_Uniform - // *LoadParams_Determ - // *LoadParams_Pareto - Load isLoadParams_Load `protobuf_oneof:"load"` -} - -func (m *LoadParams) Reset() { *m = LoadParams{} } -func (m *LoadParams) String() string { return proto.CompactTextString(m) } -func (*LoadParams) ProtoMessage() {} -func (*LoadParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } - -type isLoadParams_Load interface { - isLoadParams_Load() -} - -type LoadParams_ClosedLoop struct { - ClosedLoop *ClosedLoopParams `protobuf:"bytes,1,opt,name=closed_loop,json=closedLoop,oneof"` -} -type LoadParams_Poisson struct { - Poisson *PoissonParams `protobuf:"bytes,2,opt,name=poisson,oneof"` -} -type LoadParams_Uniform struct { - Uniform *UniformParams `protobuf:"bytes,3,opt,name=uniform,oneof"` -} -type LoadParams_Determ struct { - Determ *DeterministicParams `protobuf:"bytes,4,opt,name=determ,oneof"` -} -type LoadParams_Pareto struct { - Pareto *ParetoParams `protobuf:"bytes,5,opt,name=pareto,oneof"` -} - -func (*LoadParams_ClosedLoop) isLoadParams_Load() {} -func (*LoadParams_Poisson) isLoadParams_Load() {} -func (*LoadParams_Uniform) isLoadParams_Load() {} -func (*LoadParams_Determ) isLoadParams_Load() {} -func (*LoadParams_Pareto) isLoadParams_Load() {} - -func (m *LoadParams) GetLoad() isLoadParams_Load { - if m != nil { - return m.Load - } - return nil -} - -func (m *LoadParams) GetClosedLoop() *ClosedLoopParams { - if x, ok := m.GetLoad().(*LoadParams_ClosedLoop); ok { - return x.ClosedLoop - } - return nil -} - -func (m *LoadParams) GetPoisson() *PoissonParams { - if x, ok := m.GetLoad().(*LoadParams_Poisson); ok { - return x.Poisson - } - return nil -} - -func (m *LoadParams) GetUniform() *UniformParams { - if x, ok := m.GetLoad().(*LoadParams_Uniform); ok { - return x.Uniform - } - return nil -} - -func (m *LoadParams) GetDeterm() *DeterministicParams { - if x, ok := m.GetLoad().(*LoadParams_Determ); ok { - return x.Determ - } - return nil -} - -func (m *LoadParams) GetPareto() *ParetoParams { - if x, ok := m.GetLoad().(*LoadParams_Pareto); ok { - return x.Pareto - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
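Application code never touches those internal hooks; a oneof value is set by assigning one of the wrapper structs above to the Load field and read back through the nil-safe getters. A hedged sketch (examplePoissonLoad is a hypothetical helper, not part of the generated file):

	func examplePoissonLoad() float64 {
		lp := &LoadParams{
			Load: &LoadParams_Poisson{
				Poisson: &PoissonParams{OfferedLoad: 100},
			},
		}
		// GetPoisson returns nil if another variant is set, and
		// GetOfferedLoad tolerates a nil receiver, so no type switch
		// is needed here.
		return lp.GetPoisson().GetOfferedLoad()
	}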
-func (*LoadParams) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _LoadParams_OneofMarshaler, _LoadParams_OneofUnmarshaler, _LoadParams_OneofSizer, []interface{}{ - (*LoadParams_ClosedLoop)(nil), - (*LoadParams_Poisson)(nil), - (*LoadParams_Uniform)(nil), - (*LoadParams_Determ)(nil), - (*LoadParams_Pareto)(nil), - } -} - -func _LoadParams_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*LoadParams) - // load - switch x := m.Load.(type) { - case *LoadParams_ClosedLoop: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ClosedLoop); err != nil { - return err - } - case *LoadParams_Poisson: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Poisson); err != nil { - return err - } - case *LoadParams_Uniform: - b.EncodeVarint(3<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Uniform); err != nil { - return err - } - case *LoadParams_Determ: - b.EncodeVarint(4<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Determ); err != nil { - return err - } - case *LoadParams_Pareto: - b.EncodeVarint(5<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Pareto); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("LoadParams.Load has unexpected type %T", x) - } - return nil -} - -func _LoadParams_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*LoadParams) - switch tag { - case 1: // load.closed_loop - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ClosedLoopParams) - err := b.DecodeMessage(msg) - m.Load = &LoadParams_ClosedLoop{msg} - return true, err - case 2: // load.poisson - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(PoissonParams) - err := b.DecodeMessage(msg) - m.Load = &LoadParams_Poisson{msg} - return true, err - case 3: // load.uniform - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(UniformParams) - err := b.DecodeMessage(msg) - m.Load = &LoadParams_Uniform{msg} - return true, err - case 4: // load.determ - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(DeterministicParams) - err := b.DecodeMessage(msg) - m.Load = &LoadParams_Determ{msg} - return true, err - case 5: // load.pareto - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ParetoParams) - err := b.DecodeMessage(msg) - m.Load = &LoadParams_Pareto{msg} - return true, err - default: - return false, nil - } -} - -func _LoadParams_OneofSizer(msg proto.Message) (n int) { - m := msg.(*LoadParams) - // load - switch x := m.Load.(type) { - case *LoadParams_ClosedLoop: - s := proto.Size(x.ClosedLoop) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *LoadParams_Poisson: - s := proto.Size(x.Poisson) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *LoadParams_Uniform: - s := proto.Size(x.Uniform) - n += proto.SizeVarint(3<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *LoadParams_Determ: - s := proto.Size(x.Determ) - n += proto.SizeVarint(4<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *LoadParams_Pareto: - s := proto.Size(x.Pareto) - n += proto.SizeVarint(5<<3 | proto.WireBytes) - n += 
proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -// presence of SecurityParams implies use of TLS -type SecurityParams struct { - UseTestCa bool `protobuf:"varint,1,opt,name=use_test_ca,json=useTestCa" json:"use_test_ca,omitempty"` - ServerHostOverride string `protobuf:"bytes,2,opt,name=server_host_override,json=serverHostOverride" json:"server_host_override,omitempty"` -} - -func (m *SecurityParams) Reset() { *m = SecurityParams{} } -func (m *SecurityParams) String() string { return proto.CompactTextString(m) } -func (*SecurityParams) ProtoMessage() {} -func (*SecurityParams) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } - -func (m *SecurityParams) GetUseTestCa() bool { - if m != nil { - return m.UseTestCa - } - return false -} - -func (m *SecurityParams) GetServerHostOverride() string { - if m != nil { - return m.ServerHostOverride - } - return "" -} - -type ClientConfig struct { - // List of targets to connect to. At least one target needs to be specified. - ServerTargets []string `protobuf:"bytes,1,rep,name=server_targets,json=serverTargets" json:"server_targets,omitempty"` - ClientType ClientType `protobuf:"varint,2,opt,name=client_type,json=clientType,enum=grpc.testing.ClientType" json:"client_type,omitempty"` - SecurityParams *SecurityParams `protobuf:"bytes,3,opt,name=security_params,json=securityParams" json:"security_params,omitempty"` - // How many concurrent RPCs to start for each channel. - // For synchronous client, use a separate thread for each outstanding RPC. - OutstandingRpcsPerChannel int32 `protobuf:"varint,4,opt,name=outstanding_rpcs_per_channel,json=outstandingRpcsPerChannel" json:"outstanding_rpcs_per_channel,omitempty"` - // Number of independent client channels to create. - // i-th channel will connect to server_target[i % server_targets.size()] - ClientChannels int32 `protobuf:"varint,5,opt,name=client_channels,json=clientChannels" json:"client_channels,omitempty"` - // Only for async client. Number of threads to use to start/manage RPCs. - AsyncClientThreads int32 `protobuf:"varint,7,opt,name=async_client_threads,json=asyncClientThreads" json:"async_client_threads,omitempty"` - RpcType RpcType `protobuf:"varint,8,opt,name=rpc_type,json=rpcType,enum=grpc.testing.RpcType" json:"rpc_type,omitempty"` - // The requested load for the entire client (aggregated over all the threads). 
- LoadParams *LoadParams `protobuf:"bytes,10,opt,name=load_params,json=loadParams" json:"load_params,omitempty"` - PayloadConfig *PayloadConfig `protobuf:"bytes,11,opt,name=payload_config,json=payloadConfig" json:"payload_config,omitempty"` - HistogramParams *HistogramParams `protobuf:"bytes,12,opt,name=histogram_params,json=histogramParams" json:"histogram_params,omitempty"` - // Specify the cores we should run the client on, if desired - CoreList []int32 `protobuf:"varint,13,rep,packed,name=core_list,json=coreList" json:"core_list,omitempty"` - CoreLimit int32 `protobuf:"varint,14,opt,name=core_limit,json=coreLimit" json:"core_limit,omitempty"` -} - -func (m *ClientConfig) Reset() { *m = ClientConfig{} } -func (m *ClientConfig) String() string { return proto.CompactTextString(m) } -func (*ClientConfig) ProtoMessage() {} -func (*ClientConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } - -func (m *ClientConfig) GetServerTargets() []string { - if m != nil { - return m.ServerTargets - } - return nil -} - -func (m *ClientConfig) GetClientType() ClientType { - if m != nil { - return m.ClientType - } - return ClientType_SYNC_CLIENT -} - -func (m *ClientConfig) GetSecurityParams() *SecurityParams { - if m != nil { - return m.SecurityParams - } - return nil -} - -func (m *ClientConfig) GetOutstandingRpcsPerChannel() int32 { - if m != nil { - return m.OutstandingRpcsPerChannel - } - return 0 -} - -func (m *ClientConfig) GetClientChannels() int32 { - if m != nil { - return m.ClientChannels - } - return 0 -} - -func (m *ClientConfig) GetAsyncClientThreads() int32 { - if m != nil { - return m.AsyncClientThreads - } - return 0 -} - -func (m *ClientConfig) GetRpcType() RpcType { - if m != nil { - return m.RpcType - } - return RpcType_UNARY -} - -func (m *ClientConfig) GetLoadParams() *LoadParams { - if m != nil { - return m.LoadParams - } - return nil -} - -func (m *ClientConfig) GetPayloadConfig() *PayloadConfig { - if m != nil { - return m.PayloadConfig - } - return nil -} - -func (m *ClientConfig) GetHistogramParams() *HistogramParams { - if m != nil { - return m.HistogramParams - } - return nil -} - -func (m *ClientConfig) GetCoreList() []int32 { - if m != nil { - return m.CoreList - } - return nil -} - -func (m *ClientConfig) GetCoreLimit() int32 { - if m != nil { - return m.CoreLimit - } - return 0 -} - -type ClientStatus struct { - Stats *ClientStats `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"` -} - -func (m *ClientStatus) Reset() { *m = ClientStatus{} } -func (m *ClientStatus) String() string { return proto.CompactTextString(m) } -func (*ClientStatus) ProtoMessage() {} -func (*ClientStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } - -func (m *ClientStatus) GetStats() *ClientStats { - if m != nil { - return m.Stats - } - return nil -} - -// Request current stats -type Mark struct { - // if true, the stats will be reset after taking their snapshot. 
- Reset_ bool `protobuf:"varint,1,opt,name=reset" json:"reset,omitempty"` -} - -func (m *Mark) Reset() { *m = Mark{} } -func (m *Mark) String() string { return proto.CompactTextString(m) } -func (*Mark) ProtoMessage() {} -func (*Mark) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } - -func (m *Mark) GetReset_() bool { - if m != nil { - return m.Reset_ - } - return false -} - -type ClientArgs struct { - // Types that are valid to be assigned to Argtype: - // *ClientArgs_Setup - // *ClientArgs_Mark - Argtype isClientArgs_Argtype `protobuf_oneof:"argtype"` -} - -func (m *ClientArgs) Reset() { *m = ClientArgs{} } -func (m *ClientArgs) String() string { return proto.CompactTextString(m) } -func (*ClientArgs) ProtoMessage() {} -func (*ClientArgs) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } - -type isClientArgs_Argtype interface { - isClientArgs_Argtype() -} - -type ClientArgs_Setup struct { - Setup *ClientConfig `protobuf:"bytes,1,opt,name=setup,oneof"` -} -type ClientArgs_Mark struct { - Mark *Mark `protobuf:"bytes,2,opt,name=mark,oneof"` -} - -func (*ClientArgs_Setup) isClientArgs_Argtype() {} -func (*ClientArgs_Mark) isClientArgs_Argtype() {} - -func (m *ClientArgs) GetArgtype() isClientArgs_Argtype { - if m != nil { - return m.Argtype - } - return nil -} - -func (m *ClientArgs) GetSetup() *ClientConfig { - if x, ok := m.GetArgtype().(*ClientArgs_Setup); ok { - return x.Setup - } - return nil -} - -func (m *ClientArgs) GetMark() *Mark { - if x, ok := m.GetArgtype().(*ClientArgs_Mark); ok { - return x.Mark - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*ClientArgs) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _ClientArgs_OneofMarshaler, _ClientArgs_OneofUnmarshaler, _ClientArgs_OneofSizer, []interface{}{ - (*ClientArgs_Setup)(nil), - (*ClientArgs_Mark)(nil), - } -} - -func _ClientArgs_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*ClientArgs) - // argtype - switch x := m.Argtype.(type) { - case *ClientArgs_Setup: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Setup); err != nil { - return err - } - case *ClientArgs_Mark: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Mark); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("ClientArgs.Argtype has unexpected type %T", x) - } - return nil -} - -func _ClientArgs_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*ClientArgs) - switch tag { - case 1: // argtype.setup - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ClientConfig) - err := b.DecodeMessage(msg) - m.Argtype = &ClientArgs_Setup{msg} - return true, err - case 2: // argtype.mark - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Mark) - err := b.DecodeMessage(msg) - m.Argtype = &ClientArgs_Mark{msg} - return true, err - default: - return false, nil - } -} - -func _ClientArgs_OneofSizer(msg proto.Message) (n int) { - m := msg.(*ClientArgs) - // argtype - switch x := m.Argtype.(type) { - case *ClientArgs_Setup: - s := proto.Size(x.Setup) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *ClientArgs_Mark: - s := proto.Size(x.Mark) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += 
proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type ServerConfig struct { - ServerType ServerType `protobuf:"varint,1,opt,name=server_type,json=serverType,enum=grpc.testing.ServerType" json:"server_type,omitempty"` - SecurityParams *SecurityParams `protobuf:"bytes,2,opt,name=security_params,json=securityParams" json:"security_params,omitempty"` - // Port on which to listen. Zero means pick unused port. - Port int32 `protobuf:"varint,4,opt,name=port" json:"port,omitempty"` - // Only for async server. Number of threads used to serve the requests. - AsyncServerThreads int32 `protobuf:"varint,7,opt,name=async_server_threads,json=asyncServerThreads" json:"async_server_threads,omitempty"` - // Specify the number of cores to limit server to, if desired - CoreLimit int32 `protobuf:"varint,8,opt,name=core_limit,json=coreLimit" json:"core_limit,omitempty"` - // payload config, used in generic server - PayloadConfig *PayloadConfig `protobuf:"bytes,9,opt,name=payload_config,json=payloadConfig" json:"payload_config,omitempty"` - // Specify the cores we should run the server on, if desired - CoreList []int32 `protobuf:"varint,10,rep,packed,name=core_list,json=coreList" json:"core_list,omitempty"` -} - -func (m *ServerConfig) Reset() { *m = ServerConfig{} } -func (m *ServerConfig) String() string { return proto.CompactTextString(m) } -func (*ServerConfig) ProtoMessage() {} -func (*ServerConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } - -func (m *ServerConfig) GetServerType() ServerType { - if m != nil { - return m.ServerType - } - return ServerType_SYNC_SERVER -} - -func (m *ServerConfig) GetSecurityParams() *SecurityParams { - if m != nil { - return m.SecurityParams - } - return nil -} - -func (m *ServerConfig) GetPort() int32 { - if m != nil { - return m.Port - } - return 0 -} - -func (m *ServerConfig) GetAsyncServerThreads() int32 { - if m != nil { - return m.AsyncServerThreads - } - return 0 -} - -func (m *ServerConfig) GetCoreLimit() int32 { - if m != nil { - return m.CoreLimit - } - return 0 -} - -func (m *ServerConfig) GetPayloadConfig() *PayloadConfig { - if m != nil { - return m.PayloadConfig - } - return nil -} - -func (m *ServerConfig) GetCoreList() []int32 { - if m != nil { - return m.CoreList - } - return nil -} - -type ServerArgs struct { - // Types that are valid to be assigned to Argtype: - // *ServerArgs_Setup - // *ServerArgs_Mark - Argtype isServerArgs_Argtype `protobuf_oneof:"argtype"` -} - -func (m *ServerArgs) Reset() { *m = ServerArgs{} } -func (m *ServerArgs) String() string { return proto.CompactTextString(m) } -func (*ServerArgs) ProtoMessage() {} -func (*ServerArgs) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } - -type isServerArgs_Argtype interface { - isServerArgs_Argtype() -} - -type ServerArgs_Setup struct { - Setup *ServerConfig `protobuf:"bytes,1,opt,name=setup,oneof"` -} -type ServerArgs_Mark struct { - Mark *Mark `protobuf:"bytes,2,opt,name=mark,oneof"` -} - -func (*ServerArgs_Setup) isServerArgs_Argtype() {} -func (*ServerArgs_Mark) isServerArgs_Argtype() {} - -func (m *ServerArgs) GetArgtype() isServerArgs_Argtype { - if m != nil { - return m.Argtype - } - return nil -} - -func (m *ServerArgs) GetSetup() *ServerConfig { - if x, ok := m.GetArgtype().(*ServerArgs_Setup); ok { - return x.Setup - } - return nil -} - -func (m *ServerArgs) GetMark() *Mark { - if x, ok := m.GetArgtype().(*ServerArgs_Mark); ok { - return x.Mark - } - 
return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*ServerArgs) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _ServerArgs_OneofMarshaler, _ServerArgs_OneofUnmarshaler, _ServerArgs_OneofSizer, []interface{}{ - (*ServerArgs_Setup)(nil), - (*ServerArgs_Mark)(nil), - } -} - -func _ServerArgs_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*ServerArgs) - // argtype - switch x := m.Argtype.(type) { - case *ServerArgs_Setup: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Setup); err != nil { - return err - } - case *ServerArgs_Mark: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Mark); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("ServerArgs.Argtype has unexpected type %T", x) - } - return nil -} - -func _ServerArgs_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*ServerArgs) - switch tag { - case 1: // argtype.setup - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ServerConfig) - err := b.DecodeMessage(msg) - m.Argtype = &ServerArgs_Setup{msg} - return true, err - case 2: // argtype.mark - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Mark) - err := b.DecodeMessage(msg) - m.Argtype = &ServerArgs_Mark{msg} - return true, err - default: - return false, nil - } -} - -func _ServerArgs_OneofSizer(msg proto.Message) (n int) { - m := msg.(*ServerArgs) - // argtype - switch x := m.Argtype.(type) { - case *ServerArgs_Setup: - s := proto.Size(x.Setup) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *ServerArgs_Mark: - s := proto.Size(x.Mark) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -type ServerStatus struct { - Stats *ServerStats `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"` - // the port bound by the server - Port int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"` - // Number of cores available to the server - Cores int32 `protobuf:"varint,3,opt,name=cores" json:"cores,omitempty"` -} - -func (m *ServerStatus) Reset() { *m = ServerStatus{} } -func (m *ServerStatus) String() string { return proto.CompactTextString(m) } -func (*ServerStatus) ProtoMessage() {} -func (*ServerStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } - -func (m *ServerStatus) GetStats() *ServerStats { - if m != nil { - return m.Stats - } - return nil -} - -func (m *ServerStatus) GetPort() int32 { - if m != nil { - return m.Port - } - return 0 -} - -func (m *ServerStatus) GetCores() int32 { - if m != nil { - return m.Cores - } - return 0 -} - -type CoreRequest struct { -} - -func (m *CoreRequest) Reset() { *m = CoreRequest{} } -func (m *CoreRequest) String() string { return proto.CompactTextString(m) } -func (*CoreRequest) ProtoMessage() {} -func (*CoreRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } - -type CoreResponse struct { - // Number of cores available on the server - Cores int32 `protobuf:"varint,1,opt,name=cores" json:"cores,omitempty"` -} - -func (m *CoreResponse) Reset() { *m = CoreResponse{} } -func (m *CoreResponse) String() 
string { return proto.CompactTextString(m) } -func (*CoreResponse) ProtoMessage() {} -func (*CoreResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } - -func (m *CoreResponse) GetCores() int32 { - if m != nil { - return m.Cores - } - return 0 -} - -type Void struct { -} - -func (m *Void) Reset() { *m = Void{} } -func (m *Void) String() string { return proto.CompactTextString(m) } -func (*Void) ProtoMessage() {} -func (*Void) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } - -// A single performance scenario: input to qps_json_driver -type Scenario struct { - // Human readable name for this scenario - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Client configuration - ClientConfig *ClientConfig `protobuf:"bytes,2,opt,name=client_config,json=clientConfig" json:"client_config,omitempty"` - // Number of clients to start for the test - NumClients int32 `protobuf:"varint,3,opt,name=num_clients,json=numClients" json:"num_clients,omitempty"` - // Server configuration - ServerConfig *ServerConfig `protobuf:"bytes,4,opt,name=server_config,json=serverConfig" json:"server_config,omitempty"` - // Number of servers to start for the test - NumServers int32 `protobuf:"varint,5,opt,name=num_servers,json=numServers" json:"num_servers,omitempty"` - // Warmup period, in seconds - WarmupSeconds int32 `protobuf:"varint,6,opt,name=warmup_seconds,json=warmupSeconds" json:"warmup_seconds,omitempty"` - // Benchmark time, in seconds - BenchmarkSeconds int32 `protobuf:"varint,7,opt,name=benchmark_seconds,json=benchmarkSeconds" json:"benchmark_seconds,omitempty"` - // Number of workers to spawn locally (usually zero) - SpawnLocalWorkerCount int32 `protobuf:"varint,8,opt,name=spawn_local_worker_count,json=spawnLocalWorkerCount" json:"spawn_local_worker_count,omitempty"` -} - -func (m *Scenario) Reset() { *m = Scenario{} } -func (m *Scenario) String() string { return proto.CompactTextString(m) } -func (*Scenario) ProtoMessage() {} -func (*Scenario) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } - -func (m *Scenario) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Scenario) GetClientConfig() *ClientConfig { - if m != nil { - return m.ClientConfig - } - return nil -} - -func (m *Scenario) GetNumClients() int32 { - if m != nil { - return m.NumClients - } - return 0 -} - -func (m *Scenario) GetServerConfig() *ServerConfig { - if m != nil { - return m.ServerConfig - } - return nil -} - -func (m *Scenario) GetNumServers() int32 { - if m != nil { - return m.NumServers - } - return 0 -} - -func (m *Scenario) GetWarmupSeconds() int32 { - if m != nil { - return m.WarmupSeconds - } - return 0 -} - -func (m *Scenario) GetBenchmarkSeconds() int32 { - if m != nil { - return m.BenchmarkSeconds - } - return 0 -} - -func (m *Scenario) GetSpawnLocalWorkerCount() int32 { - if m != nil { - return m.SpawnLocalWorkerCount - } - return 0 -} - -// A set of scenarios to be run with qps_json_driver -type Scenarios struct { - Scenarios []*Scenario `protobuf:"bytes,1,rep,name=scenarios" json:"scenarios,omitempty"` -} - -func (m *Scenarios) Reset() { *m = Scenarios{} } -func (m *Scenarios) String() string { return proto.CompactTextString(m) } -func (*Scenarios) ProtoMessage() {} -func (*Scenarios) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } - -func (m *Scenarios) GetScenarios() []*Scenario { - if m != nil { - return m.Scenarios - } - return nil -} - -func init() { - 
proto.RegisterType((*PoissonParams)(nil), "grpc.testing.PoissonParams") - proto.RegisterType((*UniformParams)(nil), "grpc.testing.UniformParams") - proto.RegisterType((*DeterministicParams)(nil), "grpc.testing.DeterministicParams") - proto.RegisterType((*ParetoParams)(nil), "grpc.testing.ParetoParams") - proto.RegisterType((*ClosedLoopParams)(nil), "grpc.testing.ClosedLoopParams") - proto.RegisterType((*LoadParams)(nil), "grpc.testing.LoadParams") - proto.RegisterType((*SecurityParams)(nil), "grpc.testing.SecurityParams") - proto.RegisterType((*ClientConfig)(nil), "grpc.testing.ClientConfig") - proto.RegisterType((*ClientStatus)(nil), "grpc.testing.ClientStatus") - proto.RegisterType((*Mark)(nil), "grpc.testing.Mark") - proto.RegisterType((*ClientArgs)(nil), "grpc.testing.ClientArgs") - proto.RegisterType((*ServerConfig)(nil), "grpc.testing.ServerConfig") - proto.RegisterType((*ServerArgs)(nil), "grpc.testing.ServerArgs") - proto.RegisterType((*ServerStatus)(nil), "grpc.testing.ServerStatus") - proto.RegisterType((*CoreRequest)(nil), "grpc.testing.CoreRequest") - proto.RegisterType((*CoreResponse)(nil), "grpc.testing.CoreResponse") - proto.RegisterType((*Void)(nil), "grpc.testing.Void") - proto.RegisterType((*Scenario)(nil), "grpc.testing.Scenario") - proto.RegisterType((*Scenarios)(nil), "grpc.testing.Scenarios") - proto.RegisterEnum("grpc.testing.ClientType", ClientType_name, ClientType_value) - proto.RegisterEnum("grpc.testing.ServerType", ServerType_name, ServerType_value) - proto.RegisterEnum("grpc.testing.RpcType", RpcType_name, RpcType_value) -} - -func init() { proto.RegisterFile("control.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 1179 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0x6f, 0x6f, 0xdb, 0xb6, - 0x13, 0xb6, 0x1d, 0xdb, 0xb1, 0x4e, 0xb6, 0xe3, 0x1f, 0x7f, 0xe9, 0xa0, 0xa6, 0x69, 0x97, 0x6a, - 0x1b, 0x16, 0x64, 0x40, 0x5a, 0x78, 0x05, 0xba, 0x62, 0x2f, 0x02, 0xc7, 0x33, 0xea, 0x00, 0x69, - 0x96, 0xd1, 0x69, 0x87, 0xbe, 0x12, 0x18, 0x99, 0xb1, 0x85, 0xc8, 0xa2, 0x46, 0x52, 0x09, 0xf2, - 0x15, 0xf6, 0x99, 0xf6, 0x39, 0xf6, 0x35, 0xf6, 0x15, 0x06, 0xfe, 0x91, 0x23, 0xb9, 0x06, 0x9a, - 0x6d, 0xef, 0xc4, 0xbb, 0xe7, 0xe1, 0x91, 0xf7, 0xdc, 0x1d, 0x05, 0x9d, 0x90, 0x25, 0x92, 0xb3, - 0xf8, 0x30, 0xe5, 0x4c, 0x32, 0xd4, 0x9e, 0xf1, 0x34, 0x3c, 0x94, 0x54, 0xc8, 0x28, 0x99, 0xed, - 0x74, 0x53, 0x72, 0x17, 0x33, 0x32, 0x15, 0xc6, 0xbb, 0xe3, 0x0a, 0x49, 0xa4, 0x5d, 0xf8, 0x7d, - 0xe8, 0x9c, 0xb3, 0x48, 0x08, 0x96, 0x9c, 0x13, 0x4e, 0x16, 0x02, 0x3d, 0x87, 0x36, 0xbb, 0xba, - 0xa2, 0x9c, 0x4e, 0x03, 0x45, 0xf2, 0xaa, 0x7b, 0xd5, 0xfd, 0x2a, 0x76, 0xad, 0xed, 0x94, 0x91, - 0xa9, 0x4f, 0xa0, 0xf3, 0x3e, 0x89, 0xae, 0x18, 0x5f, 0x58, 0xce, 0xb7, 0xb0, 0x15, 0x25, 0x92, - 0x72, 0xc2, 0x79, 0x74, 0x43, 0xe2, 0x20, 0x66, 0x96, 0xd6, 0x2d, 0x9a, 0x4f, 0xd9, 0x27, 0xc0, - 0x79, 0xe4, 0xd5, 0x3e, 0x05, 0x8e, 0x23, 0xff, 0x07, 0xf8, 0xff, 0x4f, 0x54, 0x52, 0xbe, 0x88, - 0x92, 0x48, 0xc8, 0x28, 0x7c, 0xf8, 0xe1, 0x7e, 0x81, 0xf6, 0x39, 0xe1, 0x54, 0x32, 0x4b, 0xf9, - 0x0e, 0xfe, 0x57, 0x0a, 0x79, 0x49, 0x04, 0xb5, 0xbc, 0x5e, 0xd1, 0x71, 0x4c, 0x04, 0x45, 0xdb, - 0xd0, 0x20, 0x71, 0x3a, 0x27, 0xf6, 0x54, 0x66, 0xe1, 0x23, 0xe8, 0x0d, 0x63, 0x26, 0x54, 0x00, - 0x96, 0x9a, 0x6d, 0xfd, 0x3f, 0x6a, 0x00, 0x2a, 0x9e, 0x8d, 0x32, 0x00, 0x37, 0xd4, 0x90, 0x20, - 0x66, 0x2c, 0xd5, 0xfb, 0xbb, 0xfd, 0x67, 0x87, 0x45, 0x1d, 0x0e, 0x57, 0xf7, 0x18, 0x57, 0x30, - 0x84, 0x4b, 0x1b, 0x7a, 0x0d, 0x9b, 0xa9, 0x51, 0x42, 
0x47, 0x77, 0xfb, 0x4f, 0xca, 0xf4, 0x92, - 0x4c, 0xe3, 0x0a, 0xce, 0xd1, 0x8a, 0x98, 0x19, 0x39, 0xbc, 0x8d, 0x75, 0xc4, 0x92, 0x56, 0x8a, - 0x68, 0xd1, 0xe8, 0x47, 0x68, 0x4e, 0x75, 0x92, 0xbd, 0xba, 0xe6, 0x3d, 0x2f, 0xf3, 0xd6, 0x08, - 0x30, 0xae, 0x60, 0x4b, 0x41, 0xaf, 0xa0, 0x99, 0xea, 0x3c, 0x7b, 0x0d, 0x4d, 0xde, 0x59, 0x39, - 0x6d, 0x41, 0x03, 0xc5, 0x32, 0xd8, 0xe3, 0x26, 0xd4, 0x95, 0x70, 0xfe, 0x25, 0x74, 0x27, 0x34, - 0xcc, 0x78, 0x24, 0xef, 0x6c, 0x06, 0x9f, 0x81, 0x9b, 0x09, 0x1a, 0x28, 0x7e, 0x10, 0x12, 0x9d, - 0xc1, 0x16, 0x76, 0x32, 0x41, 0x2f, 0xa8, 0x90, 0x43, 0x82, 0x5e, 0xc2, 0xb6, 0xa0, 0xfc, 0x86, - 0xf2, 0x60, 0xce, 0x84, 0x0c, 0xd8, 0x0d, 0xe5, 0x3c, 0x9a, 0x52, 0x9d, 0x2b, 0x07, 0x23, 0xe3, - 0x1b, 0x33, 0x21, 0x7f, 0xb6, 0x1e, 0xff, 0xf7, 0x06, 0xb4, 0x87, 0x71, 0x44, 0x13, 0x39, 0x64, - 0xc9, 0x55, 0x34, 0x43, 0xdf, 0x40, 0xd7, 0x6e, 0x21, 0x09, 0x9f, 0x51, 0x29, 0xbc, 0xea, 0xde, - 0xc6, 0xbe, 0x83, 0x3b, 0xc6, 0x7a, 0x61, 0x8c, 0xe8, 0x8d, 0xd2, 0x52, 0xd1, 0x02, 0x79, 0x97, - 0x9a, 0x00, 0xdd, 0xbe, 0xb7, 0xaa, 0xa5, 0x02, 0x5c, 0xdc, 0xa5, 0x54, 0x69, 0x98, 0x7f, 0xa3, - 0x11, 0x6c, 0x09, 0x7b, 0xad, 0x20, 0xd5, 0xf7, 0xb2, 0x92, 0xec, 0x96, 0xe9, 0xe5, 0xbb, 0xe3, - 0xae, 0x28, 0xe7, 0xe2, 0x08, 0x76, 0x59, 0x26, 0x85, 0x24, 0xc9, 0x34, 0x4a, 0x66, 0x01, 0x4f, - 0x43, 0x11, 0xa4, 0x94, 0x07, 0xe1, 0x9c, 0x24, 0x09, 0x8d, 0xb5, 0x5c, 0x0d, 0xfc, 0xb8, 0x80, - 0xc1, 0x69, 0x28, 0xce, 0x29, 0x1f, 0x1a, 0x80, 0xea, 0x33, 0x7b, 0x05, 0x4b, 0x11, 0x5a, 0xa5, - 0x06, 0xee, 0x1a, 0xb3, 0xc5, 0x09, 0x95, 0x55, 0x22, 0xee, 0x92, 0x30, 0xc8, 0x6f, 0x3c, 0xe7, - 0x94, 0x4c, 0x85, 0xb7, 0xa9, 0xd1, 0x48, 0xfb, 0xec, 0x5d, 0x8d, 0x07, 0xbd, 0x84, 0x16, 0x4f, - 0x43, 0x93, 0x9a, 0x96, 0x4e, 0xcd, 0xa3, 0xf2, 0xdd, 0x70, 0x1a, 0xea, 0xbc, 0x6c, 0x72, 0xf3, - 0xa1, 0xf2, 0xa9, 0x34, 0xcf, 0x13, 0x02, 0x3a, 0x21, 0x2b, 0xf9, 0xbc, 0x6f, 0x25, 0x0c, 0xf1, - 0x7d, 0x5b, 0x1d, 0x43, 0x3e, 0xbc, 0x82, 0x50, 0x6b, 0xe8, 0xb9, 0x6b, 0x5b, 0xc3, 0x60, 0x8c, - 0xcc, 0xb8, 0x93, 0x16, 0x97, 0x68, 0x0c, 0xbd, 0x79, 0x24, 0x24, 0x9b, 0x71, 0xb2, 0xc8, 0xcf, - 0xd0, 0xd6, 0xbb, 0x3c, 0x2d, 0xef, 0x32, 0xce, 0x51, 0xf6, 0x20, 0x5b, 0xf3, 0xb2, 0x01, 0x3d, - 0x01, 0x27, 0x64, 0x9c, 0x06, 0x71, 0x24, 0xa4, 0xd7, 0xd9, 0xdb, 0xd8, 0x6f, 0xe0, 0x96, 0x32, - 0x9c, 0x46, 0x42, 0xa2, 0xa7, 0x00, 0xd6, 0xb9, 0x88, 0xa4, 0xd7, 0xd5, 0xf9, 0x73, 0x8c, 0x77, - 0x11, 0x49, 0xff, 0x28, 0xaf, 0xc5, 0x89, 0x24, 0x32, 0x13, 0xe8, 0x05, 0x34, 0xf4, 0x18, 0xb6, - 0xa3, 0xe2, 0xf1, 0xba, 0xf2, 0x52, 0x50, 0x81, 0x0d, 0xce, 0xdf, 0x85, 0xfa, 0x3b, 0xc2, 0xaf, - 0xd5, 0x88, 0xe2, 0x54, 0x50, 0x69, 0x3b, 0xc4, 0x2c, 0xfc, 0x0c, 0xc0, 0x70, 0x06, 0x7c, 0x26, - 0x50, 0x1f, 0x1a, 0x82, 0xca, 0x2c, 0x9f, 0x43, 0x3b, 0xeb, 0x36, 0x37, 0xd9, 0x19, 0x57, 0xb0, - 0x81, 0xa2, 0x7d, 0xa8, 0x2f, 0x08, 0xbf, 0xb6, 0xb3, 0x07, 0x95, 0x29, 0x2a, 0xf2, 0xb8, 0x82, - 0x35, 0xe2, 0xd8, 0x81, 0x4d, 0xc2, 0x67, 0xaa, 0x00, 0xfc, 0x3f, 0x6b, 0xd0, 0x9e, 0xe8, 0xe6, - 0xb1, 0xc9, 0x7e, 0x03, 0x6e, 0xde, 0x62, 0xaa, 0x40, 0xaa, 0xeb, 0x7a, 0xc7, 0x10, 0x4c, 0xef, - 0x88, 0xe5, 0xf7, 0xba, 0xde, 0xa9, 0xfd, 0x8b, 0xde, 0x41, 0x50, 0x4f, 0x19, 0x97, 0xb6, 0x47, - 0xf4, 0xf7, 0x7d, 0x95, 0xe7, 0x67, 0x5b, 0x53, 0xe5, 0xf6, 0x54, 0xb6, 0xca, 0xcb, 0x6a, 0xb6, - 0x56, 0xd4, 0x5c, 0x53, 0x97, 0xce, 0x3f, 0xae, 0xcb, 0x52, 0x35, 0x41, 0xb9, 0x9a, 0x94, 0x9e, - 0xe6, 0x40, 0x0f, 0xd0, 0xb3, 0x28, 0xc0, 0x7f, 0xd4, 0x33, 0xca, 0xe5, 0x7c, 0x50, 0x95, 0xde, - 0x43, 0xf3, 0x2a, 0x5d, 0x66, 0xbf, 0x56, 0xc8, 0xfe, 0x36, 0x34, 0xd4, 0xbd, 
0xcc, 0x28, 0x6c, - 0x60, 0xb3, 0xf0, 0x3b, 0xe0, 0x0e, 0x19, 0xa7, 0x98, 0xfe, 0x96, 0x51, 0x21, 0xfd, 0xaf, 0xa1, - 0x6d, 0x96, 0x22, 0x65, 0x89, 0x79, 0x89, 0x0d, 0xa9, 0x5a, 0x24, 0x35, 0xa1, 0xfe, 0x81, 0x45, - 0x53, 0xff, 0xaf, 0x1a, 0xb4, 0x26, 0x21, 0x4d, 0x08, 0x8f, 0x98, 0x8a, 0x99, 0x90, 0x85, 0x29, - 0x36, 0x07, 0xeb, 0x6f, 0x74, 0x04, 0x9d, 0x7c, 0x00, 0x1a, 0x7d, 0x6a, 0x9f, 0xeb, 0x04, 0xdc, - 0x0e, 0x8b, 0x6f, 0xc5, 0x97, 0xe0, 0x26, 0xd9, 0xc2, 0x8e, 0xc5, 0xfc, 0xe8, 0x90, 0x64, 0x0b, - 0xc3, 0x51, 0x33, 0xda, 0x3e, 0x1b, 0x79, 0x84, 0xfa, 0xe7, 0xb4, 0xc1, 0x6d, 0x51, 0x6c, 0x15, - 0x1b, 0xc1, 0xd8, 0xf2, 0xf9, 0xac, 0x22, 0x18, 0x8e, 0x50, 0xcf, 0xd5, 0x2d, 0xe1, 0x8b, 0x2c, - 0x0d, 0x04, 0x0d, 0x59, 0x32, 0x15, 0x5e, 0x53, 0x63, 0x3a, 0xc6, 0x3a, 0x31, 0x46, 0xf5, 0x83, - 0x73, 0x49, 0x93, 0x70, 0xae, 0xb4, 0x5c, 0x22, 0x4d, 0x65, 0xf7, 0x96, 0x8e, 0x1c, 0xfc, 0x1a, - 0x3c, 0x91, 0x92, 0xdb, 0x24, 0x88, 0x59, 0x48, 0xe2, 0xe0, 0x96, 0xf1, 0x6b, 0x7d, 0x83, 0x2c, - 0xc9, 0xab, 0xfc, 0x91, 0xf6, 0x9f, 0x2a, 0xf7, 0xaf, 0xda, 0x3b, 0x54, 0x4e, 0x7f, 0x00, 0x4e, - 0x9e, 0x70, 0x81, 0x5e, 0x81, 0x23, 0xf2, 0x85, 0x7e, 0x43, 0xdd, 0xfe, 0x17, 0x2b, 0xf7, 0xb6, - 0x6e, 0x7c, 0x0f, 0x3c, 0x78, 0x91, 0xcf, 0x28, 0xdd, 0xee, 0x5b, 0xe0, 0x4e, 0x3e, 0x9e, 0x0d, - 0x83, 0xe1, 0xe9, 0xc9, 0xe8, 0xec, 0xa2, 0x57, 0x41, 0x3d, 0x68, 0x0f, 0x8a, 0x96, 0xea, 0xc1, - 0x49, 0xde, 0x04, 0x25, 0xc2, 0x64, 0x84, 0x3f, 0x8c, 0x70, 0x91, 0x60, 0x2d, 0x55, 0xe4, 0xc1, - 0xb6, 0xb1, 0xbc, 0x1d, 0x9d, 0x8d, 0xf0, 0xc9, 0xd2, 0x53, 0x3b, 0xf8, 0x0a, 0x36, 0xed, 0xbb, - 0x84, 0x1c, 0x68, 0xbc, 0x3f, 0x1b, 0xe0, 0x8f, 0xbd, 0x0a, 0xea, 0x80, 0x33, 0xb9, 0xc0, 0xa3, - 0xc1, 0xbb, 0x93, 0xb3, 0xb7, 0xbd, 0xea, 0x65, 0x53, 0xff, 0x12, 0x7f, 0xff, 0x77, 0x00, 0x00, - 0x00, 0xff, 0xff, 0x75, 0x59, 0xf4, 0x03, 0x4e, 0x0b, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/control.proto b/vendor/google.golang.org/grpc/benchmark/grpc_testing/control.proto deleted file mode 100644 index 9379ef49a..000000000 --- a/vendor/google.golang.org/grpc/benchmark/grpc_testing/control.proto +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2016 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -import "payloads.proto"; -import "stats.proto"; - -package grpc.testing; - -enum ClientType { - SYNC_CLIENT = 0; - ASYNC_CLIENT = 1; -} - -enum ServerType { - SYNC_SERVER = 0; - ASYNC_SERVER = 1; - ASYNC_GENERIC_SERVER = 2; -} - -enum RpcType { - UNARY = 0; - STREAMING = 1; -} - -// Parameters of poisson process distribution, which is a good representation -// of activity coming in from independent identical stationary sources. -message PoissonParams { - // The rate of arrivals (a.k.a. lambda parameter of the exp distribution). 
- double offered_load = 1; -} - -message UniformParams { - double interarrival_lo = 1; - double interarrival_hi = 2; -} - -message DeterministicParams { - double offered_load = 1; -} - -message ParetoParams { - double interarrival_base = 1; - double alpha = 2; -} - -// Once an RPC finishes, immediately start a new one. -// No configuration parameters needed. -message ClosedLoopParams { -} - -message LoadParams { - oneof load { - ClosedLoopParams closed_loop = 1; - PoissonParams poisson = 2; - UniformParams uniform = 3; - DeterministicParams determ = 4; - ParetoParams pareto = 5; - }; -} - -// presence of SecurityParams implies use of TLS -message SecurityParams { - bool use_test_ca = 1; - string server_host_override = 2; -} - -message ClientConfig { - // List of targets to connect to. At least one target needs to be specified. - repeated string server_targets = 1; - ClientType client_type = 2; - SecurityParams security_params = 3; - // How many concurrent RPCs to start for each channel. - // For synchronous client, use a separate thread for each outstanding RPC. - int32 outstanding_rpcs_per_channel = 4; - // Number of independent client channels to create. - // i-th channel will connect to server_target[i % server_targets.size()] - int32 client_channels = 5; - // Only for async client. Number of threads to use to start/manage RPCs. - int32 async_client_threads = 7; - RpcType rpc_type = 8; - // The requested load for the entire client (aggregated over all the threads). - LoadParams load_params = 10; - PayloadConfig payload_config = 11; - HistogramParams histogram_params = 12; - - // Specify the cores we should run the client on, if desired - repeated int32 core_list = 13; - int32 core_limit = 14; -} - -message ClientStatus { - ClientStats stats = 1; -} - -// Request current stats -message Mark { - // if true, the stats will be reset after taking their snapshot. - bool reset = 1; -} - -message ClientArgs { - oneof argtype { - ClientConfig setup = 1; - Mark mark = 2; - } -} - -message ServerConfig { - ServerType server_type = 1; - SecurityParams security_params = 2; - // Port on which to listen. Zero means pick unused port. - int32 port = 4; - // Only for async server. Number of threads used to serve the requests. 
- int32 async_server_threads = 7; - // Specify the number of cores to limit server to, if desired - int32 core_limit = 8; - // payload config, used in generic server - PayloadConfig payload_config = 9; - - // Specify the cores we should run the server on, if desired - repeated int32 core_list = 10; -} - -message ServerArgs { - oneof argtype { - ServerConfig setup = 1; - Mark mark = 2; - } -} - -message ServerStatus { - ServerStats stats = 1; - // the port bound by the server - int32 port = 2; - // Number of cores available to the server - int32 cores = 3; -} - -message CoreRequest { -} - -message CoreResponse { - // Number of cores available on the server - int32 cores = 1; -} - -message Void { -} - -// A single performance scenario: input to qps_json_driver -message Scenario { - // Human readable name for this scenario - string name = 1; - // Client configuration - ClientConfig client_config = 2; - // Number of clients to start for the test - int32 num_clients = 3; - // Server configuration - ServerConfig server_config = 4; - // Number of servers to start for the test - int32 num_servers = 5; - // Warmup period, in seconds - int32 warmup_seconds = 6; - // Benchmark time, in seconds - int32 benchmark_seconds = 7; - // Number of workers to spawn locally (usually zero) - int32 spawn_local_worker_count = 8; -} - -// A set of scenarios to be run with qps_json_driver -message Scenarios { - repeated Scenario scenarios = 1; -} diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.pb.go b/vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.pb.go deleted file mode 100644 index b34c5d59a..000000000 --- a/vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.pb.go +++ /dev/null @@ -1,479 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: messages.proto - -package grpc_testing - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// The type of payload that should be returned. -type PayloadType int32 - -const ( - // Compressable text format. - PayloadType_COMPRESSABLE PayloadType = 0 - // Uncompressable binary format. - PayloadType_UNCOMPRESSABLE PayloadType = 1 - // Randomly chosen from all other formats defined in this enum. - PayloadType_RANDOM PayloadType = 2 -) - -var PayloadType_name = map[int32]string{ - 0: "COMPRESSABLE", - 1: "UNCOMPRESSABLE", - 2: "RANDOM", -} -var PayloadType_value = map[string]int32{ - "COMPRESSABLE": 0, - "UNCOMPRESSABLE": 1, - "RANDOM": 2, -} - -func (x PayloadType) String() string { - return proto.EnumName(PayloadType_name, int32(x)) -} -func (PayloadType) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } - -// Compression algorithms -type CompressionType int32 - -const ( - // No compression - CompressionType_NONE CompressionType = 0 - CompressionType_GZIP CompressionType = 1 - CompressionType_DEFLATE CompressionType = 2 -) - -var CompressionType_name = map[int32]string{ - 0: "NONE", - 1: "GZIP", - 2: "DEFLATE", -} -var CompressionType_value = map[string]int32{ - "NONE": 0, - "GZIP": 1, - "DEFLATE": 2, -} - -func (x CompressionType) String() string { - return proto.EnumName(CompressionType_name, int32(x)) -} -func (CompressionType) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } - -// A block of data, to simply increase gRPC message size. -type Payload struct { - // The type of data in body. 
- Type PayloadType `protobuf:"varint,1,opt,name=type,enum=grpc.testing.PayloadType" json:"type,omitempty"` - // Primary contents of payload. - Body []byte `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"` -} - -func (m *Payload) Reset() { *m = Payload{} } -func (m *Payload) String() string { return proto.CompactTextString(m) } -func (*Payload) ProtoMessage() {} -func (*Payload) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } - -func (m *Payload) GetType() PayloadType { - if m != nil { - return m.Type - } - return PayloadType_COMPRESSABLE -} - -func (m *Payload) GetBody() []byte { - if m != nil { - return m.Body - } - return nil -} - -// A protobuf representation for grpc status. This is used by test -// clients to specify a status that the server should attempt to return. -type EchoStatus struct { - Code int32 `protobuf:"varint,1,opt,name=code" json:"code,omitempty"` - Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` -} - -func (m *EchoStatus) Reset() { *m = EchoStatus{} } -func (m *EchoStatus) String() string { return proto.CompactTextString(m) } -func (*EchoStatus) ProtoMessage() {} -func (*EchoStatus) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } - -func (m *EchoStatus) GetCode() int32 { - if m != nil { - return m.Code - } - return 0 -} - -func (m *EchoStatus) GetMessage() string { - if m != nil { - return m.Message - } - return "" -} - -// Unary request. -type SimpleRequest struct { - // Desired payload type in the response from the server. - // If response_type is RANDOM, server randomly chooses one from other formats. - ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` - // Desired payload size in the response from the server. - // If response_type is COMPRESSABLE, this denotes the size before compression. - ResponseSize int32 `protobuf:"varint,2,opt,name=response_size,json=responseSize" json:"response_size,omitempty"` - // Optional input payload sent along with the request. - Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` - // Whether SimpleResponse should include username. - FillUsername bool `protobuf:"varint,4,opt,name=fill_username,json=fillUsername" json:"fill_username,omitempty"` - // Whether SimpleResponse should include OAuth scope. 
- FillOauthScope bool `protobuf:"varint,5,opt,name=fill_oauth_scope,json=fillOauthScope" json:"fill_oauth_scope,omitempty"` - // Compression algorithm to be used by the server for the response (stream) - ResponseCompression CompressionType `protobuf:"varint,6,opt,name=response_compression,json=responseCompression,enum=grpc.testing.CompressionType" json:"response_compression,omitempty"` - // Whether server should return a given status - ResponseStatus *EchoStatus `protobuf:"bytes,7,opt,name=response_status,json=responseStatus" json:"response_status,omitempty"` -} - -func (m *SimpleRequest) Reset() { *m = SimpleRequest{} } -func (m *SimpleRequest) String() string { return proto.CompactTextString(m) } -func (*SimpleRequest) ProtoMessage() {} -func (*SimpleRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } - -func (m *SimpleRequest) GetResponseType() PayloadType { - if m != nil { - return m.ResponseType - } - return PayloadType_COMPRESSABLE -} - -func (m *SimpleRequest) GetResponseSize() int32 { - if m != nil { - return m.ResponseSize - } - return 0 -} - -func (m *SimpleRequest) GetPayload() *Payload { - if m != nil { - return m.Payload - } - return nil -} - -func (m *SimpleRequest) GetFillUsername() bool { - if m != nil { - return m.FillUsername - } - return false -} - -func (m *SimpleRequest) GetFillOauthScope() bool { - if m != nil { - return m.FillOauthScope - } - return false -} - -func (m *SimpleRequest) GetResponseCompression() CompressionType { - if m != nil { - return m.ResponseCompression - } - return CompressionType_NONE -} - -func (m *SimpleRequest) GetResponseStatus() *EchoStatus { - if m != nil { - return m.ResponseStatus - } - return nil -} - -// Unary response, as configured by the request. -type SimpleResponse struct { - // Payload to increase message size. - Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` - // The user the request came from, for verifying authentication was - // successful when the client expected it. - Username string `protobuf:"bytes,2,opt,name=username" json:"username,omitempty"` - // OAuth scope. - OauthScope string `protobuf:"bytes,3,opt,name=oauth_scope,json=oauthScope" json:"oauth_scope,omitempty"` -} - -func (m *SimpleResponse) Reset() { *m = SimpleResponse{} } -func (m *SimpleResponse) String() string { return proto.CompactTextString(m) } -func (*SimpleResponse) ProtoMessage() {} -func (*SimpleResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } - -func (m *SimpleResponse) GetPayload() *Payload { - if m != nil { - return m.Payload - } - return nil -} - -func (m *SimpleResponse) GetUsername() string { - if m != nil { - return m.Username - } - return "" -} - -func (m *SimpleResponse) GetOauthScope() string { - if m != nil { - return m.OauthScope - } - return "" -} - -// Client-streaming request. -type StreamingInputCallRequest struct { - // Optional input payload sent along with the request. - Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` -} - -func (m *StreamingInputCallRequest) Reset() { *m = StreamingInputCallRequest{} } -func (m *StreamingInputCallRequest) String() string { return proto.CompactTextString(m) } -func (*StreamingInputCallRequest) ProtoMessage() {} -func (*StreamingInputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } - -func (m *StreamingInputCallRequest) GetPayload() *Payload { - if m != nil { - return m.Payload - } - return nil -} - -// Client-streaming response. 
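Taken together, a unary exchange is configured entirely by SimpleRequest. A short sketch under the same package assumption as above (the sizes are arbitrary example values): request a 1024-byte compressable response body while attaching a small payload to the request itself.

    package main

    import (
        "fmt"

        "google.golang.org/grpc/benchmark/grpc_testing"
    )

    func main() {
        req := &grpc_testing.SimpleRequest{
            // For COMPRESSABLE payloads, ResponseSize is the size before
            // compression, per the field comment above.
            ResponseType: grpc_testing.PayloadType_COMPRESSABLE,
            ResponseSize: 1024,
            // Optional payload sent along with the request.
            Payload: &grpc_testing.Payload{
                Type: grpc_testing.PayloadType_COMPRESSABLE,
                Body: make([]byte, 128),
            },
        }
        fmt.Println("requested:", req.GetResponseSize(), req.GetResponseType())
    }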
-type StreamingInputCallResponse struct { - // Aggregated size of payloads received from the client. - AggregatedPayloadSize int32 `protobuf:"varint,1,opt,name=aggregated_payload_size,json=aggregatedPayloadSize" json:"aggregated_payload_size,omitempty"` -} - -func (m *StreamingInputCallResponse) Reset() { *m = StreamingInputCallResponse{} } -func (m *StreamingInputCallResponse) String() string { return proto.CompactTextString(m) } -func (*StreamingInputCallResponse) ProtoMessage() {} -func (*StreamingInputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } - -func (m *StreamingInputCallResponse) GetAggregatedPayloadSize() int32 { - if m != nil { - return m.AggregatedPayloadSize - } - return 0 -} - -// Configuration for a particular response. -type ResponseParameters struct { - // Desired payload sizes in responses from the server. - // If response_type is COMPRESSABLE, this denotes the size before compression. - Size int32 `protobuf:"varint,1,opt,name=size" json:"size,omitempty"` - // Desired interval between consecutive responses in the response stream in - // microseconds. - IntervalUs int32 `protobuf:"varint,2,opt,name=interval_us,json=intervalUs" json:"interval_us,omitempty"` -} - -func (m *ResponseParameters) Reset() { *m = ResponseParameters{} } -func (m *ResponseParameters) String() string { return proto.CompactTextString(m) } -func (*ResponseParameters) ProtoMessage() {} -func (*ResponseParameters) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } - -func (m *ResponseParameters) GetSize() int32 { - if m != nil { - return m.Size - } - return 0 -} - -func (m *ResponseParameters) GetIntervalUs() int32 { - if m != nil { - return m.IntervalUs - } - return 0 -} - -// Server-streaming request. -type StreamingOutputCallRequest struct { - // Desired payload type in the response from the server. - // If response_type is RANDOM, the payload from each response in the stream - // might be of different types. This is to simulate a mixed type of payload - // stream. - ResponseType PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` - // Configuration for each expected response message. - ResponseParameters []*ResponseParameters `protobuf:"bytes,2,rep,name=response_parameters,json=responseParameters" json:"response_parameters,omitempty"` - // Optional input payload sent along with the request. 
- Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` - // Compression algorithm to be used by the server for the response (stream) - ResponseCompression CompressionType `protobuf:"varint,6,opt,name=response_compression,json=responseCompression,enum=grpc.testing.CompressionType" json:"response_compression,omitempty"` - // Whether server should return a given status - ResponseStatus *EchoStatus `protobuf:"bytes,7,opt,name=response_status,json=responseStatus" json:"response_status,omitempty"` -} - -func (m *StreamingOutputCallRequest) Reset() { *m = StreamingOutputCallRequest{} } -func (m *StreamingOutputCallRequest) String() string { return proto.CompactTextString(m) } -func (*StreamingOutputCallRequest) ProtoMessage() {} -func (*StreamingOutputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } - -func (m *StreamingOutputCallRequest) GetResponseType() PayloadType { - if m != nil { - return m.ResponseType - } - return PayloadType_COMPRESSABLE -} - -func (m *StreamingOutputCallRequest) GetResponseParameters() []*ResponseParameters { - if m != nil { - return m.ResponseParameters - } - return nil -} - -func (m *StreamingOutputCallRequest) GetPayload() *Payload { - if m != nil { - return m.Payload - } - return nil -} - -func (m *StreamingOutputCallRequest) GetResponseCompression() CompressionType { - if m != nil { - return m.ResponseCompression - } - return CompressionType_NONE -} - -func (m *StreamingOutputCallRequest) GetResponseStatus() *EchoStatus { - if m != nil { - return m.ResponseStatus - } - return nil -} - -// Server-streaming response, as configured by the request and parameters. -type StreamingOutputCallResponse struct { - // Payload to increase response size. - Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` -} - -func (m *StreamingOutputCallResponse) Reset() { *m = StreamingOutputCallResponse{} } -func (m *StreamingOutputCallResponse) String() string { return proto.CompactTextString(m) } -func (*StreamingOutputCallResponse) ProtoMessage() {} -func (*StreamingOutputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} } - -func (m *StreamingOutputCallResponse) GetPayload() *Payload { - if m != nil { - return m.Payload - } - return nil -} - -// For reconnect interop test only. -// Client tells server what reconnection parameters it used. -type ReconnectParams struct { - MaxReconnectBackoffMs int32 `protobuf:"varint,1,opt,name=max_reconnect_backoff_ms,json=maxReconnectBackoffMs" json:"max_reconnect_backoff_ms,omitempty"` -} - -func (m *ReconnectParams) Reset() { *m = ReconnectParams{} } -func (m *ReconnectParams) String() string { return proto.CompactTextString(m) } -func (*ReconnectParams) ProtoMessage() {} -func (*ReconnectParams) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9} } - -func (m *ReconnectParams) GetMaxReconnectBackoffMs() int32 { - if m != nil { - return m.MaxReconnectBackoffMs - } - return 0 -} - -// For reconnect interop test only. -// Server tells client whether its reconnects are following the spec and the -// reconnect backoffs it saw. 
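For the server-streaming shape, each ResponseParameters entry requests one response message in the stream. A sketch under the same assumptions, asking for two responses of different sizes with microsecond pacing between them:

    package main

    import (
        "fmt"

        "google.golang.org/grpc/benchmark/grpc_testing"
    )

    func main() {
        // One stream response per entry: 512 B after 1 ms, then 2 KiB after
        // 5 ms (IntervalUs is in microseconds).
        sreq := &grpc_testing.StreamingOutputCallRequest{
            ResponseType: grpc_testing.PayloadType_COMPRESSABLE,
            ResponseParameters: []*grpc_testing.ResponseParameters{
                {Size: 512, IntervalUs: 1000},
                {Size: 2048, IntervalUs: 5000},
            },
        }
        fmt.Println("responses requested:", len(sreq.GetResponseParameters()))
    }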
-type ReconnectInfo struct { - Passed bool `protobuf:"varint,1,opt,name=passed" json:"passed,omitempty"` - BackoffMs []int32 `protobuf:"varint,2,rep,packed,name=backoff_ms,json=backoffMs" json:"backoff_ms,omitempty"` -} - -func (m *ReconnectInfo) Reset() { *m = ReconnectInfo{} } -func (m *ReconnectInfo) String() string { return proto.CompactTextString(m) } -func (*ReconnectInfo) ProtoMessage() {} -func (*ReconnectInfo) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10} } - -func (m *ReconnectInfo) GetPassed() bool { - if m != nil { - return m.Passed - } - return false -} - -func (m *ReconnectInfo) GetBackoffMs() []int32 { - if m != nil { - return m.BackoffMs - } - return nil -} - -func init() { - proto.RegisterType((*Payload)(nil), "grpc.testing.Payload") - proto.RegisterType((*EchoStatus)(nil), "grpc.testing.EchoStatus") - proto.RegisterType((*SimpleRequest)(nil), "grpc.testing.SimpleRequest") - proto.RegisterType((*SimpleResponse)(nil), "grpc.testing.SimpleResponse") - proto.RegisterType((*StreamingInputCallRequest)(nil), "grpc.testing.StreamingInputCallRequest") - proto.RegisterType((*StreamingInputCallResponse)(nil), "grpc.testing.StreamingInputCallResponse") - proto.RegisterType((*ResponseParameters)(nil), "grpc.testing.ResponseParameters") - proto.RegisterType((*StreamingOutputCallRequest)(nil), "grpc.testing.StreamingOutputCallRequest") - proto.RegisterType((*StreamingOutputCallResponse)(nil), "grpc.testing.StreamingOutputCallResponse") - proto.RegisterType((*ReconnectParams)(nil), "grpc.testing.ReconnectParams") - proto.RegisterType((*ReconnectInfo)(nil), "grpc.testing.ReconnectInfo") - proto.RegisterEnum("grpc.testing.PayloadType", PayloadType_name, PayloadType_value) - proto.RegisterEnum("grpc.testing.CompressionType", CompressionType_name, CompressionType_value) -} - -func init() { proto.RegisterFile("messages.proto", fileDescriptor1) } - -var fileDescriptor1 = []byte{ - // 652 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x55, 0x4d, 0x6f, 0xd3, 0x40, - 0x10, 0xc5, 0xf9, 0xee, 0x24, 0x4d, 0xa3, 0x85, 0x82, 0x5b, 0x54, 0x11, 0x99, 0x4b, 0x54, 0x89, - 0x20, 0x05, 0x09, 0x24, 0x0e, 0xa0, 0xb4, 0x4d, 0x51, 0x50, 0x9a, 0x84, 0x75, 0x7b, 0xe1, 0x62, - 0x6d, 0x9c, 0x8d, 0x6b, 0x11, 0x7b, 0x8d, 0x77, 0x8d, 0x9a, 0x1e, 0xb8, 0xf3, 0x83, 0xb9, 0xa3, - 0x5d, 0x7f, 0xc4, 0x69, 0x7b, 0x68, 0xe1, 0xc2, 0x6d, 0xf7, 0xed, 0x9b, 0x97, 0x79, 0x33, 0xcf, - 0x0a, 0x34, 0x3d, 0xca, 0x39, 0x71, 0x28, 0xef, 0x06, 0x21, 0x13, 0x0c, 0x35, 0x9c, 0x30, 0xb0, - 0xbb, 0x82, 0x72, 0xe1, 0xfa, 0x8e, 0x31, 0x82, 0xea, 0x94, 0xac, 0x96, 0x8c, 0xcc, 0xd1, 0x2b, - 0x28, 0x89, 0x55, 0x40, 0x75, 0xad, 0xad, 0x75, 0x9a, 0xbd, 0xbd, 0x6e, 0x9e, 0xd7, 0x4d, 0x48, - 0xe7, 0xab, 0x80, 0x62, 0x45, 0x43, 0x08, 0x4a, 0x33, 0x36, 0x5f, 0xe9, 0x85, 0xb6, 0xd6, 0x69, - 0x60, 0x75, 0x36, 0xde, 0x03, 0x0c, 0xec, 0x4b, 0x66, 0x0a, 0x22, 0x22, 0x2e, 0x19, 0x36, 0x9b, - 0xc7, 0x82, 0x65, 0xac, 0xce, 0x48, 0x87, 0x6a, 0xd2, 0x8f, 0x2a, 0xdc, 0xc2, 0xe9, 0xd5, 0xf8, - 0x55, 0x84, 0x6d, 0xd3, 0xf5, 0x82, 0x25, 0xc5, 0xf4, 0x7b, 0x44, 0xb9, 0x40, 0x1f, 0x60, 0x3b, - 0xa4, 0x3c, 0x60, 0x3e, 0xa7, 0xd6, 0xfd, 0x3a, 0x6b, 0xa4, 0x7c, 0x79, 0x43, 0x2f, 0x73, 0xf5, - 0xdc, 0xbd, 0x8e, 0x7f, 0xb1, 0xbc, 0x26, 0x99, 0xee, 0x35, 0x45, 0xaf, 0xa1, 0x1a, 0xc4, 0x0a, - 0x7a, 0xb1, 0xad, 0x75, 0xea, 0xbd, 0xdd, 0x3b, 0xe5, 0x71, 0xca, 0x92, 0xaa, 0x0b, 0x77, 0xb9, - 0xb4, 0x22, 0x4e, 0x43, 0x9f, 0x78, 0x54, 0x2f, 0xb5, 0xb5, 0x4e, 0x0d, 0x37, 0x24, 0x78, 0x91, - 0x60, 
0xa8, 0x03, 0x2d, 0x45, 0x62, 0x24, 0x12, 0x97, 0x16, 0xb7, 0x59, 0x40, 0xf5, 0xb2, 0xe2, - 0x35, 0x25, 0x3e, 0x91, 0xb0, 0x29, 0x51, 0x34, 0x85, 0x27, 0x59, 0x93, 0x36, 0xf3, 0x82, 0x90, - 0x72, 0xee, 0x32, 0x5f, 0xaf, 0x28, 0xaf, 0x07, 0x9b, 0xcd, 0x1c, 0xaf, 0x09, 0xca, 0xef, 0xe3, - 0xb4, 0x34, 0xf7, 0x80, 0xfa, 0xb0, 0xb3, 0xb6, 0xad, 0x36, 0xa1, 0x57, 0x95, 0x33, 0x7d, 0x53, - 0x6c, 0xbd, 0x29, 0xdc, 0xcc, 0x46, 0xa2, 0xee, 0xc6, 0x4f, 0x68, 0xa6, 0xab, 0x88, 0xf1, 0xfc, - 0x98, 0xb4, 0x7b, 0x8d, 0x69, 0x1f, 0x6a, 0xd9, 0x84, 0xe2, 0x4d, 0x67, 0x77, 0xf4, 0x02, 0xea, - 0xf9, 0xc1, 0x14, 0xd5, 0x33, 0xb0, 0x6c, 0x28, 0xc6, 0x08, 0xf6, 0x4c, 0x11, 0x52, 0xe2, 0xb9, - 0xbe, 0x33, 0xf4, 0x83, 0x48, 0x1c, 0x93, 0xe5, 0x32, 0x8d, 0xc5, 0x43, 0x5b, 0x31, 0xce, 0x61, - 0xff, 0x2e, 0xb5, 0xc4, 0xd9, 0x5b, 0x78, 0x46, 0x1c, 0x27, 0xa4, 0x0e, 0x11, 0x74, 0x6e, 0x25, - 0x35, 0x71, 0x5e, 0xe2, 0xe0, 0xee, 0xae, 0x9f, 0x13, 0x69, 0x19, 0x1c, 0x63, 0x08, 0x28, 0xd5, - 0x98, 0x92, 0x90, 0x78, 0x54, 0xd0, 0x50, 0x65, 0x3e, 0x57, 0xaa, 0xce, 0xd2, 0xae, 0xeb, 0x0b, - 0x1a, 0xfe, 0x20, 0x32, 0x35, 0x49, 0x0a, 0x21, 0x85, 0x2e, 0xb8, 0xf1, 0xbb, 0x90, 0xeb, 0x70, - 0x12, 0x89, 0x1b, 0x86, 0xff, 0xf5, 0x3b, 0xf8, 0x02, 0x59, 0x4e, 0xac, 0x20, 0x6b, 0x55, 0x2f, - 0xb4, 0x8b, 0x9d, 0x7a, 0xaf, 0xbd, 0xa9, 0x72, 0xdb, 0x12, 0x46, 0xe1, 0x6d, 0x9b, 0x0f, 0xfe, - 0x6a, 0xfe, 0xcb, 0x98, 0x8f, 0xe1, 0xf9, 0x9d, 0x63, 0xff, 0xcb, 0xcc, 0x1b, 0x9f, 0x61, 0x07, - 0x53, 0x9b, 0xf9, 0x3e, 0xb5, 0x85, 0x1a, 0x16, 0x47, 0xef, 0x40, 0xf7, 0xc8, 0x95, 0x15, 0xa6, - 0xb0, 0x35, 0x23, 0xf6, 0x37, 0xb6, 0x58, 0x58, 0x1e, 0x4f, 0xe3, 0xe5, 0x91, 0xab, 0xac, 0xea, - 0x28, 0x7e, 0x3d, 0xe3, 0xc6, 0x29, 0x6c, 0x67, 0xe8, 0xd0, 0x5f, 0x30, 0xf4, 0x14, 0x2a, 0x01, - 0xe1, 0x9c, 0xc6, 0xcd, 0xd4, 0x70, 0x72, 0x43, 0x07, 0x00, 0x39, 0x4d, 0xb9, 0xd4, 0x32, 0xde, - 0x9a, 0xa5, 0x3a, 0x87, 0x1f, 0xa1, 0x9e, 0x4b, 0x06, 0x6a, 0x41, 0xe3, 0x78, 0x72, 0x36, 0xc5, - 0x03, 0xd3, 0xec, 0x1f, 0x8d, 0x06, 0xad, 0x47, 0x08, 0x41, 0xf3, 0x62, 0xbc, 0x81, 0x69, 0x08, - 0xa0, 0x82, 0xfb, 0xe3, 0x93, 0xc9, 0x59, 0xab, 0x70, 0xd8, 0x83, 0x9d, 0x1b, 0xfb, 0x40, 0x35, - 0x28, 0x8d, 0x27, 0x63, 0x59, 0x5c, 0x83, 0xd2, 0xa7, 0xaf, 0xc3, 0x69, 0x4b, 0x43, 0x75, 0xa8, - 0x9e, 0x0c, 0x4e, 0x47, 0xfd, 0xf3, 0x41, 0xab, 0x30, 0xab, 0xa8, 0xbf, 0x9a, 0x37, 0x7f, 0x02, - 0x00, 0x00, 0xff, 0xff, 0xc2, 0x6a, 0xce, 0x1e, 0x7c, 0x06, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.proto b/vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.proto deleted file mode 100644 index bd83f095f..000000000 --- a/vendor/google.golang.org/grpc/benchmark/grpc_testing/messages.proto +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2016 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Message definitions to be used by integration test service definitions. - -syntax = "proto3"; - -package grpc.testing; - -// The type of payload that should be returned. 
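Since every generated type above implements proto.Message (Reset, String, ProtoMessage), values round-trip through the github.com/golang/protobuf/proto package that the generated file imports. A minimal sketch, again assuming the vendored grpc_testing package is importable:

    package main

    import (
        "fmt"
        "log"

        "github.com/golang/protobuf/proto"
        "google.golang.org/grpc/benchmark/grpc_testing"
    )

    func main() {
        in := &grpc_testing.Payload{
            Type: grpc_testing.PayloadType_RANDOM,
            Body: []byte("abc"),
        }
        b, err := proto.Marshal(in)
        if err != nil {
            log.Fatal(err)
        }
        out := new(grpc_testing.Payload)
        if err := proto.Unmarshal(b, out); err != nil {
            log.Fatal(err)
        }
        // Accessors are nil-safe, mirroring the generated getters above.
        fmt.Println(out.GetType(), len(out.GetBody()))
    }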
-enum PayloadType { - // Compressable text format. - COMPRESSABLE = 0; - - // Uncompressable binary format. - UNCOMPRESSABLE = 1; - - // Randomly chosen from all other formats defined in this enum. - RANDOM = 2; -} - -// Compression algorithms -enum CompressionType { - // No compression - NONE = 0; - GZIP = 1; - DEFLATE = 2; -} - -// A block of data, to simply increase gRPC message size. -message Payload { - // The type of data in body. - PayloadType type = 1; - // Primary contents of payload. - bytes body = 2; -} - -// A protobuf representation for grpc status. This is used by test -// clients to specify a status that the server should attempt to return. -message EchoStatus { - int32 code = 1; - string message = 2; -} - -// Unary request. -message SimpleRequest { - // Desired payload type in the response from the server. - // If response_type is RANDOM, server randomly chooses one from other formats. - PayloadType response_type = 1; - - // Desired payload size in the response from the server. - // If response_type is COMPRESSABLE, this denotes the size before compression. - int32 response_size = 2; - - // Optional input payload sent along with the request. - Payload payload = 3; - - // Whether SimpleResponse should include username. - bool fill_username = 4; - - // Whether SimpleResponse should include OAuth scope. - bool fill_oauth_scope = 5; - - // Compression algorithm to be used by the server for the response (stream) - CompressionType response_compression = 6; - - // Whether server should return a given status - EchoStatus response_status = 7; -} - -// Unary response, as configured by the request. -message SimpleResponse { - // Payload to increase message size. - Payload payload = 1; - // The user the request came from, for verifying authentication was - // successful when the client expected it. - string username = 2; - // OAuth scope. - string oauth_scope = 3; -} - -// Client-streaming request. -message StreamingInputCallRequest { - // Optional input payload sent along with the request. - Payload payload = 1; - - // Not expecting any payload from the response. -} - -// Client-streaming response. -message StreamingInputCallResponse { - // Aggregated size of payloads received from the client. - int32 aggregated_payload_size = 1; -} - -// Configuration for a particular response. -message ResponseParameters { - // Desired payload sizes in responses from the server. - // If response_type is COMPRESSABLE, this denotes the size before compression. - int32 size = 1; - - // Desired interval between consecutive responses in the response stream in - // microseconds. - int32 interval_us = 2; -} - -// Server-streaming request. -message StreamingOutputCallRequest { - // Desired payload type in the response from the server. - // If response_type is RANDOM, the payload from each response in the stream - // might be of different types. This is to simulate a mixed type of payload - // stream. - PayloadType response_type = 1; - - // Configuration for each expected response message. - repeated ResponseParameters response_parameters = 2; - - // Optional input payload sent along with the request. - Payload payload = 3; - - // Compression algorithm to be used by the server for the response (stream) - CompressionType response_compression = 6; - - // Whether server should return a given status - EchoStatus response_status = 7; -} - -// Server-streaming response, as configured by the request and parameters. -message StreamingOutputCallResponse { - // Payload to increase response size. 
- Payload payload = 1; -} - -// For reconnect interop test only. -// Client tells server what reconnection parameters it used. -message ReconnectParams { - int32 max_reconnect_backoff_ms = 1; -} - -// For reconnect interop test only. -// Server tells client whether its reconnects are following the spec and the -// reconnect backoffs it saw. -message ReconnectInfo { - bool passed = 1; - repeated int32 backoff_ms = 2; -} diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.pb.go b/vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.pb.go deleted file mode 100644 index d70d1f745..000000000 --- a/vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.pb.go +++ /dev/null @@ -1,250 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: payloads.proto - -package grpc_testing - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type ByteBufferParams struct { - ReqSize int32 `protobuf:"varint,1,opt,name=req_size,json=reqSize" json:"req_size,omitempty"` - RespSize int32 `protobuf:"varint,2,opt,name=resp_size,json=respSize" json:"resp_size,omitempty"` -} - -func (m *ByteBufferParams) Reset() { *m = ByteBufferParams{} } -func (m *ByteBufferParams) String() string { return proto.CompactTextString(m) } -func (*ByteBufferParams) ProtoMessage() {} -func (*ByteBufferParams) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } - -func (m *ByteBufferParams) GetReqSize() int32 { - if m != nil { - return m.ReqSize - } - return 0 -} - -func (m *ByteBufferParams) GetRespSize() int32 { - if m != nil { - return m.RespSize - } - return 0 -} - -type SimpleProtoParams struct { - ReqSize int32 `protobuf:"varint,1,opt,name=req_size,json=reqSize" json:"req_size,omitempty"` - RespSize int32 `protobuf:"varint,2,opt,name=resp_size,json=respSize" json:"resp_size,omitempty"` -} - -func (m *SimpleProtoParams) Reset() { *m = SimpleProtoParams{} } -func (m *SimpleProtoParams) String() string { return proto.CompactTextString(m) } -func (*SimpleProtoParams) ProtoMessage() {} -func (*SimpleProtoParams) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } - -func (m *SimpleProtoParams) GetReqSize() int32 { - if m != nil { - return m.ReqSize - } - return 0 -} - -func (m *SimpleProtoParams) GetRespSize() int32 { - if m != nil { - return m.RespSize - } - return 0 -} - -type ComplexProtoParams struct { -} - -func (m *ComplexProtoParams) Reset() { *m = ComplexProtoParams{} } -func (m *ComplexProtoParams) String() string { return proto.CompactTextString(m) } -func (*ComplexProtoParams) ProtoMessage() {} -func (*ComplexProtoParams) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} } - -type PayloadConfig struct { - // Types that are valid to be assigned to Payload: - // *PayloadConfig_BytebufParams - // *PayloadConfig_SimpleParams - // *PayloadConfig_ComplexParams - Payload isPayloadConfig_Payload `protobuf_oneof:"payload"` -} - -func (m *PayloadConfig) Reset() { *m = PayloadConfig{} } -func (m *PayloadConfig) String() string { return proto.CompactTextString(m) } -func (*PayloadConfig) ProtoMessage() {} -func (*PayloadConfig) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} } - -type isPayloadConfig_Payload interface { - isPayloadConfig_Payload() -} - -type PayloadConfig_BytebufParams struct { - BytebufParams *ByteBufferParams 
`protobuf:"bytes,1,opt,name=bytebuf_params,json=bytebufParams,oneof"` -} -type PayloadConfig_SimpleParams struct { - SimpleParams *SimpleProtoParams `protobuf:"bytes,2,opt,name=simple_params,json=simpleParams,oneof"` -} -type PayloadConfig_ComplexParams struct { - ComplexParams *ComplexProtoParams `protobuf:"bytes,3,opt,name=complex_params,json=complexParams,oneof"` -} - -func (*PayloadConfig_BytebufParams) isPayloadConfig_Payload() {} -func (*PayloadConfig_SimpleParams) isPayloadConfig_Payload() {} -func (*PayloadConfig_ComplexParams) isPayloadConfig_Payload() {} - -func (m *PayloadConfig) GetPayload() isPayloadConfig_Payload { - if m != nil { - return m.Payload - } - return nil -} - -func (m *PayloadConfig) GetBytebufParams() *ByteBufferParams { - if x, ok := m.GetPayload().(*PayloadConfig_BytebufParams); ok { - return x.BytebufParams - } - return nil -} - -func (m *PayloadConfig) GetSimpleParams() *SimpleProtoParams { - if x, ok := m.GetPayload().(*PayloadConfig_SimpleParams); ok { - return x.SimpleParams - } - return nil -} - -func (m *PayloadConfig) GetComplexParams() *ComplexProtoParams { - if x, ok := m.GetPayload().(*PayloadConfig_ComplexParams); ok { - return x.ComplexParams - } - return nil -} - -// XXX_OneofFuncs is for the internal use of the proto package. -func (*PayloadConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _PayloadConfig_OneofMarshaler, _PayloadConfig_OneofUnmarshaler, _PayloadConfig_OneofSizer, []interface{}{ - (*PayloadConfig_BytebufParams)(nil), - (*PayloadConfig_SimpleParams)(nil), - (*PayloadConfig_ComplexParams)(nil), - } -} - -func _PayloadConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*PayloadConfig) - // payload - switch x := m.Payload.(type) { - case *PayloadConfig_BytebufParams: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.BytebufParams); err != nil { - return err - } - case *PayloadConfig_SimpleParams: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.SimpleParams); err != nil { - return err - } - case *PayloadConfig_ComplexParams: - b.EncodeVarint(3<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ComplexParams); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("PayloadConfig.Payload has unexpected type %T", x) - } - return nil -} - -func _PayloadConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*PayloadConfig) - switch tag { - case 1: // payload.bytebuf_params - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ByteBufferParams) - err := b.DecodeMessage(msg) - m.Payload = &PayloadConfig_BytebufParams{msg} - return true, err - case 2: // payload.simple_params - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(SimpleProtoParams) - err := b.DecodeMessage(msg) - m.Payload = &PayloadConfig_SimpleParams{msg} - return true, err - case 3: // payload.complex_params - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ComplexProtoParams) - err := b.DecodeMessage(msg) - m.Payload = &PayloadConfig_ComplexParams{msg} - return true, err - default: - return false, nil - } -} - -func _PayloadConfig_OneofSizer(msg proto.Message) (n int) { - m := msg.(*PayloadConfig) - // payload - switch x := m.Payload.(type) { - case 
*PayloadConfig_BytebufParams: - s := proto.Size(x.BytebufParams) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *PayloadConfig_SimpleParams: - s := proto.Size(x.SimpleParams) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *PayloadConfig_ComplexParams: - s := proto.Size(x.ComplexParams) - n += proto.SizeVarint(3<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -func init() { - proto.RegisterType((*ByteBufferParams)(nil), "grpc.testing.ByteBufferParams") - proto.RegisterType((*SimpleProtoParams)(nil), "grpc.testing.SimpleProtoParams") - proto.RegisterType((*ComplexProtoParams)(nil), "grpc.testing.ComplexProtoParams") - proto.RegisterType((*PayloadConfig)(nil), "grpc.testing.PayloadConfig") -} - -func init() { proto.RegisterFile("payloads.proto", fileDescriptor2) } - -var fileDescriptor2 = []byte{ - // 254 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2b, 0x48, 0xac, 0xcc, - 0xc9, 0x4f, 0x4c, 0x29, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x49, 0x2f, 0x2a, 0x48, - 0xd6, 0x2b, 0x49, 0x2d, 0x2e, 0xc9, 0xcc, 0x4b, 0x57, 0xf2, 0xe2, 0x12, 0x70, 0xaa, 0x2c, 0x49, - 0x75, 0x2a, 0x4d, 0x4b, 0x4b, 0x2d, 0x0a, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0x16, 0x92, 0xe4, 0xe2, - 0x28, 0x4a, 0x2d, 0x8c, 0x2f, 0xce, 0xac, 0x4a, 0x95, 0x60, 0x54, 0x60, 0xd4, 0x60, 0x0d, 0x62, - 0x2f, 0x4a, 0x2d, 0x0c, 0xce, 0xac, 0x4a, 0x15, 0x92, 0xe6, 0xe2, 0x2c, 0x4a, 0x2d, 0x2e, 0x80, - 0xc8, 0x31, 0x81, 0xe5, 0x38, 0x40, 0x02, 0x20, 0x49, 0x25, 0x6f, 0x2e, 0xc1, 0xe0, 0xcc, 0xdc, - 0x82, 0x9c, 0xd4, 0x00, 0x90, 0x45, 0x14, 0x1a, 0x26, 0xc2, 0x25, 0xe4, 0x9c, 0x0f, 0x32, 0xac, - 0x02, 0xc9, 0x34, 0xa5, 0x6f, 0x8c, 0x5c, 0xbc, 0x01, 0x10, 0xff, 0x38, 0xe7, 0xe7, 0xa5, 0x65, - 0xa6, 0x0b, 0xb9, 0x73, 0xf1, 0x25, 0x55, 0x96, 0xa4, 0x26, 0x95, 0xa6, 0xc5, 0x17, 0x80, 0xd5, - 0x80, 0x6d, 0xe1, 0x36, 0x92, 0xd3, 0x43, 0xf6, 0xa7, 0x1e, 0xba, 0x27, 0x3d, 0x18, 0x82, 0x78, - 0xa1, 0xfa, 0xa0, 0x0e, 0x75, 0xe3, 0xe2, 0x2d, 0x06, 0xbb, 0x1e, 0x66, 0x0e, 0x13, 0xd8, 0x1c, - 0x79, 0x54, 0x73, 0x30, 0x3c, 0xe8, 0xc1, 0x10, 0xc4, 0x03, 0xd1, 0x07, 0x35, 0xc7, 0x93, 0x8b, - 0x2f, 0x19, 0xe2, 0x70, 0x98, 0x41, 0xcc, 0x60, 0x83, 0x14, 0x50, 0x0d, 0xc2, 0xf4, 0x1c, 0xc8, - 0x49, 0x50, 0x9d, 0x10, 0x01, 0x27, 0x4e, 0x2e, 0x76, 0x68, 0xe4, 0x25, 0xb1, 0x81, 0x23, 0xcf, - 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x8c, 0x18, 0x4e, 0xce, 0x01, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.proto b/vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.proto deleted file mode 100644 index 5d4871f5f..000000000 --- a/vendor/google.golang.org/grpc/benchmark/grpc_testing/payloads.proto +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2016 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
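For reference, the oneof plumbing deleted above (marshaler, unmarshaler, and sizer) is what let callers switch on PayloadConfig's payload arm. A minimal, self-contained sketch of that consumption pattern follows, using stand-in types rather than the deleted grpc_testing package; all names below are illustrative, not part of the generated API:

	package main

	import "fmt"

	// Stand-ins for the deleted generated parameter types.
	type ByteBufferParams struct{ ReqSize, RespSize int32 }
	type SimpleProtoParams struct{ ReqSize, RespSize int32 }

	// One wrapper struct per oneof arm, mirroring the generated code above.
	type isPayload interface{ isPayload() }

	type BytebufParams struct{ Params *ByteBufferParams }
	type SimpleParams struct{ Params *SimpleProtoParams }

	func (*BytebufParams) isPayload() {}
	func (*SimpleParams) isPayload()  {}

	type PayloadConfig struct{ Payload isPayload }

	// describe consumes the oneof with a type switch, the same shape the
	// generated marshaler/sizer above used internally.
	func describe(c *PayloadConfig) string {
		switch x := c.Payload.(type) {
		case *BytebufParams:
			return fmt.Sprintf("bytebuf: req=%d resp=%d", x.Params.ReqSize, x.Params.RespSize)
		case *SimpleParams:
			return fmt.Sprintf("proto: req=%d resp=%d", x.Params.ReqSize, x.Params.RespSize)
		case nil:
			return "payload unset"
		default:
			return fmt.Sprintf("unexpected type %T", x)
		}
	}

	func main() {
		c := &PayloadConfig{Payload: &BytebufParams{&ByteBufferParams{ReqSize: 16, RespSize: 32}}}
		fmt.Println(describe(c))
	}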
- -syntax = "proto3"; - -package grpc.testing; - -message ByteBufferParams { - int32 req_size = 1; - int32 resp_size = 2; -} - -message SimpleProtoParams { - int32 req_size = 1; - int32 resp_size = 2; -} - -message ComplexProtoParams { - // TODO (vpai): Fill this in once the details of complex, representative - // protos are decided -} - -message PayloadConfig { - oneof payload { - ByteBufferParams bytebuf_params = 1; - SimpleProtoParams simple_params = 2; - ComplexProtoParams complex_params = 3; - } -} diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/services.pb.go b/vendor/google.golang.org/grpc/benchmark/grpc_testing/services.pb.go deleted file mode 100644 index 50e350595..000000000 --- a/vendor/google.golang.org/grpc/benchmark/grpc_testing/services.pb.go +++ /dev/null @@ -1,442 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: services.proto - -package grpc_testing - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for BenchmarkService service - -type BenchmarkServiceClient interface { - // One request followed by one response. - // The server returns the client payload as-is. - UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) - // One request followed by one response. - // The server returns the client payload as-is. - StreamingCall(ctx context.Context, opts ...grpc.CallOption) (BenchmarkService_StreamingCallClient, error) -} - -type benchmarkServiceClient struct { - cc *grpc.ClientConn -} - -func NewBenchmarkServiceClient(cc *grpc.ClientConn) BenchmarkServiceClient { - return &benchmarkServiceClient{cc} -} - -func (c *benchmarkServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { - out := new(SimpleResponse) - err := grpc.Invoke(ctx, "/grpc.testing.BenchmarkService/UnaryCall", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *benchmarkServiceClient) StreamingCall(ctx context.Context, opts ...grpc.CallOption) (BenchmarkService_StreamingCallClient, error) { - stream, err := grpc.NewClientStream(ctx, &_BenchmarkService_serviceDesc.Streams[0], c.cc, "/grpc.testing.BenchmarkService/StreamingCall", opts...) 
- if err != nil { - return nil, err - } - x := &benchmarkServiceStreamingCallClient{stream} - return x, nil -} - -type BenchmarkService_StreamingCallClient interface { - Send(*SimpleRequest) error - Recv() (*SimpleResponse, error) - grpc.ClientStream -} - -type benchmarkServiceStreamingCallClient struct { - grpc.ClientStream -} - -func (x *benchmarkServiceStreamingCallClient) Send(m *SimpleRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *benchmarkServiceStreamingCallClient) Recv() (*SimpleResponse, error) { - m := new(SimpleResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// Server API for BenchmarkService service - -type BenchmarkServiceServer interface { - // One request followed by one response. - // The server returns the client payload as-is. - UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) - // One request followed by one response. - // The server returns the client payload as-is. - StreamingCall(BenchmarkService_StreamingCallServer) error -} - -func RegisterBenchmarkServiceServer(s *grpc.Server, srv BenchmarkServiceServer) { - s.RegisterService(&_BenchmarkService_serviceDesc, srv) -} - -func _BenchmarkService_UnaryCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SimpleRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BenchmarkServiceServer).UnaryCall(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/grpc.testing.BenchmarkService/UnaryCall", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BenchmarkServiceServer).UnaryCall(ctx, req.(*SimpleRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _BenchmarkService_StreamingCall_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(BenchmarkServiceServer).StreamingCall(&benchmarkServiceStreamingCallServer{stream}) -} - -type BenchmarkService_StreamingCallServer interface { - Send(*SimpleResponse) error - Recv() (*SimpleRequest, error) - grpc.ServerStream -} - -type benchmarkServiceStreamingCallServer struct { - grpc.ServerStream -} - -func (x *benchmarkServiceStreamingCallServer) Send(m *SimpleResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *benchmarkServiceStreamingCallServer) Recv() (*SimpleRequest, error) { - m := new(SimpleRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _BenchmarkService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "grpc.testing.BenchmarkService", - HandlerType: (*BenchmarkServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "UnaryCall", - Handler: _BenchmarkService_UnaryCall_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "StreamingCall", - Handler: _BenchmarkService_StreamingCall_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "services.proto", -} - -// Client API for WorkerService service - -type WorkerServiceClient interface { - // Start server with specified workload. - // First request sent specifies the ServerConfig followed by ServerStatus - // response. After that, a "Mark" can be sent anytime to request the latest - // stats. Closing the stream will initiate shutdown of the test server - // and once the shutdown has finished, the OK status is sent to terminate - // this RPC. 
- RunServer(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunServerClient, error) - // Start client with specified workload. - // First request sent specifies the ClientConfig followed by ClientStatus - // response. After that, a "Mark" can be sent anytime to request the latest - // stats. Closing the stream will initiate shutdown of the test client - // and once the shutdown has finished, the OK status is sent to terminate - // this RPC. - RunClient(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunClientClient, error) - // Just return the core count - unary call - CoreCount(ctx context.Context, in *CoreRequest, opts ...grpc.CallOption) (*CoreResponse, error) - // Quit this worker - QuitWorker(ctx context.Context, in *Void, opts ...grpc.CallOption) (*Void, error) -} - -type workerServiceClient struct { - cc *grpc.ClientConn -} - -func NewWorkerServiceClient(cc *grpc.ClientConn) WorkerServiceClient { - return &workerServiceClient{cc} -} - -func (c *workerServiceClient) RunServer(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunServerClient, error) { - stream, err := grpc.NewClientStream(ctx, &_WorkerService_serviceDesc.Streams[0], c.cc, "/grpc.testing.WorkerService/RunServer", opts...) - if err != nil { - return nil, err - } - x := &workerServiceRunServerClient{stream} - return x, nil -} - -type WorkerService_RunServerClient interface { - Send(*ServerArgs) error - Recv() (*ServerStatus, error) - grpc.ClientStream -} - -type workerServiceRunServerClient struct { - grpc.ClientStream -} - -func (x *workerServiceRunServerClient) Send(m *ServerArgs) error { - return x.ClientStream.SendMsg(m) -} - -func (x *workerServiceRunServerClient) Recv() (*ServerStatus, error) { - m := new(ServerStatus) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *workerServiceClient) RunClient(ctx context.Context, opts ...grpc.CallOption) (WorkerService_RunClientClient, error) { - stream, err := grpc.NewClientStream(ctx, &_WorkerService_serviceDesc.Streams[1], c.cc, "/grpc.testing.WorkerService/RunClient", opts...) - if err != nil { - return nil, err - } - x := &workerServiceRunClientClient{stream} - return x, nil -} - -type WorkerService_RunClientClient interface { - Send(*ClientArgs) error - Recv() (*ClientStatus, error) - grpc.ClientStream -} - -type workerServiceRunClientClient struct { - grpc.ClientStream -} - -func (x *workerServiceRunClientClient) Send(m *ClientArgs) error { - return x.ClientStream.SendMsg(m) -} - -func (x *workerServiceRunClientClient) Recv() (*ClientStatus, error) { - m := new(ClientStatus) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *workerServiceClient) CoreCount(ctx context.Context, in *CoreRequest, opts ...grpc.CallOption) (*CoreResponse, error) { - out := new(CoreResponse) - err := grpc.Invoke(ctx, "/grpc.testing.WorkerService/CoreCount", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *workerServiceClient) QuitWorker(ctx context.Context, in *Void, opts ...grpc.CallOption) (*Void, error) { - out := new(Void) - err := grpc.Invoke(ctx, "/grpc.testing.WorkerService/QuitWorker", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for WorkerService service - -type WorkerServiceServer interface { - // Start server with specified workload. - // First request sent specifies the ServerConfig followed by ServerStatus - // response. 
After that, a "Mark" can be sent anytime to request the latest - // stats. Closing the stream will initiate shutdown of the test server - // and once the shutdown has finished, the OK status is sent to terminate - // this RPC. - RunServer(WorkerService_RunServerServer) error - // Start client with specified workload. - // First request sent specifies the ClientConfig followed by ClientStatus - // response. After that, a "Mark" can be sent anytime to request the latest - // stats. Closing the stream will initiate shutdown of the test client - // and once the shutdown has finished, the OK status is sent to terminate - // this RPC. - RunClient(WorkerService_RunClientServer) error - // Just return the core count - unary call - CoreCount(context.Context, *CoreRequest) (*CoreResponse, error) - // Quit this worker - QuitWorker(context.Context, *Void) (*Void, error) -} - -func RegisterWorkerServiceServer(s *grpc.Server, srv WorkerServiceServer) { - s.RegisterService(&_WorkerService_serviceDesc, srv) -} - -func _WorkerService_RunServer_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(WorkerServiceServer).RunServer(&workerServiceRunServerServer{stream}) -} - -type WorkerService_RunServerServer interface { - Send(*ServerStatus) error - Recv() (*ServerArgs, error) - grpc.ServerStream -} - -type workerServiceRunServerServer struct { - grpc.ServerStream -} - -func (x *workerServiceRunServerServer) Send(m *ServerStatus) error { - return x.ServerStream.SendMsg(m) -} - -func (x *workerServiceRunServerServer) Recv() (*ServerArgs, error) { - m := new(ServerArgs) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _WorkerService_RunClient_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(WorkerServiceServer).RunClient(&workerServiceRunClientServer{stream}) -} - -type WorkerService_RunClientServer interface { - Send(*ClientStatus) error - Recv() (*ClientArgs, error) - grpc.ServerStream -} - -type workerServiceRunClientServer struct { - grpc.ServerStream -} - -func (x *workerServiceRunClientServer) Send(m *ClientStatus) error { - return x.ServerStream.SendMsg(m) -} - -func (x *workerServiceRunClientServer) Recv() (*ClientArgs, error) { - m := new(ClientArgs) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _WorkerService_CoreCount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CoreRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(WorkerServiceServer).CoreCount(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/grpc.testing.WorkerService/CoreCount", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WorkerServiceServer).CoreCount(ctx, req.(*CoreRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _WorkerService_QuitWorker_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Void) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(WorkerServiceServer).QuitWorker(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/grpc.testing.WorkerService/QuitWorker", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return 
srv.(WorkerServiceServer).QuitWorker(ctx, req.(*Void)) - } - return interceptor(ctx, in, info, handler) -} - -var _WorkerService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "grpc.testing.WorkerService", - HandlerType: (*WorkerServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CoreCount", - Handler: _WorkerService_CoreCount_Handler, - }, - { - MethodName: "QuitWorker", - Handler: _WorkerService_QuitWorker_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "RunServer", - Handler: _WorkerService_RunServer_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "RunClient", - Handler: _WorkerService_RunClient_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "services.proto", -} - -func init() { proto.RegisterFile("services.proto", fileDescriptor3) } - -var fileDescriptor3 = []byte{ - // 255 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x91, 0xc1, 0x4a, 0xc4, 0x30, - 0x10, 0x86, 0xa9, 0x07, 0xa1, 0xc1, 0x2e, 0x92, 0x93, 0x46, 0x1f, 0xc0, 0x53, 0x91, 0xd5, 0x17, - 0x70, 0x8b, 0x1e, 0x05, 0xb7, 0xa8, 0xe7, 0x58, 0x87, 0x1a, 0x36, 0xcd, 0xd4, 0x99, 0x89, 0xe0, - 0x93, 0xf8, 0x0e, 0x3e, 0xa5, 0xec, 0x66, 0x57, 0xd6, 0x92, 0x9b, 0xc7, 0xf9, 0xbf, 0xe1, 0x23, - 0x7f, 0x46, 0xcd, 0x18, 0xe8, 0xc3, 0x75, 0xc0, 0xf5, 0x48, 0x28, 0xa8, 0x8f, 0x7a, 0x1a, 0xbb, - 0x5a, 0x80, 0xc5, 0x85, 0xde, 0xcc, 0x06, 0x60, 0xb6, 0xfd, 0x8e, 0x9a, 0xaa, 0xc3, 0x20, 0x84, - 0x3e, 0x8d, 0xf3, 0xef, 0x42, 0x1d, 0x2f, 0x20, 0x74, 0x6f, 0x83, 0xa5, 0x55, 0x9b, 0x44, 0xfa, - 0x4e, 0x95, 0x8f, 0xc1, 0xd2, 0x67, 0x63, 0xbd, 0xd7, 0x67, 0xf5, 0xbe, 0xaf, 0x6e, 0xdd, 0x30, - 0x7a, 0x58, 0xc2, 0x7b, 0x04, 0x16, 0x73, 0x9e, 0x87, 0x3c, 0x62, 0x60, 0xd0, 0xf7, 0xaa, 0x6a, - 0x85, 0xc0, 0x0e, 0x2e, 0xf4, 0xff, 0x74, 0x5d, 0x14, 0x97, 0xc5, 0xfc, 0xeb, 0x40, 0x55, 0xcf, - 0x48, 0x2b, 0xa0, 0xdd, 0x4b, 0x6f, 0x55, 0xb9, 0x8c, 0x61, 0x3d, 0x01, 0xe9, 0x93, 0x89, 0x60, - 0x93, 0xde, 0x50, 0xcf, 0xc6, 0xe4, 0x48, 0x2b, 0x56, 0x22, 0xaf, 0xc5, 0x5b, 0x4d, 0xe3, 0x1d, - 0x04, 0x99, 0x6a, 0x52, 0x9a, 0xd3, 0x24, 0xb2, 0xa7, 0x59, 0xa8, 0xb2, 0x41, 0x82, 0x06, 0x63, - 0x10, 0x7d, 0x3a, 0x59, 0x46, 0xfa, 0x6d, 0x6a, 0x72, 0x68, 0xfb, 0x67, 0xd7, 0x4a, 0x3d, 0x44, - 0x27, 0xa9, 0xa6, 0xd6, 0x7f, 0x37, 0x9f, 0xd0, 0xbd, 0x9a, 0x4c, 0xf6, 0x72, 0xb8, 0xb9, 0xe6, - 0xd5, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3b, 0x84, 0x02, 0xe3, 0x0c, 0x02, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/services.proto b/vendor/google.golang.org/grpc/benchmark/grpc_testing/services.proto deleted file mode 100644 index f4e790782..000000000 --- a/vendor/google.golang.org/grpc/benchmark/grpc_testing/services.proto +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2016 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// An integration test service that covers all the method signature permutations -// of unary/streaming requests/responses. 
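For reference, a minimal client sketch for the bidirectional StreamingCall exposed by the generated services.pb.go above: one Send followed by one Recv per iteration, since the server echoes each payload as-is. This is a sketch only; it assumes the pre-removal vendored packages, the era-appropriate grpc-go API (grpc.Dial, grpc.WithInsecure), an illustrative server address, and the conventional protoc-gen-go field name ResponseSize for response_size:

	package main

	import (
		"log"

		"golang.org/x/net/context"
		"google.golang.org/grpc"
		testpb "google.golang.org/grpc/benchmark/grpc_testing"
	)

	func main() {
		conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure()) // address is illustrative
		if err != nil {
			log.Fatalf("dial: %v", err)
		}
		defer conn.Close()

		stream, err := testpb.NewBenchmarkServiceClient(conn).StreamingCall(context.Background())
		if err != nil {
			log.Fatalf("StreamingCall: %v", err)
		}
		// One request, one echoed response per iteration.
		for i := 0; i < 10; i++ {
			if err := stream.Send(&testpb.SimpleRequest{ResponseSize: 1024}); err != nil {
				log.Fatalf("send: %v", err)
			}
			if _, err := stream.Recv(); err != nil {
				log.Fatalf("recv: %v", err)
			}
		}
		if err := stream.CloseSend(); err != nil {
			log.Fatalf("close send: %v", err)
		}
	}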
-syntax = "proto3"; - -import "messages.proto"; -import "control.proto"; - -package grpc.testing; - -service BenchmarkService { - // One request followed by one response. - // The server returns the client payload as-is. - rpc UnaryCall(SimpleRequest) returns (SimpleResponse); - - // One request followed by one response. - // The server returns the client payload as-is. - rpc StreamingCall(stream SimpleRequest) returns (stream SimpleResponse); -} - -service WorkerService { - // Start server with specified workload. - // First request sent specifies the ServerConfig followed by ServerStatus - // response. After that, a "Mark" can be sent anytime to request the latest - // stats. Closing the stream will initiate shutdown of the test server - // and once the shutdown has finished, the OK status is sent to terminate - // this RPC. - rpc RunServer(stream ServerArgs) returns (stream ServerStatus); - - // Start client with specified workload. - // First request sent specifies the ClientConfig followed by ClientStatus - // response. After that, a "Mark" can be sent anytime to request the latest - // stats. Closing the stream will initiate shutdown of the test client - // and once the shutdown has finished, the OK status is sent to terminate - // this RPC. - rpc RunClient(stream ClientArgs) returns (stream ClientStatus); - - // Just return the core count - unary call - rpc CoreCount(CoreRequest) returns (CoreResponse); - - // Quit this worker - rpc QuitWorker(Void) returns (Void); -} diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.pb.go b/vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.pb.go deleted file mode 100644 index d69cb7410..000000000 --- a/vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.pb.go +++ /dev/null @@ -1,208 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: stats.proto - -package grpc_testing - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type ServerStats struct { - // wall clock time change in seconds since last reset - TimeElapsed float64 `protobuf:"fixed64,1,opt,name=time_elapsed,json=timeElapsed" json:"time_elapsed,omitempty"` - // change in user time (in seconds) used by the server since last reset - TimeUser float64 `protobuf:"fixed64,2,opt,name=time_user,json=timeUser" json:"time_user,omitempty"` - // change in server time (in seconds) used by the server process and all - // threads since last reset - TimeSystem float64 `protobuf:"fixed64,3,opt,name=time_system,json=timeSystem" json:"time_system,omitempty"` -} - -func (m *ServerStats) Reset() { *m = ServerStats{} } -func (m *ServerStats) String() string { return proto.CompactTextString(m) } -func (*ServerStats) ProtoMessage() {} -func (*ServerStats) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} } - -func (m *ServerStats) GetTimeElapsed() float64 { - if m != nil { - return m.TimeElapsed - } - return 0 -} - -func (m *ServerStats) GetTimeUser() float64 { - if m != nil { - return m.TimeUser - } - return 0 -} - -func (m *ServerStats) GetTimeSystem() float64 { - if m != nil { - return m.TimeSystem - } - return 0 -} - -// Histogram params based on grpc/support/histogram.c -type HistogramParams struct { - Resolution float64 `protobuf:"fixed64,1,opt,name=resolution" json:"resolution,omitempty"` - MaxPossible float64 `protobuf:"fixed64,2,opt,name=max_possible,json=maxPossible" json:"max_possible,omitempty"` -} - -func (m *HistogramParams) Reset() { *m = HistogramParams{} } -func (m *HistogramParams) String() string { return proto.CompactTextString(m) } -func (*HistogramParams) ProtoMessage() {} -func (*HistogramParams) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{1} } - -func (m *HistogramParams) GetResolution() float64 { - if m != nil { - return m.Resolution - } - return 0 -} - -func (m *HistogramParams) GetMaxPossible() float64 { - if m != nil { - return m.MaxPossible - } - return 0 -} - -// Histogram data based on grpc/support/histogram.c -type HistogramData struct { - Bucket []uint32 `protobuf:"varint,1,rep,packed,name=bucket" json:"bucket,omitempty"` - MinSeen float64 `protobuf:"fixed64,2,opt,name=min_seen,json=minSeen" json:"min_seen,omitempty"` - MaxSeen float64 `protobuf:"fixed64,3,opt,name=max_seen,json=maxSeen" json:"max_seen,omitempty"` - Sum float64 `protobuf:"fixed64,4,opt,name=sum" json:"sum,omitempty"` - SumOfSquares float64 `protobuf:"fixed64,5,opt,name=sum_of_squares,json=sumOfSquares" json:"sum_of_squares,omitempty"` - Count float64 `protobuf:"fixed64,6,opt,name=count" json:"count,omitempty"` -} - -func (m *HistogramData) Reset() { *m = HistogramData{} } -func (m *HistogramData) String() string { return proto.CompactTextString(m) } -func (*HistogramData) ProtoMessage() {} -func (*HistogramData) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{2} } - -func (m *HistogramData) GetBucket() []uint32 { - if m != nil { - return m.Bucket - } - return nil -} - -func (m *HistogramData) GetMinSeen() float64 { - if m != nil { - return m.MinSeen - } - return 0 -} - -func (m *HistogramData) GetMaxSeen() float64 { - if m != nil { - return m.MaxSeen - } - return 0 -} - -func (m *HistogramData) GetSum() float64 { - if m != nil { - return m.Sum - } - return 0 -} - -func (m *HistogramData) GetSumOfSquares() float64 { - if m != nil { - return m.SumOfSquares - } - return 0 -} - -func (m *HistogramData) GetCount() float64 { - if m != nil { - return m.Count - } - return 0 
-} - -type ClientStats struct { - // Latency histogram. Data points are in nanoseconds. - Latencies *HistogramData `protobuf:"bytes,1,opt,name=latencies" json:"latencies,omitempty"` - // See ServerStats for details. - TimeElapsed float64 `protobuf:"fixed64,2,opt,name=time_elapsed,json=timeElapsed" json:"time_elapsed,omitempty"` - TimeUser float64 `protobuf:"fixed64,3,opt,name=time_user,json=timeUser" json:"time_user,omitempty"` - TimeSystem float64 `protobuf:"fixed64,4,opt,name=time_system,json=timeSystem" json:"time_system,omitempty"` -} - -func (m *ClientStats) Reset() { *m = ClientStats{} } -func (m *ClientStats) String() string { return proto.CompactTextString(m) } -func (*ClientStats) ProtoMessage() {} -func (*ClientStats) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{3} } - -func (m *ClientStats) GetLatencies() *HistogramData { - if m != nil { - return m.Latencies - } - return nil -} - -func (m *ClientStats) GetTimeElapsed() float64 { - if m != nil { - return m.TimeElapsed - } - return 0 -} - -func (m *ClientStats) GetTimeUser() float64 { - if m != nil { - return m.TimeUser - } - return 0 -} - -func (m *ClientStats) GetTimeSystem() float64 { - if m != nil { - return m.TimeSystem - } - return 0 -} - -func init() { - proto.RegisterType((*ServerStats)(nil), "grpc.testing.ServerStats") - proto.RegisterType((*HistogramParams)(nil), "grpc.testing.HistogramParams") - proto.RegisterType((*HistogramData)(nil), "grpc.testing.HistogramData") - proto.RegisterType((*ClientStats)(nil), "grpc.testing.ClientStats") -} - -func init() { proto.RegisterFile("stats.proto", fileDescriptor4) } - -var fileDescriptor4 = []byte{ - // 341 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xc1, 0x4a, 0xeb, 0x40, - 0x14, 0x86, 0x49, 0xd3, 0xf6, 0xb6, 0x27, 0xed, 0xbd, 0x97, 0x41, 0x24, 0x52, 0xd0, 0x1a, 0x5c, - 0x74, 0x95, 0x85, 0xae, 0x5c, 0xab, 0xe0, 0xce, 0xd2, 0xe8, 0x3a, 0x4c, 0xe3, 0x69, 0x19, 0xcc, - 0xcc, 0xc4, 0x39, 0x33, 0x12, 0x1f, 0x49, 0x7c, 0x49, 0xc9, 0x24, 0x68, 0x55, 0xd0, 0x5d, 0xe6, - 0xfb, 0x7e, 0xe6, 0xe4, 0xe4, 0x0f, 0x44, 0x64, 0xb9, 0xa5, 0xb4, 0x32, 0xda, 0x6a, 0x36, 0xd9, - 0x9a, 0xaa, 0x48, 0x2d, 0x92, 0x15, 0x6a, 0x9b, 0x28, 0x88, 0x32, 0x34, 0x4f, 0x68, 0xb2, 0x26, - 0xc2, 0x8e, 0x61, 0x62, 0x85, 0xc4, 0x1c, 0x4b, 0x5e, 0x11, 0xde, 0xc7, 0xc1, 0x3c, 0x58, 0x04, - 0xab, 0xa8, 0x61, 0x57, 0x2d, 0x62, 0x33, 0x18, 0xfb, 0x88, 0x23, 0x34, 0x71, 0xcf, 0xfb, 0x51, - 0x03, 0xee, 0x08, 0x0d, 0x3b, 0x02, 0x9f, 0xcd, 0xe9, 0x99, 0x2c, 0xca, 0x38, 0xf4, 0x1a, 0x1a, - 0x94, 0x79, 0x92, 0xdc, 0xc2, 0xbf, 0x6b, 0x41, 0x56, 0x6f, 0x0d, 0x97, 0x4b, 0x6e, 0xb8, 0x24, - 0x76, 0x08, 0x60, 0x90, 0x74, 0xe9, 0xac, 0xd0, 0xaa, 0x9b, 0xb8, 0x43, 0x9a, 0x77, 0x92, 0xbc, - 0xce, 0x2b, 0x4d, 0x24, 0xd6, 0x25, 0x76, 0x33, 0x23, 0xc9, 0xeb, 0x65, 0x87, 0x92, 0xd7, 0x00, - 0xa6, 0xef, 0xd7, 0x5e, 0x72, 0xcb, 0xd9, 0x3e, 0x0c, 0xd7, 0xae, 0x78, 0x40, 0x1b, 0x07, 0xf3, - 0x70, 0x31, 0x5d, 0x75, 0x27, 0x76, 0x00, 0x23, 0x29, 0x54, 0x4e, 0x88, 0xaa, 0xbb, 0xe8, 0x8f, - 0x14, 0x2a, 0x43, 0x54, 0x5e, 0xf1, 0xba, 0x55, 0x61, 0xa7, 0x78, 0xed, 0xd5, 0x7f, 0x08, 0xc9, - 0xc9, 0xb8, 0xef, 0x69, 0xf3, 0xc8, 0x4e, 0xe0, 0x2f, 0x39, 0x99, 0xeb, 0x4d, 0x4e, 0x8f, 0x8e, - 0x1b, 0xa4, 0x78, 0xe0, 0xe5, 0x84, 0x9c, 0xbc, 0xd9, 0x64, 0x2d, 0x63, 0x7b, 0x30, 0x28, 0xb4, - 0x53, 0x36, 0x1e, 0x7a, 0xd9, 0x1e, 0x92, 0x97, 0x00, 0xa2, 0x8b, 0x52, 0xa0, 0xb2, 0xed, 0x47, - 0x3f, 0x87, 0x71, 0xc9, 0x2d, 0xaa, 0x42, 0x20, 0xf9, 0xfd, 0xa3, 0xd3, 0x59, 0xba, 
0xdb, 0x52, - 0xfa, 0x69, 0xb7, 0xd5, 0x47, 0xfa, 0x5b, 0x5f, 0xbd, 0x5f, 0xfa, 0x0a, 0x7f, 0xee, 0xab, 0xff, - 0xb5, 0xaf, 0xf5, 0xd0, 0xff, 0x34, 0x67, 0x6f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xea, 0x75, 0x34, - 0x90, 0x43, 0x02, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.proto b/vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.proto deleted file mode 100644 index baf3610f3..000000000 --- a/vendor/google.golang.org/grpc/benchmark/grpc_testing/stats.proto +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2016 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package grpc.testing; - -message ServerStats { - // wall clock time change in seconds since last reset - double time_elapsed = 1; - - // change in user time (in seconds) used by the server since last reset - double time_user = 2; - - // change in server time (in seconds) used by the server process and all - // threads since last reset - double time_system = 3; -} - -// Histogram params based on grpc/support/histogram.c -message HistogramParams { - double resolution = 1; // first bucket is [0, 1 + resolution) - double max_possible = 2; // use enough buckets to allow this value -} - -// Histogram data based on grpc/support/histogram.c -message HistogramData { - repeated uint32 bucket = 1; - double min_seen = 2; - double max_seen = 3; - double sum = 4; - double sum_of_squares = 5; - double count = 6; -} - -message ClientStats { - // Latency histogram. Data points are in nanoseconds. - HistogramData latencies = 1; - - // See ServerStats for details. - double time_elapsed = 2; - double time_user = 3; - double time_system = 4; -} diff --git a/vendor/google.golang.org/grpc/benchmark/latency/latency.go b/vendor/google.golang.org/grpc/benchmark/latency/latency.go deleted file mode 100644 index 5839a5c44..000000000 --- a/vendor/google.golang.org/grpc/benchmark/latency/latency.go +++ /dev/null @@ -1,316 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package latency provides wrappers for net.Conn, net.Listener, and -// net.Dialers, designed to interoperate to inject real-world latency into -// network connections. -package latency - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "net" - "time" - - "golang.org/x/net/context" -) - -// Dialer is a function matching the signature of net.Dial. 
-type Dialer func(network, address string) (net.Conn, error) - -// TimeoutDialer is a function matching the signature of net.DialTimeout. -type TimeoutDialer func(network, address string, timeout time.Duration) (net.Conn, error) - -// ContextDialer is a function matching the signature of -// net.Dialer.DialContext. -type ContextDialer func(ctx context.Context, network, address string) (net.Conn, error) - -// Network represents a network with the given bandwidth, latency, and MTU -// (Maximum Transmission Unit) configuration, and can produce wrappers of -// net.Listeners, net.Conn, and various forms of dialing functions. The -// Listeners and Dialers/Conns on both sides of connections must come from this -// package, but need not be created from the same Network. Latency is computed -// when sending (in Write), and is injected when receiving (in Read). This -// allows senders' Write calls to be non-blocking, as in real-world -// applications. -// -// Note: Latency is injected by the sender specifying the absolute time data -// should be available, and the reader delaying until that time arrives to -// provide the data. This package attempts to counter-act the effects of clock -// drift and existing network latency by measuring the delay between the -// sender's transmission time and the receiver's reception time during startup. -// No attempt is made to measure the existing bandwidth of the connection. -type Network struct { - Kbps int // Kilobits per second; if non-positive, infinite - Latency time.Duration // One-way latency (sending); if non-positive, no delay - MTU int // Bytes per packet; if non-positive, infinite -} - -var ( - //Local simulates local network. - Local = Network{0, 0, 0} - //LAN simulates local area network network. - LAN = Network{100 * 1024, 2 * time.Millisecond, 1500} - //WAN simulates wide area network. - WAN = Network{20 * 1024, 30 * time.Millisecond, 1500} - //Longhaul simulates bad network. - Longhaul = Network{1000 * 1024, 200 * time.Millisecond, 9000} -) - -// Conn returns a net.Conn that wraps c and injects n's latency into that -// connection. This function also imposes latency for connection creation. -// If n's Latency is lower than the measured latency in c, an error is -// returned. -func (n *Network) Conn(c net.Conn) (net.Conn, error) { - start := now() - nc := &conn{Conn: c, network: n, readBuf: new(bytes.Buffer)} - if err := nc.sync(); err != nil { - return nil, err - } - sleep(start.Add(nc.delay).Sub(now())) - return nc, nil -} - -type conn struct { - net.Conn - network *Network - - readBuf *bytes.Buffer // one packet worth of data received - lastSendEnd time.Time // time the previous Write should be fully on the wire - delay time.Duration // desired latency - measured latency -} - -// header is sent before all data transmitted by the application. -type header struct { - ReadTime int64 // Time the reader is allowed to read this packet (UnixNano) - Sz int32 // Size of the data in the packet -} - -func (c *conn) Write(p []byte) (n int, err error) { - tNow := now() - if c.lastSendEnd.Before(tNow) { - c.lastSendEnd = tNow - } - for len(p) > 0 { - pkt := p - if c.network.MTU > 0 && len(pkt) > c.network.MTU { - pkt = pkt[:c.network.MTU] - p = p[c.network.MTU:] - } else { - p = nil - } - if c.network.Kbps > 0 { - if congestion := c.lastSendEnd.Sub(tNow) - c.delay; congestion > 0 { - // The network is full; sleep until this packet can be sent. 
- sleep(congestion) - tNow = tNow.Add(congestion) - } - } - c.lastSendEnd = c.lastSendEnd.Add(c.network.pktTime(len(pkt))) - hdr := header{ReadTime: c.lastSendEnd.Add(c.delay).UnixNano(), Sz: int32(len(pkt))} - if err := binary.Write(c.Conn, binary.BigEndian, hdr); err != nil { - return n, err - } - x, err := c.Conn.Write(pkt) - n += x - if err != nil { - return n, err - } - } - return n, nil -} - -func (c *conn) Read(p []byte) (n int, err error) { - if c.readBuf.Len() == 0 { - var hdr header - if err := binary.Read(c.Conn, binary.BigEndian, &hdr); err != nil { - return 0, err - } - defer func() { sleep(time.Unix(0, hdr.ReadTime).Sub(now())) }() - - if _, err := io.CopyN(c.readBuf, c.Conn, int64(hdr.Sz)); err != nil { - return 0, err - } - } - // Read from readBuf. - return c.readBuf.Read(p) -} - -// sync does a handshake and then measures the latency on the network in -// coordination with the other side. -func (c *conn) sync() error { - const ( - pingMsg = "syncPing" - warmup = 10 // minimum number of iterations to measure latency - giveUp = 50 // maximum number of iterations to measure latency - accuracy = time.Millisecond // req'd accuracy to stop early - goodRun = 3 // stop early if latency within accuracy this many times - ) - - type syncMsg struct { - SendT int64 // Time sent. If zero, stop. - RecvT int64 // Time received. If zero, fill in and respond. - } - - // A trivial handshake - if err := binary.Write(c.Conn, binary.BigEndian, []byte(pingMsg)); err != nil { - return err - } - var ping [8]byte - if err := binary.Read(c.Conn, binary.BigEndian, &ping); err != nil { - return err - } else if string(ping[:]) != pingMsg { - return fmt.Errorf("malformed handshake message: %v (want %q)", ping, pingMsg) - } - - // Both sides are alive and syncing. Calculate network delay / clock skew. - att := 0 - good := 0 - var latency time.Duration - localDone, remoteDone := false, false - send := true - for !localDone || !remoteDone { - if send { - if err := binary.Write(c.Conn, binary.BigEndian, syncMsg{SendT: now().UnixNano()}); err != nil { - return err - } - att++ - send = false - } - - // Block until we get a syncMsg - m := syncMsg{} - if err := binary.Read(c.Conn, binary.BigEndian, &m); err != nil { - return err - } - - if m.RecvT == 0 { - // Message initiated from other side. - if m.SendT == 0 { - remoteDone = true - continue - } - // Send response. - m.RecvT = now().UnixNano() - if err := binary.Write(c.Conn, binary.BigEndian, m); err != nil { - return err - } - continue - } - - lag := time.Duration(m.RecvT - m.SendT) - latency += lag - avgLatency := latency / time.Duration(att) - if e := lag - avgLatency; e > -accuracy && e < accuracy { - good++ - } else { - good = 0 - } - if att < giveUp && (att < warmup || good < goodRun) { - send = true - continue - } - localDone = true - latency = avgLatency - // Tell the other side we're done. - if err := binary.Write(c.Conn, binary.BigEndian, syncMsg{}); err != nil { - return err - } - } - if c.network.Latency <= 0 { - return nil - } - c.delay = c.network.Latency - latency - if c.delay < 0 { - return fmt.Errorf("measured network latency (%v) higher than desired latency (%v)", latency, c.network.Latency) - } - return nil -} - -// Listener returns a net.Listener that wraps l and injects n's latency in its -// connections. 
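For reference, a minimal sketch of how the latency package deleted here was meant to be used: wrap both endpoints (the Network docs above require that Listeners and Dialers/Conns on both sides come from this package, though not necessarily from the same Network), then time a round trip. This assumes the latency package as vendored before this removal; the payload and network parameters are illustrative:

	package main

	import (
		"io"
		"log"
		"net"
		"time"

		"google.golang.org/grpc/benchmark/latency"
	)

	func main() {
		inner, err := net.Listen("tcp", "localhost:0")
		if err != nil {
			log.Fatal(err)
		}
		lis := latency.LAN.Listener(inner) // server side: ~2ms one-way latency, MTU 1500

		go func() {
			c, err := lis.Accept()
			if err != nil {
				log.Fatal(err)
			}
			defer c.Close()
			buf := make([]byte, 5)
			if _, err := io.ReadFull(c, buf); err != nil {
				log.Fatal(err)
			}
			c.Write(buf) // echo the payload back
		}()

		// Client side: a slower link than the server's.
		n := latency.Network{Kbps: 64, Latency: 50 * time.Millisecond, MTU: 1400}
		dial := n.Dialer(net.Dial)
		conn, err := dial("tcp", lis.Addr().String())
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()

		start := time.Now()
		if _, err := conn.Write([]byte("hello")); err != nil {
			log.Fatal(err)
		}
		buf := make([]byte, 5)
		if _, err := io.ReadFull(conn, buf); err != nil {
			log.Fatal(err)
		}
		log.Printf("round trip with injected latency: %v", time.Since(start))
	}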
-func (n *Network) Listener(l net.Listener) net.Listener { - return &listener{Listener: l, network: n} -} - -type listener struct { - net.Listener - network *Network -} - -func (l *listener) Accept() (net.Conn, error) { - c, err := l.Listener.Accept() - if err != nil { - return nil, err - } - return l.network.Conn(c) -} - -// Dialer returns a Dialer that wraps d and injects n's latency in its -// connections. n's Latency is also injected to the connection's creation. -func (n *Network) Dialer(d Dialer) Dialer { - return func(network, address string) (net.Conn, error) { - conn, err := d(network, address) - if err != nil { - return nil, err - } - return n.Conn(conn) - } -} - -// TimeoutDialer returns a TimeoutDialer that wraps d and injects n's latency -// in its connections. n's Latency is also injected to the connection's -// creation. -func (n *Network) TimeoutDialer(d TimeoutDialer) TimeoutDialer { - return func(network, address string, timeout time.Duration) (net.Conn, error) { - conn, err := d(network, address, timeout) - if err != nil { - return nil, err - } - return n.Conn(conn) - } -} - -// ContextDialer returns a ContextDialer that wraps d and injects n's latency -// in its connections. n's Latency is also injected to the connection's -// creation. -func (n *Network) ContextDialer(d ContextDialer) ContextDialer { - return func(ctx context.Context, network, address string) (net.Conn, error) { - conn, err := d(ctx, network, address) - if err != nil { - return nil, err - } - return n.Conn(conn) - } -} - -// pktTime returns the time it takes to transmit one packet of data of size b -// in bytes. -func (n *Network) pktTime(b int) time.Duration { - if n.Kbps <= 0 { - return time.Duration(0) - } - return time.Duration(b) * time.Second / time.Duration(n.Kbps*(1024/8)) -} - -// Wrappers for testing - -var now = time.Now -var sleep = time.Sleep diff --git a/vendor/google.golang.org/grpc/benchmark/server/main.go b/vendor/google.golang.org/grpc/benchmark/server/main.go deleted file mode 100644 index dcce130e4..000000000 --- a/vendor/google.golang.org/grpc/benchmark/server/main.go +++ /dev/null @@ -1,53 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package main - -import ( - "flag" - "math" - "net" - "net/http" - _ "net/http/pprof" - "time" - - "google.golang.org/grpc/benchmark" - "google.golang.org/grpc/grpclog" -) - -var ( - duration = flag.Int("duration", math.MaxInt32, "The duration in seconds to run the benchmark server") -) - -func main() { - flag.Parse() - go func() { - lis, err := net.Listen("tcp", ":0") - if err != nil { - grpclog.Fatalf("Failed to listen: %v", err) - } - grpclog.Println("Server profiling address: ", lis.Addr().String()) - if err := http.Serve(lis, nil); err != nil { - grpclog.Fatalf("Failed to serve: %v", err) - } - }() - addr, stopper := benchmark.StartServer(benchmark.ServerInfo{Addr: ":0", Type: "protobuf"}) // listen on all interfaces - grpclog.Println("Server Address: ", addr) - <-time.After(time.Duration(*duration) * time.Second) - stopper() -} diff --git a/vendor/google.golang.org/grpc/benchmark/stats/histogram.go b/vendor/google.golang.org/grpc/benchmark/stats/histogram.go deleted file mode 100644 index f038d26ed..000000000 --- a/vendor/google.golang.org/grpc/benchmark/stats/histogram.go +++ /dev/null @@ -1,222 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package stats - -import ( - "bytes" - "fmt" - "io" - "log" - "math" - "strconv" - "strings" -) - -// Histogram accumulates values in the form of a histogram with -// exponentially increased bucket sizes. -type Histogram struct { - // Count is the total number of values added to the histogram. - Count int64 - // Sum is the sum of all the values added to the histogram. - Sum int64 - // SumOfSquares is the sum of squares of all values. - SumOfSquares int64 - // Min is the minimum of all the values added to the histogram. - Min int64 - // Max is the maximum of all the values added to the histogram. - Max int64 - // Buckets contains all the buckets of the histogram. - Buckets []HistogramBucket - - opts HistogramOptions - logBaseBucketSize float64 - oneOverLogOnePlusGrowthFactor float64 -} - -// HistogramOptions contains the parameters that define the histogram's buckets. -// The first bucket of the created histogram (with index 0) contains [min, min+n) -// where n = BaseBucketSize, min = MinValue. -// Bucket i (i>=1) contains [min + n * m^(i-1), min + n * m^i), where m = 1+GrowthFactor. -// The type of the values is int64. -type HistogramOptions struct { - // NumBuckets is the number of buckets. - NumBuckets int - // GrowthFactor is the growth factor of the buckets. A value of 0.1 - // indicates that bucket N+1 will be 10% larger than bucket N. - GrowthFactor float64 - // BaseBucketSize is the size of the first bucket. - BaseBucketSize float64 - // MinValue is the lower bound of the first bucket. - MinValue int64 -} - -// HistogramBucket represents one histogram bucket. -type HistogramBucket struct { - // LowBound is the lower bound of the bucket. - LowBound float64 - // Count is the number of values in the bucket. 
- Count int64 -} - -// NewHistogram returns a pointer to a new Histogram object that was created -// with the provided options. -func NewHistogram(opts HistogramOptions) *Histogram { - if opts.NumBuckets == 0 { - opts.NumBuckets = 32 - } - if opts.BaseBucketSize == 0.0 { - opts.BaseBucketSize = 1.0 - } - h := Histogram{ - Buckets: make([]HistogramBucket, opts.NumBuckets), - Min: math.MaxInt64, - Max: math.MinInt64, - - opts: opts, - logBaseBucketSize: math.Log(opts.BaseBucketSize), - oneOverLogOnePlusGrowthFactor: 1 / math.Log(1+opts.GrowthFactor), - } - m := 1.0 + opts.GrowthFactor - delta := opts.BaseBucketSize - h.Buckets[0].LowBound = float64(opts.MinValue) - for i := 1; i < opts.NumBuckets; i++ { - h.Buckets[i].LowBound = float64(opts.MinValue) + delta - delta = delta * m - } - return &h -} - -// Print writes textual output of the histogram values. -func (h *Histogram) Print(w io.Writer) { - h.PrintWithUnit(w, 1) -} - -// PrintWithUnit writes textual output of the histogram values . -// Data in histogram is divided by a Unit before print. -func (h *Histogram) PrintWithUnit(w io.Writer, unit float64) { - avg := float64(h.Sum) / float64(h.Count) - fmt.Fprintf(w, "Count: %d Min: %5.1f Max: %5.1f Avg: %.2f\n", h.Count, float64(h.Min)/unit, float64(h.Max)/unit, avg/unit) - fmt.Fprintf(w, "%s\n", strings.Repeat("-", 60)) - if h.Count <= 0 { - return - } - - maxBucketDigitLen := len(strconv.FormatFloat(h.Buckets[len(h.Buckets)-1].LowBound, 'f', 6, 64)) - if maxBucketDigitLen < 3 { - // For "inf". - maxBucketDigitLen = 3 - } - maxCountDigitLen := len(strconv.FormatInt(h.Count, 10)) - percentMulti := 100 / float64(h.Count) - - accCount := int64(0) - for i, b := range h.Buckets { - fmt.Fprintf(w, "[%*f, ", maxBucketDigitLen, b.LowBound/unit) - if i+1 < len(h.Buckets) { - fmt.Fprintf(w, "%*f)", maxBucketDigitLen, h.Buckets[i+1].LowBound/unit) - } else { - fmt.Fprintf(w, "%*s)", maxBucketDigitLen, "inf") - } - - accCount += b.Count - fmt.Fprintf(w, " %*d %5.1f%% %5.1f%%", maxCountDigitLen, b.Count, float64(b.Count)*percentMulti, float64(accCount)*percentMulti) - - const barScale = 0.1 - barLength := int(float64(b.Count)*percentMulti*barScale + 0.5) - fmt.Fprintf(w, " %s\n", strings.Repeat("#", barLength)) - } -} - -// String returns the textual output of the histogram values as string. -func (h *Histogram) String() string { - var b bytes.Buffer - h.Print(&b) - return b.String() -} - -// Clear resets all the content of histogram. -func (h *Histogram) Clear() { - h.Count = 0 - h.Sum = 0 - h.SumOfSquares = 0 - h.Min = math.MaxInt64 - h.Max = math.MinInt64 - for i := range h.Buckets { - h.Buckets[i].Count = 0 - } -} - -// Opts returns a copy of the options used to create the Histogram. -func (h *Histogram) Opts() HistogramOptions { - return h.opts -} - -// Add adds a value to the histogram. 
-func (h *Histogram) Add(value int64) error { - bucket, err := h.findBucket(value) - if err != nil { - return err - } - h.Buckets[bucket].Count++ - h.Count++ - h.Sum += value - h.SumOfSquares += value * value - if value < h.Min { - h.Min = value - } - if value > h.Max { - h.Max = value - } - return nil -} - -func (h *Histogram) findBucket(value int64) (int, error) { - delta := float64(value - h.opts.MinValue) - var b int - if delta >= h.opts.BaseBucketSize { - // b = log_{1+growthFactor} (delta / baseBucketSize) + 1 - // = log(delta / baseBucketSize) / log(1+growthFactor) + 1 - // = (log(delta) - log(baseBucketSize)) * (1 / log(1+growthFactor)) + 1 - b = int((math.Log(delta)-h.logBaseBucketSize)*h.oneOverLogOnePlusGrowthFactor + 1) - } - if b >= len(h.Buckets) { - return 0, fmt.Errorf("no bucket for value: %d", value) - } - return b, nil -} - -// Merge takes another histogram h2, and merges its content into h. -// The two histograms must be created by equivalent HistogramOptions. -func (h *Histogram) Merge(h2 *Histogram) { - if h.opts != h2.opts { - log.Fatalf("failed to merge histograms, created by inequivalent options") - } - h.Count += h2.Count - h.Sum += h2.Sum - h.SumOfSquares += h2.SumOfSquares - if h2.Min < h.Min { - h.Min = h2.Min - } - if h2.Max > h.Max { - h.Max = h2.Max - } - for i, b := range h2.Buckets { - h.Buckets[i].Count += b.Count - } -} diff --git a/vendor/google.golang.org/grpc/benchmark/stats/stats.go b/vendor/google.golang.org/grpc/benchmark/stats/stats.go deleted file mode 100644 index 412daead0..000000000 --- a/vendor/google.golang.org/grpc/benchmark/stats/stats.go +++ /dev/null @@ -1,291 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package stats - -import ( - "bytes" - "fmt" - "io" - "math" - "sort" - "strconv" - "time" -) - -// Features contains most fields for a benchmark -type Features struct { - NetworkMode string - EnableTrace bool - Latency time.Duration - Kbps int - Mtu int - MaxConcurrentCalls int - ReqSizeBytes int - RespSizeBytes int - EnableCompressor bool -} - -// String returns the textual output of the Features as string. -func (f Features) String() string { - return fmt.Sprintf("traceMode_%t-latency_%s-kbps_%#v-MTU_%#v-maxConcurrentCalls_"+ - "%#v-reqSize_%#vB-respSize_%#vB-Compressor_%t", f.EnableTrace, - f.Latency.String(), f.Kbps, f.Mtu, f.MaxConcurrentCalls, f.ReqSizeBytes, f.RespSizeBytes, f.EnableCompressor) -} - -// PartialPrintString can print certain features with different format. 
-func PartialPrintString(noneEmptyPos []bool, f Features, shared bool) string {
-	s := ""
-	var (
-		prefix, suffix, linker string
-		isNetwork              bool
-	)
-	if shared {
-		suffix = "\n"
-		linker = ": "
-	} else {
-		prefix = "-"
-		linker = "_"
-	}
-	if noneEmptyPos[0] {
-		s += fmt.Sprintf("%sTrace%s%t%s", prefix, linker, f.EnableTrace, suffix)
-	}
-	if shared && f.NetworkMode != "" {
-		s += fmt.Sprintf("Network: %s \n", f.NetworkMode)
-		isNetwork = true
-	}
-	if !isNetwork {
-		if noneEmptyPos[1] {
-			s += fmt.Sprintf("%slatency%s%s%s", prefix, linker, f.Latency.String(), suffix)
-		}
-		if noneEmptyPos[2] {
-			s += fmt.Sprintf("%skbps%s%#v%s", prefix, linker, f.Kbps, suffix)
-		}
-		if noneEmptyPos[3] {
-			s += fmt.Sprintf("%sMTU%s%#v%s", prefix, linker, f.Mtu, suffix)
-		}
-	}
-	if noneEmptyPos[4] {
-		s += fmt.Sprintf("%sCallers%s%#v%s", prefix, linker, f.MaxConcurrentCalls, suffix)
-	}
-	if noneEmptyPos[5] {
-		s += fmt.Sprintf("%sreqSize%s%#vB%s", prefix, linker, f.ReqSizeBytes, suffix)
-	}
-	if noneEmptyPos[6] {
-		s += fmt.Sprintf("%srespSize%s%#vB%s", prefix, linker, f.RespSizeBytes, suffix)
-	}
-	if noneEmptyPos[7] {
-		s += fmt.Sprintf("%sCompressor%s%t%s", prefix, linker, f.EnableCompressor, suffix)
-	}
-	return s
-}
-
-type percentLatency struct {
-	Percent int
-	Value   time.Duration
-}
-
-// BenchResults records the features and results of a benchmark run.
-type BenchResults struct {
-	RunMode           string
-	Features          Features
-	Latency           []percentLatency
-	Operations        int
-	NsPerOp           int64
-	AllocedBytesPerOp int64
-	AllocsPerOp       int64
-	SharedPosion      []bool
-}
-
-// SetBenchmarkResult sets features of benchmark and basic results.
-func (stats *Stats) SetBenchmarkResult(mode string, features Features, o int, allocdBytes, allocs int64, sharedPos []bool) {
-	stats.result.RunMode = mode
-	stats.result.Features = features
-	stats.result.Operations = o
-	stats.result.AllocedBytesPerOp = allocdBytes
-	stats.result.AllocsPerOp = allocs
-	stats.result.SharedPosion = sharedPos
-}
-
-// GetBenchmarkResults returns the features and results of the benchmark.
-func (stats *Stats) GetBenchmarkResults() BenchResults {
-	return stats.result
-}
-
-// BenchString outputs latency stats formatted as time + unit.
-func (stats *Stats) BenchString() string {
-	stats.maybeUpdate()
-	s := stats.result
-	res := s.RunMode + "-" + s.Features.String() + ": \n"
-	if len(s.Latency) != 0 {
-		var statsUnit = s.Latency[0].Value
-		var timeUnit = fmt.Sprintf("%v", statsUnit)[1:]
-		for i := 1; i < len(s.Latency)-1; i++ {
-			res += fmt.Sprintf("%d_Latency: %s %s \t", s.Latency[i].Percent,
-				strconv.FormatFloat(float64(s.Latency[i].Value)/float64(statsUnit), 'f', 4, 64), timeUnit)
-		}
-		res += fmt.Sprintf("Avg latency: %s %s \t",
-			strconv.FormatFloat(float64(s.Latency[len(s.Latency)-1].Value)/float64(statsUnit), 'f', 4, 64), timeUnit)
-	}
-	res += fmt.Sprintf("Count: %v \t", s.Operations)
-	res += fmt.Sprintf("%v Bytes/op\t", s.AllocedBytesPerOp)
-	res += fmt.Sprintf("%v Allocs/op\t", s.AllocsPerOp)
-
-	return res
-}
-
-// Stats is a simple helper for gathering additional statistics like histogram
-// during benchmarks. This is not thread safe.
-type Stats struct {
-	numBuckets int
-	unit       time.Duration
-	min, max   int64
-	histogram  *Histogram
-
-	durations durationSlice
-	dirty     bool
-
-	sortLatency bool
-	result      BenchResults
-}
-
-type durationSlice []time.Duration
-
-// NewStats creates a new Stats instance. If numBuckets is not positive,
-// the default value (16) will be used.
-func NewStats(numBuckets int) *Stats { - if numBuckets <= 0 { - numBuckets = 16 - } - return &Stats{ - // Use one more bucket for the last unbounded bucket. - numBuckets: numBuckets + 1, - durations: make(durationSlice, 0, 100000), - } -} - -// Add adds an elapsed time per operation to the stats. -func (stats *Stats) Add(d time.Duration) { - stats.durations = append(stats.durations, d) - stats.dirty = true -} - -// Clear resets the stats, removing all values. -func (stats *Stats) Clear() { - stats.durations = stats.durations[:0] - stats.histogram = nil - stats.dirty = false - stats.result = BenchResults{} -} - -//Sort method for durations -func (a durationSlice) Len() int { return len(a) } -func (a durationSlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a durationSlice) Less(i, j int) bool { return a[i] < a[j] } -func max(a, b int64) int64 { - if a > b { - return a - } - return b -} - -// maybeUpdate updates internal stat data if there was any newly added -// stats since this was updated. -func (stats *Stats) maybeUpdate() { - if !stats.dirty { - return - } - - if stats.sortLatency { - sort.Sort(stats.durations) - stats.min = int64(stats.durations[0]) - stats.max = int64(stats.durations[len(stats.durations)-1]) - } - - stats.min = math.MaxInt64 - stats.max = 0 - for _, d := range stats.durations { - if stats.min > int64(d) { - stats.min = int64(d) - } - if stats.max < int64(d) { - stats.max = int64(d) - } - } - - // Use the largest unit that can represent the minimum time duration. - stats.unit = time.Nanosecond - for _, u := range []time.Duration{time.Microsecond, time.Millisecond, time.Second} { - if stats.min <= int64(u) { - break - } - stats.unit = u - } - - numBuckets := stats.numBuckets - if n := int(stats.max - stats.min + 1); n < numBuckets { - numBuckets = n - } - stats.histogram = NewHistogram(HistogramOptions{ - NumBuckets: numBuckets, - // max-min(lower bound of last bucket) = (1 + growthFactor)^(numBuckets-2) * baseBucketSize. - GrowthFactor: math.Pow(float64(stats.max-stats.min), 1/float64(numBuckets-2)) - 1, - BaseBucketSize: 1.0, - MinValue: stats.min}) - - for _, d := range stats.durations { - stats.histogram.Add(int64(d)) - } - - stats.dirty = false - - if stats.durations.Len() != 0 { - var percentToObserve = []int{50, 90} - // First data record min unit from the latency result. - stats.result.Latency = append(stats.result.Latency, percentLatency{Percent: -1, Value: stats.unit}) - for _, position := range percentToObserve { - stats.result.Latency = append(stats.result.Latency, percentLatency{Percent: position, Value: stats.durations[max(stats.histogram.Count*int64(position)/100-1, 0)]}) - } - // Last data record the average latency. - avg := float64(stats.histogram.Sum) / float64(stats.histogram.Count) - stats.result.Latency = append(stats.result.Latency, percentLatency{Percent: -1, Value: time.Duration(avg)}) - } -} - -// SortLatency blocks the output -func (stats *Stats) SortLatency() { - stats.sortLatency = true -} - -// Print writes textual output of the Stats. -func (stats *Stats) Print(w io.Writer) { - stats.maybeUpdate() - if stats.histogram == nil { - fmt.Fprint(w, "Histogram (empty)\n") - } else { - fmt.Fprintf(w, "Histogram (unit: %s)\n", fmt.Sprintf("%v", stats.unit)[1:]) - stats.histogram.PrintWithUnit(w, float64(stats.unit)) - } -} - -// String returns the textual output of the Stats as string. 
-func (stats *Stats) String() string { - var b bytes.Buffer - stats.Print(&b) - return b.String() -} diff --git a/vendor/google.golang.org/grpc/benchmark/stats/util.go b/vendor/google.golang.org/grpc/benchmark/stats/util.go deleted file mode 100644 index f3bb3a364..000000000 --- a/vendor/google.golang.org/grpc/benchmark/stats/util.go +++ /dev/null @@ -1,208 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package stats - -import ( - "bufio" - "bytes" - "fmt" - "os" - "runtime" - "sort" - "strings" - "sync" - "testing" -) - -var ( - curB *testing.B - curBenchName string - curStats map[string]*Stats - - orgStdout *os.File - nextOutPos int - - injectCond *sync.Cond - injectDone chan struct{} -) - -// AddStats adds a new unnamed Stats instance to the current benchmark. You need -// to run benchmarks by calling RunTestMain() to inject the stats to the -// benchmark results. If numBuckets is not positive, the default value (16) will -// be used. Please note that this calls b.ResetTimer() since it may be blocked -// until the previous benchmark stats is printed out. So AddStats() should -// typically be called at the very beginning of each benchmark function. -func AddStats(b *testing.B, numBuckets int) *Stats { - return AddStatsWithName(b, "", numBuckets) -} - -// AddStatsWithName adds a new named Stats instance to the current benchmark. -// With this, you can add multiple stats in a single benchmark. You need -// to run benchmarks by calling RunTestMain() to inject the stats to the -// benchmark results. If numBuckets is not positive, the default value (16) will -// be used. Please note that this calls b.ResetTimer() since it may be blocked -// until the previous benchmark stats is printed out. So AddStatsWithName() -// should typically be called at the very beginning of each benchmark function. -func AddStatsWithName(b *testing.B, name string, numBuckets int) *Stats { - var benchName string - for i := 1; ; i++ { - pc, _, _, ok := runtime.Caller(i) - if !ok { - panic("benchmark function not found") - } - p := strings.Split(runtime.FuncForPC(pc).Name(), ".") - benchName = p[len(p)-1] - if strings.HasPrefix(benchName, "run") { - break - } - } - procs := runtime.GOMAXPROCS(-1) - if procs != 1 { - benchName = fmt.Sprintf("%s-%d", benchName, procs) - } - - stats := NewStats(numBuckets) - - if injectCond != nil { - // We need to wait until the previous benchmark stats is printed out. - injectCond.L.Lock() - for curB != nil && curBenchName != benchName { - injectCond.Wait() - } - - curB = b - curBenchName = benchName - curStats[name] = stats - - injectCond.L.Unlock() - } - - b.ResetTimer() - return stats -} - -// RunTestMain runs the tests with enabling injection of benchmark stats. It -// returns an exit code to pass to os.Exit. -func RunTestMain(m *testing.M) int { - startStatsInjector() - defer stopStatsInjector() - return m.Run() -} - -// startStatsInjector starts stats injection to benchmark results. 
-func startStatsInjector() { - orgStdout = os.Stdout - r, w, _ := os.Pipe() - os.Stdout = w - nextOutPos = 0 - - resetCurBenchStats() - - injectCond = sync.NewCond(&sync.Mutex{}) - injectDone = make(chan struct{}) - go func() { - defer close(injectDone) - - scanner := bufio.NewScanner(r) - scanner.Split(splitLines) - for scanner.Scan() { - injectStatsIfFinished(scanner.Text()) - } - if err := scanner.Err(); err != nil { - panic(err) - } - }() -} - -// stopStatsInjector stops stats injection and restores os.Stdout. -func stopStatsInjector() { - os.Stdout.Close() - <-injectDone - injectCond = nil - os.Stdout = orgStdout -} - -// splitLines is a split function for a bufio.Scanner that returns each line -// of text, teeing texts to the original stdout even before each line ends. -func splitLines(data []byte, eof bool) (advance int, token []byte, err error) { - if eof && len(data) == 0 { - return 0, nil, nil - } - - if i := bytes.IndexByte(data, '\n'); i >= 0 { - orgStdout.Write(data[nextOutPos : i+1]) - nextOutPos = 0 - return i + 1, data[0:i], nil - } - - orgStdout.Write(data[nextOutPos:]) - nextOutPos = len(data) - - if eof { - // This is a final, non-terminated line. Return it. - return len(data), data, nil - } - - return 0, nil, nil -} - -// injectStatsIfFinished prints out the stats if the current benchmark finishes. -func injectStatsIfFinished(line string) { - injectCond.L.Lock() - defer injectCond.L.Unlock() - // We assume that the benchmark results start with "Benchmark". - if curB == nil || !strings.HasPrefix(line, "Benchmark") { - return - } - - if !curB.Failed() { - // Output all stats in alphabetical order. - names := make([]string, 0, len(curStats)) - for name := range curStats { - names = append(names, name) - } - sort.Strings(names) - for _, name := range names { - stats := curStats[name] - // The output of stats starts with a header like "Histogram (unit: ms)" - // followed by statistical properties and the buckets. Add the stats name - // if it is a named stats and indent them as Go testing outputs. - lines := strings.Split(stats.String(), "\n") - if n := len(lines); n > 0 { - if name != "" { - name = ": " + name - } - fmt.Fprintf(orgStdout, "--- %s%s\n", lines[0], name) - for _, line := range lines[1 : n-1] { - fmt.Fprintf(orgStdout, "\t%s\n", line) - } - } - } - } - - resetCurBenchStats() - injectCond.Signal() -} - -// resetCurBenchStats resets the current benchmark stats. -func resetCurBenchStats() { - curB = nil - curBenchName = "" - curStats = make(map[string]*Stats) -} diff --git a/vendor/google.golang.org/grpc/benchmark/worker/benchmark_client.go b/vendor/google.golang.org/grpc/benchmark/worker/benchmark_client.go deleted file mode 100644 index 9db1d8504..000000000 --- a/vendor/google.golang.org/grpc/benchmark/worker/benchmark_client.go +++ /dev/null @@ -1,392 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- *
- */
-
-package main
-
-import (
-	"flag"
-	"math"
-	"runtime"
-	"sync"
-	"syscall"
-	"time"
-
-	"golang.org/x/net/context"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/benchmark"
-	testpb "google.golang.org/grpc/benchmark/grpc_testing"
-	"google.golang.org/grpc/benchmark/stats"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/credentials"
-	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/testdata"
-)
-
-var (
-	caFile = flag.String("ca_file", "", "The file containing the CA root cert file")
-)
-
-type lockingHistogram struct {
-	mu        sync.Mutex
-	histogram *stats.Histogram
-}
-
-func (h *lockingHistogram) add(value int64) {
-	h.mu.Lock()
-	defer h.mu.Unlock()
-	h.histogram.Add(value)
-}
-
-// swap sets h.histogram to new, and returns its old value.
-func (h *lockingHistogram) swap(new *stats.Histogram) *stats.Histogram {
-	h.mu.Lock()
-	defer h.mu.Unlock()
-	old := h.histogram
-	h.histogram = new
-	return old
-}
-
-func (h *lockingHistogram) mergeInto(merged *stats.Histogram) {
-	h.mu.Lock()
-	defer h.mu.Unlock()
-	merged.Merge(h.histogram)
-}
-
-type benchmarkClient struct {
-	closeConns        func()
-	stop              chan bool
-	lastResetTime     time.Time
-	histogramOptions  stats.HistogramOptions
-	lockingHistograms []lockingHistogram
-	rusageLastReset   *syscall.Rusage
-}
-
-func printClientConfig(config *testpb.ClientConfig) {
-	// Some config options are ignored:
-	// - client type:
-	//     will always create sync client
-	// - async client threads.
-	// - core list
-	grpclog.Printf(" * client type: %v (ignored, always creates sync client)", config.ClientType)
-	grpclog.Printf(" * async client threads: %v (ignored)", config.AsyncClientThreads)
-	// TODO: use cores specified by CoreList when setting list of cores is supported in go.
-	grpclog.Printf(" * core list: %v (ignored)", config.CoreList)
-
-	grpclog.Printf(" - security params: %v", config.SecurityParams)
-	grpclog.Printf(" - core limit: %v", config.CoreLimit)
-	grpclog.Printf(" - payload config: %v", config.PayloadConfig)
-	grpclog.Printf(" - rpcs per channel: %v", config.OutstandingRpcsPerChannel)
-	grpclog.Printf(" - channel number: %v", config.ClientChannels)
-	grpclog.Printf(" - load params: %v", config.LoadParams)
-	grpclog.Printf(" - rpc type: %v", config.RpcType)
-	grpclog.Printf(" - histogram params: %v", config.HistogramParams)
-	grpclog.Printf(" - server targets: %v", config.ServerTargets)
-}
-
-func setupClientEnv(config *testpb.ClientConfig) {
-	// Use all cpu cores available on machine by default.
-	// TODO: Revisit this for the optimal default setup.
-	if config.CoreLimit > 0 {
-		runtime.GOMAXPROCS(int(config.CoreLimit))
-	} else {
-		runtime.GOMAXPROCS(runtime.NumCPU())
-	}
-}
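The lockingHistogram type above exists so that getStats can reset per-goroutine histograms without pausing the goroutines that are recording into them: a reader swaps in a fresh histogram and merges the old one off the hot path. A minimal sketch of that pattern, assuming it is compiled alongside the types deleted above (illustrative only, not part of the vendored code):

	func snapshot(h *lockingHistogram, opts stats.HistogramOptions) *stats.Histogram {
		// Writers keep calling h.add concurrently; swap hands us everything
		// recorded so far and leaves an empty histogram in its place.
		old := h.swap(stats.NewHistogram(opts))
		merged := stats.NewHistogram(opts)
		merged.Merge(old) // aggregate outside the lock's critical path
		return merged
	}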
-
-// createConns creates connections according to the given config.
-// It returns the connections and corresponding function to close them.
-// It returns non-nil error if there is anything wrong.
-func createConns(config *testpb.ClientConfig) ([]*grpc.ClientConn, func(), error) {
-	var opts []grpc.DialOption
-
-	// Sanity check for client type.
-	switch config.ClientType {
-	case testpb.ClientType_SYNC_CLIENT:
-	case testpb.ClientType_ASYNC_CLIENT:
-	default:
-		return nil, nil, grpc.Errorf(codes.InvalidArgument, "unknown client type: %v", config.ClientType)
-	}
-
-	// Check and set security options.
-	if config.SecurityParams != nil {
-		if *caFile == "" {
-			*caFile = testdata.Path("ca.pem")
-		}
-		creds, err := credentials.NewClientTLSFromFile(*caFile, config.SecurityParams.ServerHostOverride)
-		if err != nil {
-			return nil, nil, grpc.Errorf(codes.InvalidArgument, "failed to create TLS credentials %v", err)
-		}
-		opts = append(opts, grpc.WithTransportCredentials(creds))
-	} else {
-		opts = append(opts, grpc.WithInsecure())
-	}
-
-	// Use byteBufCodec if it is required.
-	if config.PayloadConfig != nil {
-		switch config.PayloadConfig.Payload.(type) {
-		case *testpb.PayloadConfig_BytebufParams:
-			opts = append(opts, grpc.WithCodec(byteBufCodec{}))
-		case *testpb.PayloadConfig_SimpleParams:
-		default:
-			return nil, nil, grpc.Errorf(codes.InvalidArgument, "unknown payload config: %v", config.PayloadConfig)
-		}
-	}
-
-	// Create connections.
-	connCount := int(config.ClientChannels)
-	conns := make([]*grpc.ClientConn, connCount, connCount)
-	for connIndex := 0; connIndex < connCount; connIndex++ {
-		conns[connIndex] = benchmark.NewClientConn(config.ServerTargets[connIndex%len(config.ServerTargets)], opts...)
-	}
-
-	return conns, func() {
-		for _, conn := range conns {
-			conn.Close()
-		}
-	}, nil
-}
-
-func performRPCs(config *testpb.ClientConfig, conns []*grpc.ClientConn, bc *benchmarkClient) error {
-	// Read payload size and type from config.
-	var (
-		payloadReqSize, payloadRespSize int
-		payloadType                     string
-	)
-	if config.PayloadConfig != nil {
-		switch c := config.PayloadConfig.Payload.(type) {
-		case *testpb.PayloadConfig_BytebufParams:
-			payloadReqSize = int(c.BytebufParams.ReqSize)
-			payloadRespSize = int(c.BytebufParams.RespSize)
-			payloadType = "bytebuf"
-		case *testpb.PayloadConfig_SimpleParams:
-			payloadReqSize = int(c.SimpleParams.ReqSize)
-			payloadRespSize = int(c.SimpleParams.RespSize)
-			payloadType = "protobuf"
-		default:
-			return grpc.Errorf(codes.InvalidArgument, "unknown payload config: %v", config.PayloadConfig)
-		}
-	}
-
-	// TODO add open loop distribution.
-	switch config.LoadParams.Load.(type) {
-	case *testpb.LoadParams_ClosedLoop:
-	case *testpb.LoadParams_Poisson:
-		return grpc.Errorf(codes.Unimplemented, "unsupported load params: %v", config.LoadParams)
-	default:
-		return grpc.Errorf(codes.InvalidArgument, "unknown load params: %v", config.LoadParams)
-	}
-
-	rpcCountPerConn := int(config.OutstandingRpcsPerChannel)
-
-	switch config.RpcType {
-	case testpb.RpcType_UNARY:
-		bc.doCloseLoopUnary(conns, rpcCountPerConn, payloadReqSize, payloadRespSize)
-		// TODO open loop.
-	case testpb.RpcType_STREAMING:
-		bc.doCloseLoopStreaming(conns, rpcCountPerConn, payloadReqSize, payloadRespSize, payloadType)
-		// TODO open loop.
-	default:
-		return grpc.Errorf(codes.InvalidArgument, "unknown rpc type: %v", config.RpcType)
-	}
-
-	return nil
-}
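startBenchmarkClient below sizes its histograms from the driver-supplied HistogramParams: NumBuckets = int(log(MaxPossible)/log(1+Resolution)) + 1, so each bucket is Resolution (e.g. 1%) wider than the previous one and the last bucket roughly reaches MaxPossible. A worked check with assumed values, not taken from this patch:

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		resolution := 0.01  // hypothetical driver setting: 1% bucket growth
		maxPossible := 6e10 // hypothetical ceiling: 60s expressed in nanoseconds
		n := int(math.Log(maxPossible)/math.Log(1+resolution)) + 1
		fmt.Println(n) // 2495 buckets cover [0, 6e10) at 1% resolution
	}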
-
-func startBenchmarkClient(config *testpb.ClientConfig) (*benchmarkClient, error) {
-	printClientConfig(config)
-
-	// Set running environment like how many cores to use.
-	setupClientEnv(config)
-
-	conns, closeConns, err := createConns(config)
-	if err != nil {
-		return nil, err
-	}
-
-	rusage := new(syscall.Rusage)
-	syscall.Getrusage(syscall.RUSAGE_SELF, rusage)
-
-	rpcCountPerConn := int(config.OutstandingRpcsPerChannel)
-	bc := &benchmarkClient{
-		histogramOptions: stats.HistogramOptions{
-			NumBuckets:     int(math.Log(config.HistogramParams.MaxPossible)/math.Log(1+config.HistogramParams.Resolution)) + 1,
-			GrowthFactor:   config.HistogramParams.Resolution,
-			BaseBucketSize: (1 + config.HistogramParams.Resolution),
-			MinValue:       0,
-		},
-		lockingHistograms: make([]lockingHistogram, rpcCountPerConn*len(conns), rpcCountPerConn*len(conns)),
-
-		stop:            make(chan bool),
-		lastResetTime:   time.Now(),
-		closeConns:      closeConns,
-		rusageLastReset: rusage,
-	}
-
-	if err = performRPCs(config, conns, bc); err != nil {
-		// Close all connections if performRPCs failed.
-		closeConns()
-		return nil, err
-	}
-
-	return bc, nil
-}
-
-func (bc *benchmarkClient) doCloseLoopUnary(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int) {
-	for ic, conn := range conns {
-		client := testpb.NewBenchmarkServiceClient(conn)
-		// For each connection, create rpcCountPerConn goroutines to issue RPCs.
-		for j := 0; j < rpcCountPerConn; j++ {
-			// Create a histogram for each goroutine.
-			idx := ic*rpcCountPerConn + j
-			bc.lockingHistograms[idx].histogram = stats.NewHistogram(bc.histogramOptions)
-			// Start a goroutine that records its results into the histogram created above.
-			go func(idx int) {
-				// TODO: do warm up if necessary.
-				// Now relying on the worker client to reserve time to do warm up.
-				// The worker client needs to wait for some time after the client is created,
-				// before starting the benchmark.
-				done := make(chan bool)
-				for {
-					go func() {
-						start := time.Now()
-						if err := benchmark.DoUnaryCall(client, reqSize, respSize); err != nil {
-							select {
-							case <-bc.stop:
-							case done <- false:
-							}
-							return
-						}
-						elapse := time.Since(start)
-						bc.lockingHistograms[idx].add(int64(elapse))
-						select {
-						case <-bc.stop:
-						case done <- true:
-						}
-					}()
-					select {
-					case <-bc.stop:
-						return
-					case <-done:
-					}
-				}
-			}(idx)
-		}
-	}
-}
-
-func (bc *benchmarkClient) doCloseLoopStreaming(conns []*grpc.ClientConn, rpcCountPerConn int, reqSize int, respSize int, payloadType string) {
-	var doRPC func(testpb.BenchmarkService_StreamingCallClient, int, int) error
-	if payloadType == "bytebuf" {
-		doRPC = benchmark.DoByteBufStreamingRoundTrip
-	} else {
-		doRPC = benchmark.DoStreamingRoundTrip
-	}
-	for ic, conn := range conns {
-		// For each connection, create rpcCountPerConn goroutines to issue RPCs.
-		for j := 0; j < rpcCountPerConn; j++ {
-			c := testpb.NewBenchmarkServiceClient(conn)
-			stream, err := c.StreamingCall(context.Background())
-			if err != nil {
-				grpclog.Fatalf("%v.StreamingCall(_) = _, %v", c, err)
-			}
-			// Create a histogram for each goroutine.
-			idx := ic*rpcCountPerConn + j
-			bc.lockingHistograms[idx].histogram = stats.NewHistogram(bc.histogramOptions)
-			// Start a goroutine that records its results into the histogram created above.
-			go func(idx int) {
-				// TODO: do warm up if necessary.
-				// Now relying on the worker client to reserve time to do warm up.
-				// The worker client needs to wait for some time after the client is created,
-				// before starting the benchmark.
- for { - start := time.Now() - if err := doRPC(stream, reqSize, respSize); err != nil { - return - } - elapse := time.Since(start) - bc.lockingHistograms[idx].add(int64(elapse)) - select { - case <-bc.stop: - return - default: - } - } - }(idx) - } - } -} - -// getStats returns the stats for benchmark client. -// It resets lastResetTime and all histograms if argument reset is true. -func (bc *benchmarkClient) getStats(reset bool) *testpb.ClientStats { - var wallTimeElapsed, uTimeElapsed, sTimeElapsed float64 - mergedHistogram := stats.NewHistogram(bc.histogramOptions) - latestRusage := new(syscall.Rusage) - - if reset { - // Merging histogram may take some time. - // Put all histograms aside and merge later. - toMerge := make([]*stats.Histogram, len(bc.lockingHistograms), len(bc.lockingHistograms)) - for i := range bc.lockingHistograms { - toMerge[i] = bc.lockingHistograms[i].swap(stats.NewHistogram(bc.histogramOptions)) - } - - for i := 0; i < len(toMerge); i++ { - mergedHistogram.Merge(toMerge[i]) - } - - wallTimeElapsed = time.Since(bc.lastResetTime).Seconds() - syscall.Getrusage(syscall.RUSAGE_SELF, latestRusage) - uTimeElapsed, sTimeElapsed = cpuTimeDiff(bc.rusageLastReset, latestRusage) - - bc.rusageLastReset = latestRusage - bc.lastResetTime = time.Now() - } else { - // Merge only, not reset. - for i := range bc.lockingHistograms { - bc.lockingHistograms[i].mergeInto(mergedHistogram) - } - - wallTimeElapsed = time.Since(bc.lastResetTime).Seconds() - syscall.Getrusage(syscall.RUSAGE_SELF, latestRusage) - uTimeElapsed, sTimeElapsed = cpuTimeDiff(bc.rusageLastReset, latestRusage) - } - - b := make([]uint32, len(mergedHistogram.Buckets), len(mergedHistogram.Buckets)) - for i, v := range mergedHistogram.Buckets { - b[i] = uint32(v.Count) - } - return &testpb.ClientStats{ - Latencies: &testpb.HistogramData{ - Bucket: b, - MinSeen: float64(mergedHistogram.Min), - MaxSeen: float64(mergedHistogram.Max), - Sum: float64(mergedHistogram.Sum), - SumOfSquares: float64(mergedHistogram.SumOfSquares), - Count: float64(mergedHistogram.Count), - }, - TimeElapsed: wallTimeElapsed, - TimeUser: uTimeElapsed, - TimeSystem: sTimeElapsed, - } -} - -func (bc *benchmarkClient) shutdown() { - close(bc.stop) - bc.closeConns() -} diff --git a/vendor/google.golang.org/grpc/benchmark/worker/benchmark_server.go b/vendor/google.golang.org/grpc/benchmark/worker/benchmark_server.go deleted file mode 100644 index 238dfdebc..000000000 --- a/vendor/google.golang.org/grpc/benchmark/worker/benchmark_server.go +++ /dev/null @@ -1,184 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- *
- */
-
-package main
-
-import (
-	"flag"
-	"runtime"
-	"strconv"
-	"strings"
-	"sync"
-	"syscall"
-	"time"
-
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/benchmark"
-	testpb "google.golang.org/grpc/benchmark/grpc_testing"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/credentials"
-	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/testdata"
-)
-
-var (
-	certFile = flag.String("tls_cert_file", "", "The TLS cert file")
-	keyFile  = flag.String("tls_key_file", "", "The TLS key file")
-)
-
-type benchmarkServer struct {
-	port            int
-	cores           int
-	closeFunc       func()
-	mu              sync.RWMutex
-	lastResetTime   time.Time
-	rusageLastReset *syscall.Rusage
-}
-
-func printServerConfig(config *testpb.ServerConfig) {
-	// Some config options are ignored:
-	// - server type:
-	//     will always start sync server
-	// - async server threads
-	// - core list
-	grpclog.Printf(" * server type: %v (ignored, always starts sync server)", config.ServerType)
-	grpclog.Printf(" * async server threads: %v (ignored)", config.AsyncServerThreads)
-	// TODO: use cores specified by CoreList when setting list of cores is supported in go.
-	grpclog.Printf(" * core list: %v (ignored)", config.CoreList)
-
-	grpclog.Printf(" - security params: %v", config.SecurityParams)
-	grpclog.Printf(" - core limit: %v", config.CoreLimit)
-	grpclog.Printf(" - port: %v", config.Port)
-	grpclog.Printf(" - payload config: %v", config.PayloadConfig)
-}
-
-func startBenchmarkServer(config *testpb.ServerConfig, serverPort int) (*benchmarkServer, error) {
-	printServerConfig(config)
-
-	// Use all cpu cores available on machine by default.
-	// TODO: Revisit this for the optimal default setup.
-	numOfCores := runtime.NumCPU()
-	if config.CoreLimit > 0 {
-		numOfCores = int(config.CoreLimit)
-	}
-	runtime.GOMAXPROCS(numOfCores)
-
-	var opts []grpc.ServerOption
-
-	// Sanity check for server type.
-	switch config.ServerType {
-	case testpb.ServerType_SYNC_SERVER:
-	case testpb.ServerType_ASYNC_SERVER:
-	case testpb.ServerType_ASYNC_GENERIC_SERVER:
-	default:
-		return nil, grpc.Errorf(codes.InvalidArgument, "unknown server type: %v", config.ServerType)
-	}
-
-	// Set security options.
-	if config.SecurityParams != nil {
-		if *certFile == "" {
-			*certFile = testdata.Path("server1.pem")
-		}
-		if *keyFile == "" {
-			*keyFile = testdata.Path("server1.key")
-		}
-		creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile)
-		if err != nil {
-			grpclog.Fatalf("failed to generate credentials %v", err)
-		}
-		opts = append(opts, grpc.Creds(creds))
-	}
-
-	// Priority: config.Port > serverPort > default (0).
-	port := int(config.Port)
-	if port == 0 {
-		port = serverPort
-	}
-
-	// Create the appropriate benchmark server according to the config.
-	var (
-		addr      string
-		closeFunc func()
-		err       error
-	)
-	if config.PayloadConfig != nil {
-		switch payload := config.PayloadConfig.Payload.(type) {
-		case *testpb.PayloadConfig_BytebufParams:
-			opts = append(opts, grpc.CustomCodec(byteBufCodec{}))
-			addr, closeFunc = benchmark.StartServer(benchmark.ServerInfo{
-				Addr:     ":" + strconv.Itoa(port),
-				Type:     "bytebuf",
-				Metadata: payload.BytebufParams.RespSize,
-			}, opts...)
-		case *testpb.PayloadConfig_SimpleParams:
-			addr, closeFunc = benchmark.StartServer(benchmark.ServerInfo{
-				Addr: ":" + strconv.Itoa(port),
-				Type: "protobuf",
-			}, opts...)
-		case *testpb.PayloadConfig_ComplexParams:
-			return nil, grpc.Errorf(codes.Unimplemented, "unsupported payload config: %v", config.PayloadConfig)
-		default:
-			return nil, grpc.Errorf(codes.InvalidArgument, "unknown payload config: %v", config.PayloadConfig)
-		}
-	} else {
-		// Start protobuf server if payload config is nil.
-		addr, closeFunc = benchmark.StartServer(benchmark.ServerInfo{
-			Addr: ":" + strconv.Itoa(port),
-			Type: "protobuf",
-		}, opts...)
-	}
-
-	grpclog.Printf("benchmark server listening at %v", addr)
-	addrSplitted := strings.Split(addr, ":")
-	p, err := strconv.Atoi(addrSplitted[len(addrSplitted)-1])
-	if err != nil {
-		grpclog.Fatalf("failed to get port number from server address: %v", err)
-	}
-
-	rusage := new(syscall.Rusage)
-	syscall.Getrusage(syscall.RUSAGE_SELF, rusage)
-
-	return &benchmarkServer{
-		port:            p,
-		cores:           numOfCores,
-		closeFunc:       closeFunc,
-		lastResetTime:   time.Now(),
-		rusageLastReset: rusage,
-	}, nil
-}
-
-// getStats returns the stats for benchmark server.
-// It resets lastResetTime if argument reset is true.
-func (bs *benchmarkServer) getStats(reset bool) *testpb.ServerStats {
-	bs.mu.RLock()
-	defer bs.mu.RUnlock()
-	wallTimeElapsed := time.Since(bs.lastResetTime).Seconds()
-	rusageLatest := new(syscall.Rusage)
-	syscall.Getrusage(syscall.RUSAGE_SELF, rusageLatest)
-	uTimeElapsed, sTimeElapsed := cpuTimeDiff(bs.rusageLastReset, rusageLatest)
-
-	if reset {
-		bs.lastResetTime = time.Now()
-		bs.rusageLastReset = rusageLatest
-	}
-	return &testpb.ServerStats{
-		TimeElapsed: wallTimeElapsed,
-		TimeUser:    uTimeElapsed,
-		TimeSystem:  sTimeElapsed,
-	}
-}
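The reset flag in getStats above gives snapshots interval semantics: each one reports wall, user, and system time accumulated since the previous reset. A driver-side consumer might derive CPU utilization from a single snapshot like this (a hypothetical helper, not part of the vendored code; field names are the testpb.ServerStats fields populated above):

	func cpuUtilization(s *testpb.ServerStats) float64 {
		// CPU seconds (user + system) per wall-clock second since the last reset.
		return (s.TimeUser + s.TimeSystem) / s.TimeElapsed
	}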
diff --git a/vendor/google.golang.org/grpc/benchmark/worker/main.go b/vendor/google.golang.org/grpc/benchmark/worker/main.go
deleted file mode 100644
index 2b1ba985b..000000000
--- a/vendor/google.golang.org/grpc/benchmark/worker/main.go
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package main
-
-import (
-	"flag"
-	"fmt"
-	"io"
-	"net"
-	"net/http"
-	_ "net/http/pprof"
-	"runtime"
-	"strconv"
-	"time"
-
-	"golang.org/x/net/context"
-	"google.golang.org/grpc"
-	testpb "google.golang.org/grpc/benchmark/grpc_testing"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/grpclog"
-)
-
-var (
-	driverPort    = flag.Int("driver_port", 10000, "port for communication with driver")
-	serverPort    = flag.Int("server_port", 0, "port for benchmark server if not specified by server config message")
-	pprofPort     = flag.Int("pprof_port", -1, "Port for pprof debug server to listen on. Pprof server doesn't start if unset")
-	blockProfRate = flag.Int("block_prof_rate", 0, "fraction of goroutine blocking events to report in blocking profile")
-)
-
-type byteBufCodec struct {
-}
-
-func (byteBufCodec) Marshal(v interface{}) ([]byte, error) {
-	b, ok := v.(*[]byte)
-	if !ok {
-		return nil, fmt.Errorf("failed to marshal: %v is not of type *[]byte", v)
-	}
-	return *b, nil
-}
-
-func (byteBufCodec) Unmarshal(data []byte, v interface{}) error {
-	b, ok := v.(*[]byte)
-	if !ok {
-		return fmt.Errorf("failed to unmarshal: %v is not of type *[]byte", v)
-	}
-	*b = data
-	return nil
-}
-
-func (byteBufCodec) String() string {
-	return "bytebuffer"
-}
-
-// workerServer implements WorkerService rpc handlers.
-// It can create benchmarkServer or benchmarkClient on demand.
-type workerServer struct {
-	stop       chan<- bool
-	serverPort int
-}
-
-func (s *workerServer) RunServer(stream testpb.WorkerService_RunServerServer) error {
-	var bs *benchmarkServer
-	defer func() {
-		// Close benchmark server when stream ends.
-		grpclog.Printf("closing benchmark server")
-		if bs != nil {
-			bs.closeFunc()
-		}
-	}()
-	for {
-		in, err := stream.Recv()
-		if err == io.EOF {
-			return nil
-		}
-		if err != nil {
-			return err
-		}
-
-		var out *testpb.ServerStatus
-		switch argtype := in.Argtype.(type) {
-		case *testpb.ServerArgs_Setup:
-			grpclog.Printf("server setup received:")
-			if bs != nil {
-				grpclog.Printf("server setup received when server already exists, closing the existing server")
-				bs.closeFunc()
-			}
-			bs, err = startBenchmarkServer(argtype.Setup, s.serverPort)
-			if err != nil {
-				return err
-			}
-			out = &testpb.ServerStatus{
-				Stats: bs.getStats(false),
-				Port:  int32(bs.port),
-				Cores: int32(bs.cores),
-			}
-
-		case *testpb.ServerArgs_Mark:
-			grpclog.Printf("server mark received:")
-			grpclog.Printf(" - %v", argtype)
-			if bs == nil {
-				return grpc.Errorf(codes.InvalidArgument, "server does not exist when mark received")
-			}
-			out = &testpb.ServerStatus{
-				Stats: bs.getStats(argtype.Mark.Reset_),
-				Port:  int32(bs.port),
-				Cores: int32(bs.cores),
-			}
-		}
-
-		if err := stream.Send(out); err != nil {
-			return err
-		}
-	}
-}
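Both halves of byteBufCodec above traffic in *[]byte and copy nothing, which is what lets the bytebuf benchmarks measure transport cost without protobuf (de)serialization. A minimal round trip, assuming it is compiled next to the codec (illustrative only, not part of the vendored code):

	func byteBufRoundTrip() ([]byte, error) {
		payload := []byte{1, 2, 3}
		wire, err := byteBufCodec{}.Marshal(&payload) // returns the slice itself
		if err != nil {
			return nil, err
		}
		var decoded []byte
		if err := (byteBufCodec{}).Unmarshal(wire, &decoded); err != nil {
			return nil, err
		}
		return decoded, nil // decoded aliases wire; no proto encoding happened
	}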
- grpclog.Printf("shuting down benchmark client") - if bc != nil { - bc.shutdown() - } - }() - for { - in, err := stream.Recv() - if err == io.EOF { - return nil - } - if err != nil { - return err - } - - var out *testpb.ClientStatus - switch t := in.Argtype.(type) { - case *testpb.ClientArgs_Setup: - grpclog.Printf("client setup received:") - if bc != nil { - grpclog.Printf("client setup received when client already exists, shuting down the existing client") - bc.shutdown() - } - bc, err = startBenchmarkClient(t.Setup) - if err != nil { - return err - } - out = &testpb.ClientStatus{ - Stats: bc.getStats(false), - } - - case *testpb.ClientArgs_Mark: - grpclog.Printf("client mark received:") - grpclog.Printf(" - %v", t) - if bc == nil { - return grpc.Errorf(codes.InvalidArgument, "client does not exist when mark received") - } - out = &testpb.ClientStatus{ - Stats: bc.getStats(t.Mark.Reset_), - } - } - - if err := stream.Send(out); err != nil { - return err - } - } -} - -func (s *workerServer) CoreCount(ctx context.Context, in *testpb.CoreRequest) (*testpb.CoreResponse, error) { - grpclog.Printf("core count: %v", runtime.NumCPU()) - return &testpb.CoreResponse{Cores: int32(runtime.NumCPU())}, nil -} - -func (s *workerServer) QuitWorker(ctx context.Context, in *testpb.Void) (*testpb.Void, error) { - grpclog.Printf("quiting worker") - s.stop <- true - return &testpb.Void{}, nil -} - -func main() { - grpc.EnableTracing = false - - flag.Parse() - lis, err := net.Listen("tcp", ":"+strconv.Itoa(*driverPort)) - if err != nil { - grpclog.Fatalf("failed to listen: %v", err) - } - grpclog.Printf("worker listening at port %v", *driverPort) - - s := grpc.NewServer() - stop := make(chan bool) - testpb.RegisterWorkerServiceServer(s, &workerServer{ - stop: stop, - serverPort: *serverPort, - }) - - go func() { - <-stop - // Wait for 1 second before stopping the server to make sure the return value of QuitWorker is sent to client. - // TODO revise this once server graceful stop is supported in gRPC. - time.Sleep(time.Second) - s.Stop() - }() - - runtime.SetBlockProfileRate(*blockProfRate) - - if *pprofPort >= 0 { - go func() { - grpclog.Println("Starting pprof server on port " + strconv.Itoa(*pprofPort)) - grpclog.Println(http.ListenAndServe("localhost:"+strconv.Itoa(*pprofPort), nil)) - }() - } - - s.Serve(lis) -} diff --git a/vendor/google.golang.org/grpc/benchmark/worker/util.go b/vendor/google.golang.org/grpc/benchmark/worker/util.go deleted file mode 100644 index f26993e65..000000000 --- a/vendor/google.golang.org/grpc/benchmark/worker/util.go +++ /dev/null @@ -1,35 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package main - -import "syscall" - -func cpuTimeDiff(first *syscall.Rusage, latest *syscall.Rusage) (float64, float64) { - var ( - utimeDiffs = latest.Utime.Sec - first.Utime.Sec - utimeDiffus = latest.Utime.Usec - first.Utime.Usec - stimeDiffs = latest.Stime.Sec - first.Stime.Sec - stimeDiffus = latest.Stime.Usec - first.Stime.Usec - ) - - uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6 - sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6 - - return uTimeElapsed, sTimeElapsed -} diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh deleted file mode 100755 index 4cdc6ba7c..000000000 --- a/vendor/google.golang.org/grpc/codegen.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash - -# This script serves as an example to demonstrate how to generate the gRPC-Go -# interface and the related messages from .proto file. -# -# It assumes the installation of i) Google proto buffer compiler at -# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen -# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have -# not, please install them first. -# -# We recommend running this script at $GOPATH/src. -# -# If this is not what you need, feel free to make your own scripts. Again, this -# script is for demonstration purpose. -# -proto=$1 -protoc --go_out=plugins=grpc:. $proto diff --git a/vendor/google.golang.org/grpc/examples/helloworld/greeter_client/main.go b/vendor/google.golang.org/grpc/examples/helloworld/greeter_client/main.go deleted file mode 100644 index 3dc426e24..000000000 --- a/vendor/google.golang.org/grpc/examples/helloworld/greeter_client/main.go +++ /dev/null @@ -1,54 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package main - -import ( - "log" - "os" - - "golang.org/x/net/context" - "google.golang.org/grpc" - pb "google.golang.org/grpc/examples/helloworld/helloworld" -) - -const ( - address = "localhost:50051" - defaultName = "world" -) - -func main() { - // Set up a connection to the server. - conn, err := grpc.Dial(address, grpc.WithInsecure()) - if err != nil { - log.Fatalf("did not connect: %v", err) - } - defer conn.Close() - c := pb.NewGreeterClient(conn) - - // Contact the server and print out its response. - name := defaultName - if len(os.Args) > 1 { - name = os.Args[1] - } - r, err := c.SayHello(context.Background(), &pb.HelloRequest{Name: name}) - if err != nil { - log.Fatalf("could not greet: %v", err) - } - log.Printf("Greeting: %s", r.Message) -} diff --git a/vendor/google.golang.org/grpc/examples/helloworld/greeter_server/main.go b/vendor/google.golang.org/grpc/examples/helloworld/greeter_server/main.go deleted file mode 100644 index 702a3b617..000000000 --- a/vendor/google.golang.org/grpc/examples/helloworld/greeter_server/main.go +++ /dev/null @@ -1,57 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -//go:generate protoc -I ../helloworld --go_out=plugins=grpc:../helloworld ../helloworld/helloworld.proto - -package main - -import ( - "log" - "net" - - "golang.org/x/net/context" - "google.golang.org/grpc" - pb "google.golang.org/grpc/examples/helloworld/helloworld" - "google.golang.org/grpc/reflection" -) - -const ( - port = ":50051" -) - -// server is used to implement helloworld.GreeterServer. -type server struct{} - -// SayHello implements helloworld.GreeterServer -func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) { - return &pb.HelloReply{Message: "Hello " + in.Name}, nil -} - -func main() { - lis, err := net.Listen("tcp", port) - if err != nil { - log.Fatalf("failed to listen: %v", err) - } - s := grpc.NewServer() - pb.RegisterGreeterServer(s, &server{}) - // Register reflection service on gRPC server. - reflection.Register(s) - if err := s.Serve(lis); err != nil { - log.Fatalf("failed to serve: %v", err) - } -} diff --git a/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go b/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go deleted file mode 100644 index 64bd1ef3c..000000000 --- a/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.pb.go +++ /dev/null @@ -1,164 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: helloworld.proto - -/* -Package helloworld is a generated protocol buffer package. - -It is generated from these files: - helloworld.proto - -It has these top-level messages: - HelloRequest - HelloReply -*/ -package helloworld - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// The request message containing the user's name. 
-type HelloRequest struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` -} - -func (m *HelloRequest) Reset() { *m = HelloRequest{} } -func (m *HelloRequest) String() string { return proto.CompactTextString(m) } -func (*HelloRequest) ProtoMessage() {} -func (*HelloRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -func (m *HelloRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -// The response message containing the greetings -type HelloReply struct { - Message string `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"` -} - -func (m *HelloReply) Reset() { *m = HelloReply{} } -func (m *HelloReply) String() string { return proto.CompactTextString(m) } -func (*HelloReply) ProtoMessage() {} -func (*HelloReply) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -func (m *HelloReply) GetMessage() string { - if m != nil { - return m.Message - } - return "" -} - -func init() { - proto.RegisterType((*HelloRequest)(nil), "helloworld.HelloRequest") - proto.RegisterType((*HelloReply)(nil), "helloworld.HelloReply") -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for Greeter service - -type GreeterClient interface { - // Sends a greeting - SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) -} - -type greeterClient struct { - cc *grpc.ClientConn -} - -func NewGreeterClient(cc *grpc.ClientConn) GreeterClient { - return &greeterClient{cc} -} - -func (c *greeterClient) SayHello(ctx context.Context, in *HelloRequest, opts ...grpc.CallOption) (*HelloReply, error) { - out := new(HelloReply) - err := grpc.Invoke(ctx, "/helloworld.Greeter/SayHello", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -// Server API for Greeter service - -type GreeterServer interface { - // Sends a greeting - SayHello(context.Context, *HelloRequest) (*HelloReply, error) -} - -func RegisterGreeterServer(s *grpc.Server, srv GreeterServer) { - s.RegisterService(&_Greeter_serviceDesc, srv) -} - -func _Greeter_SayHello_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(HelloRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(GreeterServer).SayHello(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/helloworld.Greeter/SayHello", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(GreeterServer).SayHello(ctx, req.(*HelloRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Greeter_serviceDesc = grpc.ServiceDesc{ - ServiceName: "helloworld.Greeter", - HandlerType: (*GreeterServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "SayHello", - Handler: _Greeter_SayHello_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "helloworld.proto", -} - -func init() { proto.RegisterFile("helloworld.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 175 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xc8, 0x48, 0xcd, 0xc9, - 0xc9, 0x2f, 0xcf, 0x2f, 0xca, 0x49, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x42, 0x88, - 0x28, 0x29, 0x71, 0xf1, 0x78, 0x80, 0x78, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x42, - 0x5c, 0x2c, 0x79, 0x89, 0xb9, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x60, 0xb6, 0x92, - 0x1a, 0x17, 0x17, 0x54, 0x4d, 0x41, 0x4e, 0xa5, 0x90, 0x04, 0x17, 0x7b, 0x6e, 0x6a, 0x71, 0x71, - 0x62, 0x3a, 0x4c, 0x11, 0x8c, 0x6b, 0xe4, 0xc9, 0xc5, 0xee, 0x5e, 0x94, 0x9a, 0x5a, 0x92, 0x5a, - 0x24, 0x64, 0xc7, 0xc5, 0x11, 0x9c, 0x58, 0x09, 0xd6, 0x25, 0x24, 0xa1, 0x87, 0xe4, 0x02, 0x64, - 0xcb, 0xa4, 0xc4, 0xb0, 0xc8, 0x14, 0xe4, 0x54, 0x2a, 0x31, 0x38, 0x19, 0x70, 0x49, 0x67, 0xe6, - 0xeb, 0xa5, 0x17, 0x15, 0x24, 0xeb, 0xa5, 0x56, 0x24, 0xe6, 0x16, 0xe4, 0xa4, 0x16, 0x23, 0xa9, - 0x75, 0xe2, 0x07, 0x2b, 0x0e, 0x07, 0xb1, 0x03, 0x40, 0x5e, 0x0a, 0x60, 0x4c, 0x62, 0x03, 0xfb, - 0xcd, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x0f, 0xb7, 0xcd, 0xf2, 0xef, 0x00, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.proto b/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.proto deleted file mode 100644 index d79a6a0d1..000000000 --- a/vendor/google.golang.org/grpc/examples/helloworld/helloworld/helloworld.proto +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2015 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -option java_multiple_files = true; -option java_package = "io.grpc.examples.helloworld"; -option java_outer_classname = "HelloWorldProto"; - -package helloworld; - -// The greeting service definition. -service Greeter { - // Sends a greeting - rpc SayHello (HelloRequest) returns (HelloReply) {} -} - -// The request message containing the user's name. -message HelloRequest { - string name = 1; -} - -// The response message containing the greetings -message HelloReply { - string message = 1; -} diff --git a/vendor/google.golang.org/grpc/examples/helloworld/mock_helloworld/hw_mock.go b/vendor/google.golang.org/grpc/examples/helloworld/mock_helloworld/hw_mock.go deleted file mode 100644 index 14957ed5f..000000000 --- a/vendor/google.golang.org/grpc/examples/helloworld/mock_helloworld/hw_mock.go +++ /dev/null @@ -1,48 +0,0 @@ -// Automatically generated by MockGen. DO NOT EDIT! -// Source: google.golang.org/grpc/examples/helloworld/helloworld (interfaces: GreeterClient) - -package mock_helloworld - -import ( - gomock "github.com/golang/mock/gomock" - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" - helloworld "google.golang.org/grpc/examples/helloworld/helloworld" -) - -// Mock of GreeterClient interface -type MockGreeterClient struct { - ctrl *gomock.Controller - recorder *_MockGreeterClientRecorder -} - -// Recorder for MockGreeterClient (not exported) -type _MockGreeterClientRecorder struct { - mock *MockGreeterClient -} - -func NewMockGreeterClient(ctrl *gomock.Controller) *MockGreeterClient { - mock := &MockGreeterClient{ctrl: ctrl} - mock.recorder = &_MockGreeterClientRecorder{mock} - return mock -} - -func (_m *MockGreeterClient) EXPECT() *_MockGreeterClientRecorder { - return _m.recorder -} - -func (_m *MockGreeterClient) SayHello(_param0 context.Context, _param1 *helloworld.HelloRequest, _param2 ...grpc.CallOption) (*helloworld.HelloReply, error) { - _s := []interface{}{_param0, _param1} - for _, _x := range _param2 { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "SayHello", _s...) - ret0, _ := ret[0].(*helloworld.HelloReply) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockGreeterClientRecorder) SayHello(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "SayHello", _s...) -} diff --git a/vendor/google.golang.org/grpc/examples/route_guide/client/client.go b/vendor/google.golang.org/grpc/examples/route_guide/client/client.go deleted file mode 100644 index 6fc7a079c..000000000 --- a/vendor/google.golang.org/grpc/examples/route_guide/client/client.go +++ /dev/null @@ -1,184 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package main implements a simple gRPC client that demonstrates how to use gRPC-Go libraries -// to perform unary, client streaming, server streaming and full duplex RPCs. 
-// -// It interacts with the route guide service whose definition can be found in routeguide/route_guide.proto. -package main - -import ( - "flag" - "io" - "log" - "math/rand" - "time" - - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - pb "google.golang.org/grpc/examples/route_guide/routeguide" - "google.golang.org/grpc/testdata" -) - -var ( - tls = flag.Bool("tls", false, "Connection uses TLS if true, else plain TCP") - caFile = flag.String("ca_file", "", "The file containning the CA root cert file") - serverAddr = flag.String("server_addr", "127.0.0.1:10000", "The server address in the format of host:port") - serverHostOverride = flag.String("server_host_override", "x.test.youtube.com", "The server name use to verify the hostname returned by TLS handshake") -) - -// printFeature gets the feature for the given point. -func printFeature(client pb.RouteGuideClient, point *pb.Point) { - log.Printf("Getting feature for point (%d, %d)", point.Latitude, point.Longitude) - feature, err := client.GetFeature(context.Background(), point) - if err != nil { - log.Fatalf("%v.GetFeatures(_) = _, %v: ", client, err) - } - log.Println(feature) -} - -// printFeatures lists all the features within the given bounding Rectangle. -func printFeatures(client pb.RouteGuideClient, rect *pb.Rectangle) { - log.Printf("Looking for features within %v", rect) - stream, err := client.ListFeatures(context.Background(), rect) - if err != nil { - log.Fatalf("%v.ListFeatures(_) = _, %v", client, err) - } - for { - feature, err := stream.Recv() - if err == io.EOF { - break - } - if err != nil { - log.Fatalf("%v.ListFeatures(_) = _, %v", client, err) - } - log.Println(feature) - } -} - -// runRecordRoute sends a sequence of points to server and expects to get a RouteSummary from server. -func runRecordRoute(client pb.RouteGuideClient) { - // Create a random number of random points - r := rand.New(rand.NewSource(time.Now().UnixNano())) - pointCount := int(r.Int31n(100)) + 2 // Traverse at least two points - var points []*pb.Point - for i := 0; i < pointCount; i++ { - points = append(points, randomPoint(r)) - } - log.Printf("Traversing %d points.", len(points)) - stream, err := client.RecordRoute(context.Background()) - if err != nil { - log.Fatalf("%v.RecordRoute(_) = _, %v", client, err) - } - for _, point := range points { - if err := stream.Send(point); err != nil { - log.Fatalf("%v.Send(%v) = %v", stream, point, err) - } - } - reply, err := stream.CloseAndRecv() - if err != nil { - log.Fatalf("%v.CloseAndRecv() got error %v, want %v", stream, err, nil) - } - log.Printf("Route summary: %v", reply) -} - -// runRouteChat receives a sequence of route notes, while sending notes for various locations. -func runRouteChat(client pb.RouteGuideClient) { - notes := []*pb.RouteNote{ - {&pb.Point{Latitude: 0, Longitude: 1}, "First message"}, - {&pb.Point{Latitude: 0, Longitude: 2}, "Second message"}, - {&pb.Point{Latitude: 0, Longitude: 3}, "Third message"}, - {&pb.Point{Latitude: 0, Longitude: 1}, "Fourth message"}, - {&pb.Point{Latitude: 0, Longitude: 2}, "Fifth message"}, - {&pb.Point{Latitude: 0, Longitude: 3}, "Sixth message"}, - } - stream, err := client.RouteChat(context.Background()) - if err != nil { - log.Fatalf("%v.RouteChat(_) = _, %v", client, err) - } - waitc := make(chan struct{}) - go func() { - for { - in, err := stream.Recv() - if err == io.EOF { - // read done. 
- close(waitc) - return - } - if err != nil { - log.Fatalf("Failed to receive a note : %v", err) - } - log.Printf("Got message %s at point(%d, %d)", in.Message, in.Location.Latitude, in.Location.Longitude) - } - }() - for _, note := range notes { - if err := stream.Send(note); err != nil { - log.Fatalf("Failed to send a note: %v", err) - } - } - stream.CloseSend() - <-waitc -} - -func randomPoint(r *rand.Rand) *pb.Point { - lat := (r.Int31n(180) - 90) * 1e7 - long := (r.Int31n(360) - 180) * 1e7 - return &pb.Point{Latitude: lat, Longitude: long} -} - -func main() { - flag.Parse() - var opts []grpc.DialOption - if *tls { - if *caFile == "" { - *caFile = testdata.Path("ca.pem") - } - creds, err := credentials.NewClientTLSFromFile(*caFile, *serverHostOverride) - if err != nil { - log.Fatalf("Failed to create TLS credentials %v", err) - } - opts = append(opts, grpc.WithTransportCredentials(creds)) - } else { - opts = append(opts, grpc.WithInsecure()) - } - conn, err := grpc.Dial(*serverAddr, opts...) - if err != nil { - log.Fatalf("fail to dial: %v", err) - } - defer conn.Close() - client := pb.NewRouteGuideClient(conn) - - // Looking for a valid feature - printFeature(client, &pb.Point{Latitude: 409146138, Longitude: -746188906}) - - // Feature missing. - printFeature(client, &pb.Point{Latitude: 0, Longitude: 0}) - - // Looking for features between 40, -75 and 42, -73. - printFeatures(client, &pb.Rectangle{ - Lo: &pb.Point{Latitude: 400000000, Longitude: -750000000}, - Hi: &pb.Point{Latitude: 420000000, Longitude: -730000000}, - }) - - // RecordRoute - runRecordRoute(client) - - // RouteChat - runRouteChat(client) -} diff --git a/vendor/google.golang.org/grpc/examples/route_guide/mock_routeguide/rg_mock.go b/vendor/google.golang.org/grpc/examples/route_guide/mock_routeguide/rg_mock.go deleted file mode 100644 index 328c929fa..000000000 --- a/vendor/google.golang.org/grpc/examples/route_guide/mock_routeguide/rg_mock.go +++ /dev/null @@ -1,200 +0,0 @@ -// Automatically generated by MockGen. DO NOT EDIT! -// Source: google.golang.org/grpc/examples/route_guide/routeguide (interfaces: RouteGuideClient,RouteGuide_RouteChatClient) - -package mock_routeguide - -import ( - gomock "github.com/golang/mock/gomock" - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" - routeguide "google.golang.org/grpc/examples/route_guide/routeguide" - metadata "google.golang.org/grpc/metadata" -) - -// Mock of RouteGuideClient interface -type MockRouteGuideClient struct { - ctrl *gomock.Controller - recorder *_MockRouteGuideClientRecorder -} - -// Recorder for MockRouteGuideClient (not exported) -type _MockRouteGuideClientRecorder struct { - mock *MockRouteGuideClient -} - -func NewMockRouteGuideClient(ctrl *gomock.Controller) *MockRouteGuideClient { - mock := &MockRouteGuideClient{ctrl: ctrl} - mock.recorder = &_MockRouteGuideClientRecorder{mock} - return mock -} - -func (_m *MockRouteGuideClient) EXPECT() *_MockRouteGuideClientRecorder { - return _m.recorder -} - -func (_m *MockRouteGuideClient) GetFeature(_param0 context.Context, _param1 *routeguide.Point, _param2 ...grpc.CallOption) (*routeguide.Feature, error) { - _s := []interface{}{_param0, _param1} - for _, _x := range _param2 { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "GetFeature", _s...) 
- ret0, _ := ret[0].(*routeguide.Feature) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockRouteGuideClientRecorder) GetFeature(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "GetFeature", _s...) -} - -func (_m *MockRouteGuideClient) ListFeatures(_param0 context.Context, _param1 *routeguide.Rectangle, _param2 ...grpc.CallOption) (routeguide.RouteGuide_ListFeaturesClient, error) { - _s := []interface{}{_param0, _param1} - for _, _x := range _param2 { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "ListFeatures", _s...) - ret0, _ := ret[0].(routeguide.RouteGuide_ListFeaturesClient) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockRouteGuideClientRecorder) ListFeatures(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0, arg1}, arg2...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "ListFeatures", _s...) -} - -func (_m *MockRouteGuideClient) RecordRoute(_param0 context.Context, _param1 ...grpc.CallOption) (routeguide.RouteGuide_RecordRouteClient, error) { - _s := []interface{}{_param0} - for _, _x := range _param1 { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "RecordRoute", _s...) - ret0, _ := ret[0].(routeguide.RouteGuide_RecordRouteClient) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockRouteGuideClientRecorder) RecordRoute(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0}, arg1...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "RecordRoute", _s...) -} - -func (_m *MockRouteGuideClient) RouteChat(_param0 context.Context, _param1 ...grpc.CallOption) (routeguide.RouteGuide_RouteChatClient, error) { - _s := []interface{}{_param0} - for _, _x := range _param1 { - _s = append(_s, _x) - } - ret := _m.ctrl.Call(_m, "RouteChat", _s...) - ret0, _ := ret[0].(routeguide.RouteGuide_RouteChatClient) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockRouteGuideClientRecorder) RouteChat(arg0 interface{}, arg1 ...interface{}) *gomock.Call { - _s := append([]interface{}{arg0}, arg1...) - return _mr.mock.ctrl.RecordCall(_mr.mock, "RouteChat", _s...) 
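The generated mocks above are consumed with gomock in the usual way; a sketch of a unit test against MockRouteGuideClient, assuming the vendored import paths (illustrative, not part of this patch):

package mock_routeguide_test

import (
	"testing"

	"github.com/golang/mock/gomock"
	"golang.org/x/net/context"
	mock_routeguide "google.golang.org/grpc/examples/route_guide/mock_routeguide"
	pb "google.golang.org/grpc/examples/route_guide/routeguide"
)

func TestGetFeatureWithMock(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish() // fails the test if an EXPECT() went unmet

	client := mock_routeguide.NewMockRouteGuideClient(ctrl)
	client.EXPECT().
		GetFeature(gomock.Any(), gomock.Any()).
		Return(&pb.Feature{Name: "Berkshire Valley"}, nil)

	// The canned Feature comes back without any network traffic.
	f, err := client.GetFeature(context.Background(), &pb.Point{})
	if err != nil || f.Name != "Berkshire Valley" {
		t.Fatalf("GetFeature() = %v, %v; want the canned feature", f, err)
	}
}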
-} - -// Mock of RouteGuide_RouteChatClient interface -type MockRouteGuide_RouteChatClient struct { - ctrl *gomock.Controller - recorder *_MockRouteGuide_RouteChatClientRecorder -} - -// Recorder for MockRouteGuide_RouteChatClient (not exported) -type _MockRouteGuide_RouteChatClientRecorder struct { - mock *MockRouteGuide_RouteChatClient -} - -func NewMockRouteGuide_RouteChatClient(ctrl *gomock.Controller) *MockRouteGuide_RouteChatClient { - mock := &MockRouteGuide_RouteChatClient{ctrl: ctrl} - mock.recorder = &_MockRouteGuide_RouteChatClientRecorder{mock} - return mock -} - -func (_m *MockRouteGuide_RouteChatClient) EXPECT() *_MockRouteGuide_RouteChatClientRecorder { - return _m.recorder -} - -func (_m *MockRouteGuide_RouteChatClient) CloseSend() error { - ret := _m.ctrl.Call(_m, "CloseSend") - ret0, _ := ret[0].(error) - return ret0 -} - -func (_mr *_MockRouteGuide_RouteChatClientRecorder) CloseSend() *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "CloseSend") -} - -func (_m *MockRouteGuide_RouteChatClient) Context() context.Context { - ret := _m.ctrl.Call(_m, "Context") - ret0, _ := ret[0].(context.Context) - return ret0 -} - -func (_mr *_MockRouteGuide_RouteChatClientRecorder) Context() *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "Context") -} - -func (_m *MockRouteGuide_RouteChatClient) Header() (metadata.MD, error) { - ret := _m.ctrl.Call(_m, "Header") - ret0, _ := ret[0].(metadata.MD) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockRouteGuide_RouteChatClientRecorder) Header() *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "Header") -} - -func (_m *MockRouteGuide_RouteChatClient) Recv() (*routeguide.RouteNote, error) { - ret := _m.ctrl.Call(_m, "Recv") - ret0, _ := ret[0].(*routeguide.RouteNote) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -func (_mr *_MockRouteGuide_RouteChatClientRecorder) Recv() *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "Recv") -} - -func (_m *MockRouteGuide_RouteChatClient) RecvMsg(_param0 interface{}) error { - ret := _m.ctrl.Call(_m, "RecvMsg", _param0) - ret0, _ := ret[0].(error) - return ret0 -} - -func (_mr *_MockRouteGuide_RouteChatClientRecorder) RecvMsg(arg0 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "RecvMsg", arg0) -} - -func (_m *MockRouteGuide_RouteChatClient) Send(_param0 *routeguide.RouteNote) error { - ret := _m.ctrl.Call(_m, "Send", _param0) - ret0, _ := ret[0].(error) - return ret0 -} - -func (_mr *_MockRouteGuide_RouteChatClientRecorder) Send(arg0 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "Send", arg0) -} - -func (_m *MockRouteGuide_RouteChatClient) SendMsg(_param0 interface{}) error { - ret := _m.ctrl.Call(_m, "SendMsg", _param0) - ret0, _ := ret[0].(error) - return ret0 -} - -func (_mr *_MockRouteGuide_RouteChatClientRecorder) SendMsg(arg0 interface{}) *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "SendMsg", arg0) -} - -func (_m *MockRouteGuide_RouteChatClient) Trailer() metadata.MD { - ret := _m.ctrl.Call(_m, "Trailer") - ret0, _ := ret[0].(metadata.MD) - return ret0 -} - -func (_mr *_MockRouteGuide_RouteChatClientRecorder) Trailer() *gomock.Call { - return _mr.mock.ctrl.RecordCall(_mr.mock, "Trailer") -} diff --git a/vendor/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.pb.go b/vendor/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.pb.go deleted file mode 100644 index cf7f3937e..000000000 --- 
a/vendor/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.pb.go +++ /dev/null @@ -1,543 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: route_guide.proto - -/* -Package routeguide is a generated protocol buffer package. - -It is generated from these files: - route_guide.proto - -It has these top-level messages: - Point - Rectangle - Feature - RouteNote - RouteSummary -*/ -package routeguide - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// Points are represented as latitude-longitude pairs in the E7 representation -// (degrees multiplied by 10**7 and rounded to the nearest integer). -// Latitudes should be in the range +/- 90 degrees and longitude should be in -// the range +/- 180 degrees (inclusive). -type Point struct { - Latitude int32 `protobuf:"varint,1,opt,name=latitude" json:"latitude,omitempty"` - Longitude int32 `protobuf:"varint,2,opt,name=longitude" json:"longitude,omitempty"` -} - -func (m *Point) Reset() { *m = Point{} } -func (m *Point) String() string { return proto.CompactTextString(m) } -func (*Point) ProtoMessage() {} -func (*Point) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -func (m *Point) GetLatitude() int32 { - if m != nil { - return m.Latitude - } - return 0 -} - -func (m *Point) GetLongitude() int32 { - if m != nil { - return m.Longitude - } - return 0 -} - -// A latitude-longitude rectangle, represented as two diagonally opposite -// points "lo" and "hi". -type Rectangle struct { - // One corner of the rectangle. - Lo *Point `protobuf:"bytes,1,opt,name=lo" json:"lo,omitempty"` - // The other corner of the rectangle. - Hi *Point `protobuf:"bytes,2,opt,name=hi" json:"hi,omitempty"` -} - -func (m *Rectangle) Reset() { *m = Rectangle{} } -func (m *Rectangle) String() string { return proto.CompactTextString(m) } -func (*Rectangle) ProtoMessage() {} -func (*Rectangle) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -func (m *Rectangle) GetLo() *Point { - if m != nil { - return m.Lo - } - return nil -} - -func (m *Rectangle) GetHi() *Point { - if m != nil { - return m.Hi - } - return nil -} - -// A feature names something at a given point. -// -// If a feature could not be named, the name is empty. -type Feature struct { - // The name of the feature. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // The point where the feature is detected. 
- Location *Point `protobuf:"bytes,2,opt,name=location" json:"location,omitempty"` -} - -func (m *Feature) Reset() { *m = Feature{} } -func (m *Feature) String() string { return proto.CompactTextString(m) } -func (*Feature) ProtoMessage() {} -func (*Feature) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -func (m *Feature) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Feature) GetLocation() *Point { - if m != nil { - return m.Location - } - return nil -} - -// A RouteNote is a message sent while at a given point. -type RouteNote struct { - // The location from which the message is sent. - Location *Point `protobuf:"bytes,1,opt,name=location" json:"location,omitempty"` - // The message to be sent. - Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` -} - -func (m *RouteNote) Reset() { *m = RouteNote{} } -func (m *RouteNote) String() string { return proto.CompactTextString(m) } -func (*RouteNote) ProtoMessage() {} -func (*RouteNote) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } - -func (m *RouteNote) GetLocation() *Point { - if m != nil { - return m.Location - } - return nil -} - -func (m *RouteNote) GetMessage() string { - if m != nil { - return m.Message - } - return "" -} - -// A RouteSummary is received in response to a RecordRoute rpc. -// -// It contains the number of individual points received, the number of -// detected features, and the total distance covered as the cumulative sum of -// the distance between each point. -type RouteSummary struct { - // The number of points received. - PointCount int32 `protobuf:"varint,1,opt,name=point_count,json=pointCount" json:"point_count,omitempty"` - // The number of known features passed while traversing the route. - FeatureCount int32 `protobuf:"varint,2,opt,name=feature_count,json=featureCount" json:"feature_count,omitempty"` - // The distance covered in metres. - Distance int32 `protobuf:"varint,3,opt,name=distance" json:"distance,omitempty"` - // The duration of the traversal in seconds. - ElapsedTime int32 `protobuf:"varint,4,opt,name=elapsed_time,json=elapsedTime" json:"elapsed_time,omitempty"` -} - -func (m *RouteSummary) Reset() { *m = RouteSummary{} } -func (m *RouteSummary) String() string { return proto.CompactTextString(m) } -func (*RouteSummary) ProtoMessage() {} -func (*RouteSummary) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } - -func (m *RouteSummary) GetPointCount() int32 { - if m != nil { - return m.PointCount - } - return 0 -} - -func (m *RouteSummary) GetFeatureCount() int32 { - if m != nil { - return m.FeatureCount - } - return 0 -} - -func (m *RouteSummary) GetDistance() int32 { - if m != nil { - return m.Distance - } - return 0 -} - -func (m *RouteSummary) GetElapsedTime() int32 { - if m != nil { - return m.ElapsedTime - } - return 0 -} - -func init() { - proto.RegisterType((*Point)(nil), "routeguide.Point") - proto.RegisterType((*Rectangle)(nil), "routeguide.Rectangle") - proto.RegisterType((*Feature)(nil), "routeguide.Feature") - proto.RegisterType((*RouteNote)(nil), "routeguide.RouteNote") - proto.RegisterType((*RouteSummary)(nil), "routeguide.RouteSummary") -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. 
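The Point message above stores coordinates in the E7 representation described in its comment; a worked sketch of the conversion, with hypothetical helper names (40.6 degrees becomes 406000000):

package main

import (
	"fmt"
	"math"
)

// toE7 scales decimal degrees by 10**7 and rounds, per the comment on
// Point above; the result fits in int32 for any valid lat/long
// (180 * 1e7 = 1.8e9 < math.MaxInt32).
func toE7(deg float64) int32 { return int32(math.Round(deg * 1e7)) }

// fromE7 inverts the encoding back to decimal degrees.
func fromE7(e7 int32) float64 { return float64(e7) / 1e7 }

func main() {
	lat := toE7(40.6)
	fmt.Println(lat, fromE7(lat)) // 406000000 40.6
}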
-const _ = grpc.SupportPackageIsVersion4 - -// Client API for RouteGuide service - -type RouteGuideClient interface { - // A simple RPC. - // - // Obtains the feature at a given position. - // - // A feature with an empty name is returned if there's no feature at the given - // position. - GetFeature(ctx context.Context, in *Point, opts ...grpc.CallOption) (*Feature, error) - // A server-to-client streaming RPC. - // - // Obtains the Features available within the given Rectangle. Results are - // streamed rather than returned at once (e.g. in a response message with a - // repeated field), as the rectangle may cover a large area and contain a - // huge number of features. - ListFeatures(ctx context.Context, in *Rectangle, opts ...grpc.CallOption) (RouteGuide_ListFeaturesClient, error) - // A client-to-server streaming RPC. - // - // Accepts a stream of Points on a route being traversed, returning a - // RouteSummary when traversal is completed. - RecordRoute(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RecordRouteClient, error) - // A Bidirectional streaming RPC. - // - // Accepts a stream of RouteNotes sent while a route is being traversed, - // while receiving other RouteNotes (e.g. from other users). - RouteChat(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RouteChatClient, error) -} - -type routeGuideClient struct { - cc *grpc.ClientConn -} - -func NewRouteGuideClient(cc *grpc.ClientConn) RouteGuideClient { - return &routeGuideClient{cc} -} - -func (c *routeGuideClient) GetFeature(ctx context.Context, in *Point, opts ...grpc.CallOption) (*Feature, error) { - out := new(Feature) - err := grpc.Invoke(ctx, "/routeguide.RouteGuide/GetFeature", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *routeGuideClient) ListFeatures(ctx context.Context, in *Rectangle, opts ...grpc.CallOption) (RouteGuide_ListFeaturesClient, error) { - stream, err := grpc.NewClientStream(ctx, &_RouteGuide_serviceDesc.Streams[0], c.cc, "/routeguide.RouteGuide/ListFeatures", opts...) - if err != nil { - return nil, err - } - x := &routeGuideListFeaturesClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type RouteGuide_ListFeaturesClient interface { - Recv() (*Feature, error) - grpc.ClientStream -} - -type routeGuideListFeaturesClient struct { - grpc.ClientStream -} - -func (x *routeGuideListFeaturesClient) Recv() (*Feature, error) { - m := new(Feature) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *routeGuideClient) RecordRoute(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RecordRouteClient, error) { - stream, err := grpc.NewClientStream(ctx, &_RouteGuide_serviceDesc.Streams[1], c.cc, "/routeguide.RouteGuide/RecordRoute", opts...) 
- if err != nil { - return nil, err - } - x := &routeGuideRecordRouteClient{stream} - return x, nil -} - -type RouteGuide_RecordRouteClient interface { - Send(*Point) error - CloseAndRecv() (*RouteSummary, error) - grpc.ClientStream -} - -type routeGuideRecordRouteClient struct { - grpc.ClientStream -} - -func (x *routeGuideRecordRouteClient) Send(m *Point) error { - return x.ClientStream.SendMsg(m) -} - -func (x *routeGuideRecordRouteClient) CloseAndRecv() (*RouteSummary, error) { - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - m := new(RouteSummary) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *routeGuideClient) RouteChat(ctx context.Context, opts ...grpc.CallOption) (RouteGuide_RouteChatClient, error) { - stream, err := grpc.NewClientStream(ctx, &_RouteGuide_serviceDesc.Streams[2], c.cc, "/routeguide.RouteGuide/RouteChat", opts...) - if err != nil { - return nil, err - } - x := &routeGuideRouteChatClient{stream} - return x, nil -} - -type RouteGuide_RouteChatClient interface { - Send(*RouteNote) error - Recv() (*RouteNote, error) - grpc.ClientStream -} - -type routeGuideRouteChatClient struct { - grpc.ClientStream -} - -func (x *routeGuideRouteChatClient) Send(m *RouteNote) error { - return x.ClientStream.SendMsg(m) -} - -func (x *routeGuideRouteChatClient) Recv() (*RouteNote, error) { - m := new(RouteNote) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// Server API for RouteGuide service - -type RouteGuideServer interface { - // A simple RPC. - // - // Obtains the feature at a given position. - // - // A feature with an empty name is returned if there's no feature at the given - // position. - GetFeature(context.Context, *Point) (*Feature, error) - // A server-to-client streaming RPC. - // - // Obtains the Features available within the given Rectangle. Results are - // streamed rather than returned at once (e.g. in a response message with a - // repeated field), as the rectangle may cover a large area and contain a - // huge number of features. - ListFeatures(*Rectangle, RouteGuide_ListFeaturesServer) error - // A client-to-server streaming RPC. - // - // Accepts a stream of Points on a route being traversed, returning a - // RouteSummary when traversal is completed. - RecordRoute(RouteGuide_RecordRouteServer) error - // A Bidirectional streaming RPC. - // - // Accepts a stream of RouteNotes sent while a route is being traversed, - // while receiving other RouteNotes (e.g. from other users). 
- RouteChat(RouteGuide_RouteChatServer) error -} - -func RegisterRouteGuideServer(s *grpc.Server, srv RouteGuideServer) { - s.RegisterService(&_RouteGuide_serviceDesc, srv) -} - -func _RouteGuide_GetFeature_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Point) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(RouteGuideServer).GetFeature(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/routeguide.RouteGuide/GetFeature", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RouteGuideServer).GetFeature(ctx, req.(*Point)) - } - return interceptor(ctx, in, info, handler) -} - -func _RouteGuide_ListFeatures_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(Rectangle) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(RouteGuideServer).ListFeatures(m, &routeGuideListFeaturesServer{stream}) -} - -type RouteGuide_ListFeaturesServer interface { - Send(*Feature) error - grpc.ServerStream -} - -type routeGuideListFeaturesServer struct { - grpc.ServerStream -} - -func (x *routeGuideListFeaturesServer) Send(m *Feature) error { - return x.ServerStream.SendMsg(m) -} - -func _RouteGuide_RecordRoute_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(RouteGuideServer).RecordRoute(&routeGuideRecordRouteServer{stream}) -} - -type RouteGuide_RecordRouteServer interface { - SendAndClose(*RouteSummary) error - Recv() (*Point, error) - grpc.ServerStream -} - -type routeGuideRecordRouteServer struct { - grpc.ServerStream -} - -func (x *routeGuideRecordRouteServer) SendAndClose(m *RouteSummary) error { - return x.ServerStream.SendMsg(m) -} - -func (x *routeGuideRecordRouteServer) Recv() (*Point, error) { - m := new(Point) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _RouteGuide_RouteChat_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(RouteGuideServer).RouteChat(&routeGuideRouteChatServer{stream}) -} - -type RouteGuide_RouteChatServer interface { - Send(*RouteNote) error - Recv() (*RouteNote, error) - grpc.ServerStream -} - -type routeGuideRouteChatServer struct { - grpc.ServerStream -} - -func (x *routeGuideRouteChatServer) Send(m *RouteNote) error { - return x.ServerStream.SendMsg(m) -} - -func (x *routeGuideRouteChatServer) Recv() (*RouteNote, error) { - m := new(RouteNote) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _RouteGuide_serviceDesc = grpc.ServiceDesc{ - ServiceName: "routeguide.RouteGuide", - HandlerType: (*RouteGuideServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetFeature", - Handler: _RouteGuide_GetFeature_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "ListFeatures", - Handler: _RouteGuide_ListFeatures_Handler, - ServerStreams: true, - }, - { - StreamName: "RecordRoute", - Handler: _RouteGuide_RecordRoute_Handler, - ClientStreams: true, - }, - { - StreamName: "RouteChat", - Handler: _RouteGuide_RouteChat_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "route_guide.proto", -} - -func init() { proto.RegisterFile("route_guide.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 404 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x53, 0xdd, 0xca, 0xd3, 
0x40, - 0x10, 0xfd, 0x36, 0x7e, 0x9f, 0x6d, 0x26, 0x11, 0xe9, 0x88, 0x10, 0xa2, 0xa0, 0x8d, 0x37, 0xbd, - 0x31, 0x94, 0x0a, 0x5e, 0x56, 0x6c, 0xc1, 0xde, 0x14, 0xa9, 0xb1, 0xf7, 0x65, 0x4d, 0xc6, 0x74, - 0x61, 0x93, 0x0d, 0xc9, 0x06, 0xf4, 0x01, 0x7c, 0x02, 0x5f, 0x58, 0xb2, 0x49, 0xda, 0x54, 0x5b, - 0xbc, 0xdb, 0x39, 0x73, 0xce, 0xfc, 0x9c, 0x61, 0x61, 0x52, 0xaa, 0x5a, 0xd3, 0x21, 0xad, 0x45, - 0x42, 0x61, 0x51, 0x2a, 0xad, 0x10, 0x0c, 0x64, 0x90, 0xe0, 0x23, 0x3c, 0xec, 0x94, 0xc8, 0x35, - 0xfa, 0x30, 0x96, 0x5c, 0x0b, 0x5d, 0x27, 0xe4, 0xb1, 0xd7, 0x6c, 0xf6, 0x10, 0x9d, 0x62, 0x7c, - 0x09, 0xb6, 0x54, 0x79, 0xda, 0x26, 0x2d, 0x93, 0x3c, 0x03, 0xc1, 0x17, 0xb0, 0x23, 0x8a, 0x35, - 0xcf, 0x53, 0x49, 0x38, 0x05, 0x4b, 0x2a, 0x53, 0xc0, 0x59, 0x4c, 0xc2, 0x73, 0xa3, 0xd0, 0x74, - 0x89, 0x2c, 0xa9, 0x1a, 0xca, 0x51, 0x98, 0x32, 0xd7, 0x29, 0x47, 0x11, 0x6c, 0x61, 0xf4, 0x89, - 0xb8, 0xae, 0x4b, 0x42, 0x84, 0xfb, 0x9c, 0x67, 0xed, 0x4c, 0x76, 0x64, 0xde, 0xf8, 0x16, 0xc6, - 0x52, 0xc5, 0x5c, 0x0b, 0x95, 0xdf, 0xae, 0x73, 0xa2, 0x04, 0x7b, 0xb0, 0xa3, 0x26, 0xfb, 0x59, - 0xe9, 0x4b, 0x2d, 0xfb, 0xaf, 0x16, 0x3d, 0x18, 0x65, 0x54, 0x55, 0x3c, 0x6d, 0x17, 0xb7, 0xa3, - 0x3e, 0x0c, 0x7e, 0x33, 0x70, 0x4d, 0xd9, 0xaf, 0x75, 0x96, 0xf1, 0xf2, 0x27, 0xbe, 0x02, 0xa7, - 0x68, 0xd4, 0x87, 0x58, 0xd5, 0xb9, 0xee, 0x4c, 0x04, 0x03, 0xad, 0x1b, 0x04, 0xdf, 0xc0, 0x93, - 0xef, 0xed, 0x56, 0x1d, 0xa5, 0xb5, 0xd2, 0xed, 0xc0, 0x96, 0xe4, 0xc3, 0x38, 0x11, 0x95, 0xe6, - 0x79, 0x4c, 0xde, 0xa3, 0xf6, 0x0e, 0x7d, 0x8c, 0x53, 0x70, 0x49, 0xf2, 0xa2, 0xa2, 0xe4, 0xa0, - 0x45, 0x46, 0xde, 0xbd, 0xc9, 0x3b, 0x1d, 0xb6, 0x17, 0x19, 0x2d, 0x7e, 0x59, 0x00, 0x66, 0xaa, - 0x4d, 0xb3, 0x0e, 0xbe, 0x07, 0xd8, 0x90, 0xee, 0xbd, 0xfc, 0x77, 0x53, 0xff, 0xd9, 0x10, 0xea, - 0x78, 0xc1, 0x1d, 0x2e, 0xc1, 0xdd, 0x8a, 0xaa, 0x17, 0x56, 0xf8, 0x7c, 0x48, 0x3b, 0x5d, 0xfb, - 0x86, 0x7a, 0xce, 0x70, 0x09, 0x4e, 0x44, 0xb1, 0x2a, 0x13, 0x33, 0xcb, 0xb5, 0xc6, 0xde, 0x45, - 0xc5, 0x81, 0x8f, 0xc1, 0xdd, 0x8c, 0xe1, 0x87, 0xee, 0x64, 0xeb, 0x23, 0xd7, 0x7f, 0x35, 0xef, - 0x2f, 0xe9, 0x5f, 0x87, 0x1b, 0xf9, 0x9c, 0xad, 0xe6, 0xf0, 0x42, 0xa8, 0x30, 0x2d, 0x8b, 0x38, - 0xa4, 0x1f, 0x3c, 0x2b, 0x24, 0x55, 0x03, 0xfa, 0xea, 0xe9, 0xd9, 0xa3, 0x5d, 0xf3, 0x27, 0x76, - 0xec, 0xdb, 0x63, 0xf3, 0x39, 0xde, 0xfd, 0x09, 0x00, 0x00, 0xff, 0xff, 0xc8, 0xe4, 0xef, 0xe6, - 0x31, 0x03, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.proto b/vendor/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.proto deleted file mode 100644 index fe21e437a..000000000 --- a/vendor/google.golang.org/grpc/examples/route_guide/routeguide/route_guide.proto +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2015 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
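The generated route_guide.pb.go removed above is derived from the proto that follows; server.go later in this patch carries the go:generate directive used to rebuild it (run from the server/ directory, with protoc and protoc-gen-go on PATH):

//go:generate protoc -I ../routeguide --go_out=plugins=grpc:../routeguide ../routeguide/route_guide.proto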
- -syntax = "proto3"; - -option java_multiple_files = true; -option java_package = "io.grpc.examples.routeguide"; -option java_outer_classname = "RouteGuideProto"; - -package routeguide; - -// Interface exported by the server. -service RouteGuide { - // A simple RPC. - // - // Obtains the feature at a given position. - // - // A feature with an empty name is returned if there's no feature at the given - // position. - rpc GetFeature(Point) returns (Feature) {} - - // A server-to-client streaming RPC. - // - // Obtains the Features available within the given Rectangle. Results are - // streamed rather than returned at once (e.g. in a response message with a - // repeated field), as the rectangle may cover a large area and contain a - // huge number of features. - rpc ListFeatures(Rectangle) returns (stream Feature) {} - - // A client-to-server streaming RPC. - // - // Accepts a stream of Points on a route being traversed, returning a - // RouteSummary when traversal is completed. - rpc RecordRoute(stream Point) returns (RouteSummary) {} - - // A Bidirectional streaming RPC. - // - // Accepts a stream of RouteNotes sent while a route is being traversed, - // while receiving other RouteNotes (e.g. from other users). - rpc RouteChat(stream RouteNote) returns (stream RouteNote) {} -} - -// Points are represented as latitude-longitude pairs in the E7 representation -// (degrees multiplied by 10**7 and rounded to the nearest integer). -// Latitudes should be in the range +/- 90 degrees and longitude should be in -// the range +/- 180 degrees (inclusive). -message Point { - int32 latitude = 1; - int32 longitude = 2; -} - -// A latitude-longitude rectangle, represented as two diagonally opposite -// points "lo" and "hi". -message Rectangle { - // One corner of the rectangle. - Point lo = 1; - - // The other corner of the rectangle. - Point hi = 2; -} - -// A feature names something at a given point. -// -// If a feature could not be named, the name is empty. -message Feature { - // The name of the feature. - string name = 1; - - // The point where the feature is detected. - Point location = 2; -} - -// A RouteNote is a message sent while at a given point. -message RouteNote { - // The location from which the message is sent. - Point location = 1; - - // The message to be sent. - string message = 2; -} - -// A RouteSummary is received in response to a RecordRoute rpc. -// -// It contains the number of individual points received, the number of -// detected features, and the total distance covered as the cumulative sum of -// the distance between each point. -message RouteSummary { - // The number of points received. - int32 point_count = 1; - - // The number of known features passed while traversing the route. - int32 feature_count = 2; - - // The distance covered in metres. - int32 distance = 3; - - // The duration of the traversal in seconds. - int32 elapsed_time = 4; -} diff --git a/vendor/google.golang.org/grpc/examples/route_guide/server/server.go b/vendor/google.golang.org/grpc/examples/route_guide/server/server.go deleted file mode 100644 index 5d919047e..000000000 --- a/vendor/google.golang.org/grpc/examples/route_guide/server/server.go +++ /dev/null @@ -1,233 +0,0 @@ -/* - * - * Copyright 2015 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -//go:generate protoc -I ../routeguide --go_out=plugins=grpc:../routeguide ../routeguide/route_guide.proto - -// Package main implements a simple gRPC server that demonstrates how to use gRPC-Go libraries -// to perform unary, client streaming, server streaming and full duplex RPCs. -// -// It implements the route guide service whose definition can be found in routeguide/route_guide.proto. -package main - -import ( - "encoding/json" - "flag" - "fmt" - "io" - "io/ioutil" - "log" - "math" - "net" - "time" - - "golang.org/x/net/context" - "google.golang.org/grpc" - - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/testdata" - - "github.com/golang/protobuf/proto" - - pb "google.golang.org/grpc/examples/route_guide/routeguide" -) - -var ( - tls = flag.Bool("tls", false, "Connection uses TLS if true, else plain TCP") - certFile = flag.String("cert_file", "", "The TLS cert file") - keyFile = flag.String("key_file", "", "The TLS key file") - jsonDBFile = flag.String("json_db_file", "testdata/route_guide_db.json", "A json file containing a list of features") - port = flag.Int("port", 10000, "The server port") -) - -type routeGuideServer struct { - savedFeatures []*pb.Feature - routeNotes map[string][]*pb.RouteNote -} - -// GetFeature returns the feature at the given point. -func (s *routeGuideServer) GetFeature(ctx context.Context, point *pb.Point) (*pb.Feature, error) { - for _, feature := range s.savedFeatures { - if proto.Equal(feature.Location, point) { - return feature, nil - } - } - // No feature was found, return an unnamed feature - return &pb.Feature{Location: point}, nil -} - -// ListFeatures lists all features contained within the given bounding Rectangle. -func (s *routeGuideServer) ListFeatures(rect *pb.Rectangle, stream pb.RouteGuide_ListFeaturesServer) error { - for _, feature := range s.savedFeatures { - if inRange(feature.Location, rect) { - if err := stream.Send(feature); err != nil { - return err - } - } - } - return nil -} - -// RecordRoute records a route composited of a sequence of points. -// -// It gets a stream of points, and responds with statistics about the "trip": -// number of points, number of known features visited, total distance traveled, and -// total time spent. -func (s *routeGuideServer) RecordRoute(stream pb.RouteGuide_RecordRouteServer) error { - var pointCount, featureCount, distance int32 - var lastPoint *pb.Point - startTime := time.Now() - for { - point, err := stream.Recv() - if err == io.EOF { - endTime := time.Now() - return stream.SendAndClose(&pb.RouteSummary{ - PointCount: pointCount, - FeatureCount: featureCount, - Distance: distance, - ElapsedTime: int32(endTime.Sub(startTime).Seconds()), - }) - } - if err != nil { - return err - } - pointCount++ - for _, feature := range s.savedFeatures { - if proto.Equal(feature.Location, point) { - featureCount++ - } - } - if lastPoint != nil { - distance += calcDistance(lastPoint, point) - } - lastPoint = point - } -} - -// RouteChat receives a stream of message/location pairs, and responds with a stream of all -// previous messages at each of those locations. 
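The RouteChat implementation just below appends to s.routeNotes, a plain map, so two concurrent RouteChat streams could race on it; later upstream revisions of this example guard the map with a sync.Mutex. A minimal sketch of the guarded pattern, with hypothetical names, not part of the deleted file:

package main

import (
	"fmt"
	"sync"
)

// noteStore sketches a race-safe variant of the routeNotes map.
type noteStore struct {
	mu    sync.Mutex
	notes map[string][]string
}

// add appends msg under key and returns a copy of the notes at key, so
// callers can range over the slice without holding the lock.
func (s *noteStore) add(key, msg string) []string {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.notes[key] = append(s.notes[key], msg)
	return append([]string(nil), s.notes[key]...)
}

func main() {
	s := &noteStore{notes: make(map[string][]string)}
	fmt.Println(s.add("0 1", "First message"))
	fmt.Println(s.add("0 1", "Fourth message"))
}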
-func (s *routeGuideServer) RouteChat(stream pb.RouteGuide_RouteChatServer) error { - for { - in, err := stream.Recv() - if err == io.EOF { - return nil - } - if err != nil { - return err - } - key := serialize(in.Location) - if _, present := s.routeNotes[key]; !present { - s.routeNotes[key] = []*pb.RouteNote{in} - } else { - s.routeNotes[key] = append(s.routeNotes[key], in) - } - for _, note := range s.routeNotes[key] { - if err := stream.Send(note); err != nil { - return err - } - } - } -} - -// loadFeatures loads features from a JSON file. -func (s *routeGuideServer) loadFeatures(filePath string) { - file, err := ioutil.ReadFile(filePath) - if err != nil { - log.Fatalf("Failed to load default features: %v", err) - } - if err := json.Unmarshal(file, &s.savedFeatures); err != nil { - log.Fatalf("Failed to load default features: %v", err) - } -} - -func toRadians(num float64) float64 { - return num * math.Pi / float64(180) -} - -// calcDistance calculates the distance between two points using the "haversine" formula. -// This code was taken from http://www.movable-type.co.uk/scripts/latlong.html. -func calcDistance(p1 *pb.Point, p2 *pb.Point) int32 { - const CordFactor float64 = 1e7 - const R float64 = float64(6371000) // metres - lat1 := float64(p1.Latitude) / CordFactor - lat2 := float64(p2.Latitude) / CordFactor - lng1 := float64(p1.Longitude) / CordFactor - lng2 := float64(p2.Longitude) / CordFactor - φ1 := toRadians(lat1) - φ2 := toRadians(lat2) - Δφ := toRadians(lat2 - lat1) - Δλ := toRadians(lng2 - lng1) - - a := math.Sin(Δφ/2)*math.Sin(Δφ/2) + - math.Cos(φ1)*math.Cos(φ2)* - math.Sin(Δλ/2)*math.Sin(Δλ/2) - c := 2 * math.Atan2(math.Sqrt(a), math.Sqrt(1-a)) - - distance := R * c - return int32(distance) -} - -func inRange(point *pb.Point, rect *pb.Rectangle) bool { - left := math.Min(float64(rect.Lo.Longitude), float64(rect.Hi.Longitude)) - right := math.Max(float64(rect.Lo.Longitude), float64(rect.Hi.Longitude)) - top := math.Max(float64(rect.Lo.Latitude), float64(rect.Hi.Latitude)) - bottom := math.Min(float64(rect.Lo.Latitude), float64(rect.Hi.Latitude)) - - if float64(point.Longitude) >= left && - float64(point.Longitude) <= right && - float64(point.Latitude) >= bottom && - float64(point.Latitude) <= top { - return true - } - return false -} - -func serialize(point *pb.Point) string { - return fmt.Sprintf("%d %d", point.Latitude, point.Longitude) -} - -func newServer() *routeGuideServer { - s := new(routeGuideServer) - s.loadFeatures(*jsonDBFile) - s.routeNotes = make(map[string][]*pb.RouteNote) - return s -} - -func main() { - flag.Parse() - lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", *port)) - if err != nil { - log.Fatalf("failed to listen: %v", err) - } - var opts []grpc.ServerOption - if *tls { - if *certFile == "" { - *certFile = testdata.Path("server1.pem") - } - if *keyFile == "" { - *keyFile = testdata.Path("server1.key") - } - creds, err := credentials.NewServerTLSFromFile(*certFile, *keyFile) - if err != nil { - log.Fatalf("Failed to generate credentials %v", err) - } - opts = []grpc.ServerOption{grpc.Creds(creds)} - } - grpcServer := grpc.NewServer(opts...) - pb.RegisterRouteGuideServer(grpcServer, newServer()) - grpcServer.Serve(lis) -} diff --git a/vendor/google.golang.org/grpc/stress/client/main.go b/vendor/google.golang.org/grpc/stress/client/main.go deleted file mode 100644 index 635f1ad38..000000000 --- a/vendor/google.golang.org/grpc/stress/client/main.go +++ /dev/null @@ -1,336 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -//go:generate protoc -I ../grpc_testing --go_out=plugins=grpc:../grpc_testing ../grpc_testing/metrics.proto - -// client starts an interop client to do stress test and a metrics server to report qps. -package main - -import ( - "flag" - "fmt" - "math/rand" - "net" - "strconv" - "strings" - "sync" - "time" - - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/interop" - testpb "google.golang.org/grpc/interop/grpc_testing" - metricspb "google.golang.org/grpc/stress/grpc_testing" - "google.golang.org/grpc/testdata" -) - -var ( - serverAddresses = flag.String("server_addresses", "localhost:8080", "a list of server addresses") - testCases = flag.String("test_cases", "", "a list of test cases along with the relative weights") - testDurationSecs = flag.Int("test_duration_secs", -1, "test duration in seconds") - numChannelsPerServer = flag.Int("num_channels_per_server", 1, "Number of channels (i.e connections) to each server") - numStubsPerChannel = flag.Int("num_stubs_per_channel", 1, "Number of client stubs per each connection to server") - metricsPort = flag.Int("metrics_port", 8081, "The port at which the stress client exposes QPS metrics") - useTLS = flag.Bool("use_tls", false, "Connection uses TLS if true, else plain TCP") - testCA = flag.Bool("use_test_ca", false, "Whether to replace platform root CAs with test CA as the CA root") - tlsServerName = flag.String("server_host_override", "foo.test.google.fr", "The server name use to verify the hostname returned by TLS handshake if it is not empty. Otherwise, --server_host is used.") - caFile = flag.String("ca_file", "", "The file containning the CA root cert file") -) - -// testCaseWithWeight contains the test case type and its weight. -type testCaseWithWeight struct { - name string - weight int -} - -// parseTestCases converts test case string to a list of struct testCaseWithWeight. -func parseTestCases(testCaseString string) []testCaseWithWeight { - testCaseStrings := strings.Split(testCaseString, ",") - testCases := make([]testCaseWithWeight, len(testCaseStrings)) - for i, str := range testCaseStrings { - testCase := strings.Split(str, ":") - if len(testCase) != 2 { - panic(fmt.Sprintf("invalid test case with weight: %s", str)) - } - // Check if test case is supported. 
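// (Illustrative note, not part of the deleted file.) The -test_cases
// flag parsed here takes name:weight pairs, e.g.
//   -test_cases="empty_unary:20,large_unary:30,ping_pong:50"
// Only the names accepted by the switch below are valid; with these
// weights the selector further down picks ping_pong for 50 of every
// 100 draws (rand.Intn over the summed weight, 20+30+50 = 100).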
- switch testCase[0] { - case - "empty_unary", - "large_unary", - "client_streaming", - "server_streaming", - "ping_pong", - "empty_stream", - "timeout_on_sleeping_server", - "cancel_after_begin", - "cancel_after_first_response", - "status_code_and_message", - "custom_metadata": - default: - panic(fmt.Sprintf("unknown test type: %s", testCase[0])) - } - testCases[i].name = testCase[0] - w, err := strconv.Atoi(testCase[1]) - if err != nil { - panic(fmt.Sprintf("%v", err)) - } - testCases[i].weight = w - } - return testCases -} - -// weightedRandomTestSelector defines a weighted random selector for test case types. -type weightedRandomTestSelector struct { - tests []testCaseWithWeight - totalWeight int -} - -// newWeightedRandomTestSelector constructs a weightedRandomTestSelector with the given list of testCaseWithWeight. -func newWeightedRandomTestSelector(tests []testCaseWithWeight) *weightedRandomTestSelector { - var totalWeight int - for _, t := range tests { - totalWeight += t.weight - } - rand.Seed(time.Now().UnixNano()) - return &weightedRandomTestSelector{tests, totalWeight} -} - -func (selector weightedRandomTestSelector) getNextTest() string { - random := rand.Intn(selector.totalWeight) - var weightSofar int - for _, test := range selector.tests { - weightSofar += test.weight - if random < weightSofar { - return test.name - } - } - panic("no test case selected by weightedRandomTestSelector") -} - -// gauge stores the qps of one interop client (one stub). -type gauge struct { - mutex sync.RWMutex - val int64 -} - -func (g *gauge) set(v int64) { - g.mutex.Lock() - defer g.mutex.Unlock() - g.val = v -} - -func (g *gauge) get() int64 { - g.mutex.RLock() - defer g.mutex.RUnlock() - return g.val -} - -// server implements metrics server functions. -type server struct { - mutex sync.RWMutex - // gauges is a map from /stress_test/server_/channel_/stub_/qps to its qps gauge. - gauges map[string]*gauge -} - -// newMetricsServer returns a new metrics server. -func newMetricsServer() *server { - return &server{gauges: make(map[string]*gauge)} -} - -// GetAllGauges returns all gauges. -func (s *server) GetAllGauges(in *metricspb.EmptyMessage, stream metricspb.MetricsService_GetAllGaugesServer) error { - s.mutex.RLock() - defer s.mutex.RUnlock() - - for name, gauge := range s.gauges { - if err := stream.Send(&metricspb.GaugeResponse{Name: name, Value: &metricspb.GaugeResponse_LongValue{LongValue: gauge.get()}}); err != nil { - return err - } - } - return nil -} - -// GetGauge returns the gauge for the given name. -func (s *server) GetGauge(ctx context.Context, in *metricspb.GaugeRequest) (*metricspb.GaugeResponse, error) { - s.mutex.RLock() - defer s.mutex.RUnlock() - - if g, ok := s.gauges[in.Name]; ok { - return &metricspb.GaugeResponse{Name: in.Name, Value: &metricspb.GaugeResponse_LongValue{LongValue: g.get()}}, nil - } - return nil, grpc.Errorf(codes.InvalidArgument, "gauge with name %s not found", in.Name) -} - -// createGauge creates a gauge using the given name in metrics server. -func (s *server) createGauge(name string) *gauge { - s.mutex.Lock() - defer s.mutex.Unlock() - - if _, ok := s.gauges[name]; ok { - // gauge already exists. 
- panic(fmt.Sprintf("gauge %s already exists", name)) - } - var g gauge - s.gauges[name] = &g - return &g -} - -func startServer(server *server, port int) { - lis, err := net.Listen("tcp", ":"+strconv.Itoa(port)) - if err != nil { - grpclog.Fatalf("failed to listen: %v", err) - } - - s := grpc.NewServer() - metricspb.RegisterMetricsServiceServer(s, server) - s.Serve(lis) - -} - -// performRPCs uses weightedRandomTestSelector to select test case and runs the tests. -func performRPCs(gauge *gauge, conn *grpc.ClientConn, selector *weightedRandomTestSelector, stop <-chan bool) { - client := testpb.NewTestServiceClient(conn) - var numCalls int64 - startTime := time.Now() - for { - test := selector.getNextTest() - switch test { - case "empty_unary": - interop.DoEmptyUnaryCall(client, grpc.FailFast(false)) - case "large_unary": - interop.DoLargeUnaryCall(client, grpc.FailFast(false)) - case "client_streaming": - interop.DoClientStreaming(client, grpc.FailFast(false)) - case "server_streaming": - interop.DoServerStreaming(client, grpc.FailFast(false)) - case "ping_pong": - interop.DoPingPong(client, grpc.FailFast(false)) - case "empty_stream": - interop.DoEmptyStream(client, grpc.FailFast(false)) - case "timeout_on_sleeping_server": - interop.DoTimeoutOnSleepingServer(client, grpc.FailFast(false)) - case "cancel_after_begin": - interop.DoCancelAfterBegin(client, grpc.FailFast(false)) - case "cancel_after_first_response": - interop.DoCancelAfterFirstResponse(client, grpc.FailFast(false)) - case "status_code_and_message": - interop.DoStatusCodeAndMessage(client, grpc.FailFast(false)) - case "custom_metadata": - interop.DoCustomMetadata(client, grpc.FailFast(false)) - } - numCalls++ - gauge.set(int64(float64(numCalls) / time.Since(startTime).Seconds())) - - select { - case <-stop: - return - default: - } - } -} - -func logParameterInfo(addresses []string, tests []testCaseWithWeight) { - grpclog.Printf("server_addresses: %s", *serverAddresses) - grpclog.Printf("test_cases: %s", *testCases) - grpclog.Printf("test_duration_secs: %d", *testDurationSecs) - grpclog.Printf("num_channels_per_server: %d", *numChannelsPerServer) - grpclog.Printf("num_stubs_per_channel: %d", *numStubsPerChannel) - grpclog.Printf("metrics_port: %d", *metricsPort) - grpclog.Printf("use_tls: %t", *useTLS) - grpclog.Printf("use_test_ca: %t", *testCA) - grpclog.Printf("server_host_override: %s", *tlsServerName) - - grpclog.Println("addresses:") - for i, addr := range addresses { - grpclog.Printf("%d. %s\n", i+1, addr) - } - grpclog.Println("tests:") - for i, test := range tests { - grpclog.Printf("%d. %v\n", i+1, test) - } -} - -func newConn(address string, useTLS, testCA bool, tlsServerName string) (*grpc.ClientConn, error) { - var opts []grpc.DialOption - if useTLS { - var sn string - if tlsServerName != "" { - sn = tlsServerName - } - var creds credentials.TransportCredentials - if testCA { - var err error - if *caFile == "" { - *caFile = testdata.Path("ca.pem") - } - creds, err = credentials.NewClientTLSFromFile(*caFile, sn) - if err != nil { - grpclog.Fatalf("Failed to create TLS credentials %v", err) - } - } else { - creds = credentials.NewClientTLSFromCert(nil, sn) - } - opts = append(opts, grpc.WithTransportCredentials(creds)) - } else { - opts = append(opts, grpc.WithInsecure()) - } - return grpc.Dial(address, opts...) 
-} - -func main() { - flag.Parse() - addresses := strings.Split(*serverAddresses, ",") - tests := parseTestCases(*testCases) - logParameterInfo(addresses, tests) - testSelector := newWeightedRandomTestSelector(tests) - metricsServer := newMetricsServer() - - var wg sync.WaitGroup - wg.Add(len(addresses) * *numChannelsPerServer * *numStubsPerChannel) - stop := make(chan bool) - - for serverIndex, address := range addresses { - for connIndex := 0; connIndex < *numChannelsPerServer; connIndex++ { - conn, err := newConn(address, *useTLS, *testCA, *tlsServerName) - if err != nil { - grpclog.Fatalf("Fail to dial: %v", err) - } - defer conn.Close() - for clientIndex := 0; clientIndex < *numStubsPerChannel; clientIndex++ { - name := fmt.Sprintf("/stress_test/server_%d/channel_%d/stub_%d/qps", serverIndex+1, connIndex+1, clientIndex+1) - go func() { - defer wg.Done() - g := metricsServer.createGauge(name) - performRPCs(g, conn, testSelector, stop) - }() - } - - } - } - go startServer(metricsServer, *metricsPort) - if *testDurationSecs > 0 { - time.Sleep(time.Duration(*testDurationSecs) * time.Second) - close(stop) - } - wg.Wait() - grpclog.Printf(" ===== ALL DONE ===== ") - -} diff --git a/vendor/google.golang.org/grpc/stress/grpc_testing/metrics.pb.go b/vendor/google.golang.org/grpc/stress/grpc_testing/metrics.pb.go deleted file mode 100644 index 466668a4d..000000000 --- a/vendor/google.golang.org/grpc/stress/grpc_testing/metrics.pb.go +++ /dev/null @@ -1,374 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: metrics.proto - -/* -Package grpc_testing is a generated protocol buffer package. - -It is generated from these files: - metrics.proto - -It has these top-level messages: - GaugeResponse - GaugeRequest - EmptyMessage -*/ -package grpc_testing - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// Response message containing the gauge name and value -type GaugeResponse struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Types that are valid to be assigned to Value: - // *GaugeResponse_LongValue - // *GaugeResponse_DoubleValue - // *GaugeResponse_StringValue - Value isGaugeResponse_Value `protobuf_oneof:"value"` -} - -func (m *GaugeResponse) Reset() { *m = GaugeResponse{} } -func (m *GaugeResponse) String() string { return proto.CompactTextString(m) } -func (*GaugeResponse) ProtoMessage() {} -func (*GaugeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -type isGaugeResponse_Value interface { - isGaugeResponse_Value() -} - -type GaugeResponse_LongValue struct { - LongValue int64 `protobuf:"varint,2,opt,name=long_value,json=longValue,oneof"` -} -type GaugeResponse_DoubleValue struct { - DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,oneof"` -} -type GaugeResponse_StringValue struct { - StringValue string `protobuf:"bytes,4,opt,name=string_value,json=stringValue,oneof"` -} - -func (*GaugeResponse_LongValue) isGaugeResponse_Value() {} -func (*GaugeResponse_DoubleValue) isGaugeResponse_Value() {} -func (*GaugeResponse_StringValue) isGaugeResponse_Value() {} - -func (m *GaugeResponse) GetValue() isGaugeResponse_Value { - if m != nil { - return m.Value - } - return nil -} - -func (m *GaugeResponse) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *GaugeResponse) GetLongValue() int64 { - if x, ok := m.GetValue().(*GaugeResponse_LongValue); ok { - return x.LongValue - } - return 0 -} - -func (m *GaugeResponse) GetDoubleValue() float64 { - if x, ok := m.GetValue().(*GaugeResponse_DoubleValue); ok { - return x.DoubleValue - } - return 0 -} - -func (m *GaugeResponse) GetStringValue() string { - if x, ok := m.GetValue().(*GaugeResponse_StringValue); ok { - return x.StringValue - } - return "" -} - -// XXX_OneofFuncs is for the internal use of the proto package. 
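Callers read the GaugeResponse oneof above by type-switching on the generated wrapper types; a sketch assuming the vendored import path (illustrative, not part of this patch):

package main

import (
	"fmt"

	metricspb "google.golang.org/grpc/stress/grpc_testing"
)

// describe formats a GaugeResponse whichever oneof branch is set.
func describe(r *metricspb.GaugeResponse) string {
	switch v := r.Value.(type) {
	case *metricspb.GaugeResponse_LongValue:
		return fmt.Sprintf("%s = %d", r.Name, v.LongValue)
	case *metricspb.GaugeResponse_DoubleValue:
		return fmt.Sprintf("%s = %g", r.Name, v.DoubleValue)
	case *metricspb.GaugeResponse_StringValue:
		return fmt.Sprintf("%s = %q", r.Name, v.StringValue)
	default:
		return r.Name + " (value unset)"
	}
}

func main() {
	r := &metricspb.GaugeResponse{
		Name:  "/stress_test/server_1/channel_1/stub_1/qps",
		Value: &metricspb.GaugeResponse_LongValue{LongValue: 1234},
	}
	fmt.Println(describe(r)) // /stress_test/server_1/channel_1/stub_1/qps = 1234
}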
-func (*GaugeResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _GaugeResponse_OneofMarshaler, _GaugeResponse_OneofUnmarshaler, _GaugeResponse_OneofSizer, []interface{}{ - (*GaugeResponse_LongValue)(nil), - (*GaugeResponse_DoubleValue)(nil), - (*GaugeResponse_StringValue)(nil), - } -} - -func _GaugeResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*GaugeResponse) - // value - switch x := m.Value.(type) { - case *GaugeResponse_LongValue: - b.EncodeVarint(2<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.LongValue)) - case *GaugeResponse_DoubleValue: - b.EncodeVarint(3<<3 | proto.WireFixed64) - b.EncodeFixed64(math.Float64bits(x.DoubleValue)) - case *GaugeResponse_StringValue: - b.EncodeVarint(4<<3 | proto.WireBytes) - b.EncodeStringBytes(x.StringValue) - case nil: - default: - return fmt.Errorf("GaugeResponse.Value has unexpected type %T", x) - } - return nil -} - -func _GaugeResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*GaugeResponse) - switch tag { - case 2: // value.long_value - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Value = &GaugeResponse_LongValue{int64(x)} - return true, err - case 3: // value.double_value - if wire != proto.WireFixed64 { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeFixed64() - m.Value = &GaugeResponse_DoubleValue{math.Float64frombits(x)} - return true, err - case 4: // value.string_value - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.Value = &GaugeResponse_StringValue{x} - return true, err - default: - return false, nil - } -} - -func _GaugeResponse_OneofSizer(msg proto.Message) (n int) { - m := msg.(*GaugeResponse) - // value - switch x := m.Value.(type) { - case *GaugeResponse_LongValue: - n += proto.SizeVarint(2<<3 | proto.WireVarint) - n += proto.SizeVarint(uint64(x.LongValue)) - case *GaugeResponse_DoubleValue: - n += proto.SizeVarint(3<<3 | proto.WireFixed64) - n += 8 - case *GaugeResponse_StringValue: - n += proto.SizeVarint(4<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(len(x.StringValue))) - n += len(x.StringValue) - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -// Request message containing the gauge name -type GaugeRequest struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` -} - -func (m *GaugeRequest) Reset() { *m = GaugeRequest{} } -func (m *GaugeRequest) String() string { return proto.CompactTextString(m) } -func (*GaugeRequest) ProtoMessage() {} -func (*GaugeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -func (m *GaugeRequest) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -type EmptyMessage struct { -} - -func (m *EmptyMessage) Reset() { *m = EmptyMessage{} } -func (m *EmptyMessage) String() string { return proto.CompactTextString(m) } -func (*EmptyMessage) ProtoMessage() {} -func (*EmptyMessage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -func init() { - proto.RegisterType((*GaugeResponse)(nil), "grpc.testing.GaugeResponse") - proto.RegisterType((*GaugeRequest)(nil), "grpc.testing.GaugeRequest") - proto.RegisterType((*EmptyMessage)(nil), "grpc.testing.EmptyMessage") -} - -// 
Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for MetricsService service - -type MetricsServiceClient interface { - // Returns the values of all the gauges that are currently being maintained by - // the service - GetAllGauges(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (MetricsService_GetAllGaugesClient, error) - // Returns the value of one gauge - GetGauge(ctx context.Context, in *GaugeRequest, opts ...grpc.CallOption) (*GaugeResponse, error) -} - -type metricsServiceClient struct { - cc *grpc.ClientConn -} - -func NewMetricsServiceClient(cc *grpc.ClientConn) MetricsServiceClient { - return &metricsServiceClient{cc} -} - -func (c *metricsServiceClient) GetAllGauges(ctx context.Context, in *EmptyMessage, opts ...grpc.CallOption) (MetricsService_GetAllGaugesClient, error) { - stream, err := grpc.NewClientStream(ctx, &_MetricsService_serviceDesc.Streams[0], c.cc, "/grpc.testing.MetricsService/GetAllGauges", opts...) - if err != nil { - return nil, err - } - x := &metricsServiceGetAllGaugesClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type MetricsService_GetAllGaugesClient interface { - Recv() (*GaugeResponse, error) - grpc.ClientStream -} - -type metricsServiceGetAllGaugesClient struct { - grpc.ClientStream -} - -func (x *metricsServiceGetAllGaugesClient) Recv() (*GaugeResponse, error) { - m := new(GaugeResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *metricsServiceClient) GetGauge(ctx context.Context, in *GaugeRequest, opts ...grpc.CallOption) (*GaugeResponse, error) { - out := new(GaugeResponse) - err := grpc.Invoke(ctx, "/grpc.testing.MetricsService/GetGauge", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -// Server API for MetricsService service - -type MetricsServiceServer interface { - // Returns the values of all the gauges that are currently being maintained by - // the service - GetAllGauges(*EmptyMessage, MetricsService_GetAllGaugesServer) error - // Returns the value of one gauge - GetGauge(context.Context, *GaugeRequest) (*GaugeResponse, error) -} - -func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) { - s.RegisterService(&_MetricsService_serviceDesc, srv) -} - -func _MetricsService_GetAllGauges_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(EmptyMessage) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(MetricsServiceServer).GetAllGauges(m, &metricsServiceGetAllGaugesServer{stream}) -} - -type MetricsService_GetAllGaugesServer interface { - Send(*GaugeResponse) error - grpc.ServerStream -} - -type metricsServiceGetAllGaugesServer struct { - grpc.ServerStream -} - -func (x *metricsServiceGetAllGaugesServer) Send(m *GaugeResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _MetricsService_GetGauge_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GaugeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MetricsServiceServer).GetGauge(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/grpc.testing.MetricsService/GetGauge", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MetricsServiceServer).GetGauge(ctx, req.(*GaugeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _MetricsService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "grpc.testing.MetricsService", - HandlerType: (*MetricsServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetGauge", - Handler: _MetricsService_GetGauge_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "GetAllGauges", - Handler: _MetricsService_GetAllGauges_Handler, - ServerStreams: true, - }, - }, - Metadata: "metrics.proto", -} - -func init() { proto.RegisterFile("metrics.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 256 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0x3f, 0x4f, 0xc3, 0x30, - 0x10, 0xc5, 0x6b, 0x5a, 0xfe, 0xf4, 0x70, 0x3b, 0x78, 0xaa, 0xca, 0x40, 0x14, 0x96, 0x4c, 0x11, - 0x82, 0x4f, 0x00, 0x08, 0xa5, 0x0c, 0x5d, 0x82, 0xc4, 0x8a, 0xd2, 0x70, 0xb2, 0x22, 0x39, 0x71, - 0xf0, 0x5d, 0x2a, 0xf1, 0x49, 0x58, 0xf9, 0xa8, 0xc8, 0x4e, 0x55, 0xa5, 0x08, 0x75, 0xb3, 0x7e, - 0xf7, 0xfc, 0xfc, 0x9e, 0x0f, 0x66, 0x35, 0xb2, 0xab, 0x4a, 0x4a, 0x5b, 0x67, 0xd9, 0x2a, 0xa9, - 0x5d, 0x5b, 0xa6, 0x8c, 0xc4, 0x55, 0xa3, 0xe3, 0x6f, 0x01, 0xb3, 0xac, 0xe8, 0x34, 0xe6, 0x48, - 0xad, 0x6d, 0x08, 0x95, 0x82, 0x49, 0x53, 0xd4, 0xb8, 0x10, 0x91, 0x48, 0xa6, 0x79, 0x38, 0xab, - 0x6b, 0x00, 0x63, 0x1b, 0xfd, 0xbe, 0x2d, 0x4c, 0x87, 0x8b, 0x93, 0x48, 0x24, 0xe3, 0xd5, 0x28, - 0x9f, 0x7a, 0xf6, 0xe6, 0x91, 0xba, 0x01, 0xf9, 0x61, 0xbb, 0x8d, 0xc1, 0x9d, 0x64, 0x1c, 0x89, - 0x44, 0xac, 0x46, 0xf9, 0x65, 0x4f, 0xf7, 0x22, 0x62, 0x57, 0xed, 0x7d, 0x26, 0xfe, 0x05, 0x2f, - 0xea, 0x69, 0x10, 0x3d, 0x9e, 0xc3, 0x69, 0x98, 0xc6, 0x31, 0xc8, 0x5d, 0xb0, 0xcf, 0x0e, 0x89, - 0xff, 0xcb, 0x15, 0xcf, 0x41, 0x3e, 0xd7, 0x2d, 0x7f, 0xad, 0x91, 0xa8, 0xd0, 0x78, 0xf7, 0x23, - 
0x60, 0xbe, 0xee, 0xdb, 0xbe, 0xa2, 0xdb, 0x56, 0x25, 0xaa, 0x17, 0x90, 0x19, 0xf2, 0x83, 0x31, - 0xc1, 0x8c, 0xd4, 0x32, 0x1d, 0xf6, 0x4f, 0x87, 0xd7, 0x97, 0x57, 0x87, 0xb3, 0x83, 0x7f, 0xb9, - 0x15, 0xea, 0x09, 0x2e, 0x32, 0xe4, 0x40, 0xff, 0xda, 0x0c, 0x93, 0x1e, 0xb5, 0xd9, 0x9c, 0x85, - 0x2d, 0xdc, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x5e, 0x7d, 0xb2, 0xc9, 0x96, 0x01, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/grpc/stress/grpc_testing/metrics.proto b/vendor/google.golang.org/grpc/stress/grpc_testing/metrics.proto deleted file mode 100644 index 695040064..000000000 --- a/vendor/google.golang.org/grpc/stress/grpc_testing/metrics.proto +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2015-2016 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Contains the definitions for a metrics service and the type of metrics -// exposed by the service. -// -// Currently, 'Gauge' (i.e a metric that represents the measured value of -// something at an instant of time) is the only metric type supported by the -// service. -syntax = "proto3"; - -package grpc.testing; - -// Response message containing the gauge name and value -message GaugeResponse { - string name = 1; - oneof value { - int64 long_value = 2; - double double_value = 3; - string string_value = 4; - } -} - -// Request message containing the gauge name -message GaugeRequest { - string name = 1; -} - -message EmptyMessage {} - -service MetricsService { - // Returns the values of all the gauges that are currently being maintained by - // the service - rpc GetAllGauges(EmptyMessage) returns (stream GaugeResponse); - - // Returns the value of one gauge - rpc GetGauge(GaugeRequest) returns (GaugeResponse); -} diff --git a/vendor/google.golang.org/grpc/stress/metrics_client/main.go b/vendor/google.golang.org/grpc/stress/metrics_client/main.go deleted file mode 100644 index 6405ec85e..000000000 --- a/vendor/google.golang.org/grpc/stress/metrics_client/main.go +++ /dev/null @@ -1,82 +0,0 @@ -/* - * - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package main - -import ( - "flag" - "fmt" - "io" - - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/grpclog" - metricspb "google.golang.org/grpc/stress/grpc_testing" -) - -var ( - metricsServerAddress = flag.String("metrics_server_address", "", "The metrics server addresses in the format <hostname>:<port>") - totalOnly = flag.Bool("total_only", false, "If true, this prints only the total value of all gauges") -) - -func printMetrics(client metricspb.MetricsServiceClient, totalOnly bool) { - stream, err := client.GetAllGauges(context.Background(), &metricspb.EmptyMessage{}) - if err != nil { - grpclog.Fatalf("failed to call GetAllGauges: %v", err) - } - - var ( - overallQPS int64 - rpcStatus error - ) - for { - gaugeResponse, err := stream.Recv() - if err != nil { - rpcStatus = err - break - } - if _, ok := gaugeResponse.GetValue().(*metricspb.GaugeResponse_LongValue); !ok { - panic(fmt.Sprintf("gauge %s is not a long value", gaugeResponse.Name)) - } - v := gaugeResponse.GetLongValue() - if !totalOnly { - grpclog.Printf("%s: %d", gaugeResponse.Name, v) - } - overallQPS += v - } - if rpcStatus != io.EOF { - grpclog.Fatalf("failed to finish server streaming: %v", rpcStatus) - } - grpclog.Printf("overall qps: %d", overallQPS) -} - -func main() { - flag.Parse() - if *metricsServerAddress == "" { - grpclog.Fatalf("Metrics server address is empty.") - } - - conn, err := grpc.Dial(*metricsServerAddress, grpc.WithInsecure()) - if err != nil { - grpclog.Fatalf("cannot connect to metrics server: %v", err) - } - defer conn.Close() - - c := metricspb.NewMetricsServiceClient(conn) - printMetrics(c, *totalOnly) -} diff --git a/vendor/google.golang.org/grpc/test/bufconn/bufconn.go b/vendor/google.golang.org/grpc/test/bufconn/bufconn.go deleted file mode 100644 index bc0ab839f..000000000 --- a/vendor/google.golang.org/grpc/test/bufconn/bufconn.go +++ /dev/null @@ -1,229 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package bufconn provides a net.Conn implemented by a buffer and related -// dialing and listening functionality. -package bufconn - -import ( - "fmt" - "io" - "net" - "sync" - "time" -) - -// Listener implements a net.Listener that creates local, buffered net.Conns -// via its Accept and Dial method. -type Listener struct { - mu sync.Mutex - sz int - ch chan net.Conn - done chan struct{} -} - -var errClosed = fmt.Errorf("Closed") - -// Listen returns a Listener that can only be contacted by its own Dialers and -// creates buffered connections between the two. -func Listen(sz int) *Listener { - return &Listener{sz: sz, ch: make(chan net.Conn), done: make(chan struct{})} -} - -// Accept blocks until Dial is called, then returns a net.Conn for the server -// half of the connection. -func (l *Listener) Accept() (net.Conn, error) { - select { - case <-l.done: - return nil, errClosed - case c := <-l.ch: - return c, nil - } -} - -// Close stops the listener.
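The Listener above is typically handed to a gRPC client through a custom dialer. A minimal sketch of that wiring, assuming the usual imports and the grpc.WithDialer option from this generation of grpc-go; the "bufnet" target string is an arbitrary placeholder, since the dialer ignores it:

    lis := bufconn.Listen(1 << 20) // one buffered pipe per direction, 1 MiB each
    dialer := func(string, time.Duration) (net.Conn, error) { return lis.Dial() }
    conn, err := grpc.Dial("bufnet", grpc.WithInsecure(), grpc.WithDialer(dialer))
    if err != nil {
        log.Fatalf("grpc.Dial: %v", err)
    }
    defer conn.Close()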
-func (l *Listener) Close() error { - l.mu.Lock() - defer l.mu.Unlock() - select { - case <-l.done: - // Already closed. - break - default: - close(l.done) - } - return nil -} - -// Addr reports the address of the listener. -func (l *Listener) Addr() net.Addr { return addr{} } - -// Dial creates an in-memory full-duplex network connection, unblocks Accept by -// providing it the server half of the connection, and returns the client half -// of the connection. -func (l *Listener) Dial() (net.Conn, error) { - p1, p2 := newPipe(l.sz), newPipe(l.sz) - select { - case <-l.done: - return nil, errClosed - case l.ch <- &conn{p1, p2}: - return &conn{p2, p1}, nil - } -} - -type pipe struct { - mu sync.Mutex - - // buf contains the data in the pipe. It is a ring buffer of fixed capacity, - // with r and w pointing to the offset to read and write, respectively. - // - // Data is read between [r, w) and written to [w, r), wrapping around the end - // of the slice if necessary. - // - // The buffer is empty if r == len(buf), otherwise if r == w, it is full. - // - // w and r are always in the range [0, cap(buf)) and [0, len(buf)]. - buf []byte - w, r int - - wwait sync.Cond - rwait sync.Cond - closed bool -} - -func newPipe(sz int) *pipe { - p := &pipe{buf: make([]byte, 0, sz)} - p.wwait.L = &p.mu - p.rwait.L = &p.mu - return p -} - -func (p *pipe) empty() bool { - return p.r == len(p.buf) -} - -func (p *pipe) full() bool { - return p.r < len(p.buf) && p.r == p.w -} - -func (p *pipe) Read(b []byte) (n int, err error) { - p.mu.Lock() - defer p.mu.Unlock() - // Block until p has data. - for { - if p.closed { - return 0, io.ErrClosedPipe - } - if !p.empty() { - break - } - p.rwait.Wait() - } - wasFull := p.full() - - n = copy(b, p.buf[p.r:len(p.buf)]) - p.r += n - if p.r == cap(p.buf) { - p.r = 0 - p.buf = p.buf[:p.w] - } - - // Signal a blocked writer, if any - if wasFull { - p.wwait.Signal() - } - - return n, nil -} - -func (p *pipe) Write(b []byte) (n int, err error) { - p.mu.Lock() - defer p.mu.Unlock() - if p.closed { - return 0, io.ErrClosedPipe - } - for len(b) > 0 { - // Block until p is not full. - for { - if p.closed { - return 0, io.ErrClosedPipe - } - if !p.full() { - break - } - p.wwait.Wait() - } - wasEmpty := p.empty() - - end := cap(p.buf) - if p.w < p.r { - end = p.r - } - x := copy(p.buf[p.w:end], b) - b = b[x:] - n += x - p.w += x - if p.w > len(p.buf) { - p.buf = p.buf[:p.w] - } - if p.w == cap(p.buf) { - p.w = 0 - } - - // Signal a blocked reader, if any. - if wasEmpty { - p.rwait.Signal() - } - } - return n, nil -} - -func (p *pipe) Close() error { - p.mu.Lock() - defer p.mu.Unlock() - p.closed = true - // Signal all blocked readers and writers to return an error.
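// (Broadcast rather than Signal: several readers and writers may be blocked
// at once, and each must wake, observe p.closed, and return io.ErrClosedPipe.)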
- p.rwait.Broadcast() - p.wwait.Broadcast() - return nil -} - -type conn struct { - io.ReadCloser - io.WriteCloser -} - -func (c *conn) Close() error { - err1 := c.ReadCloser.Close() - err2 := c.WriteCloser.Close() - if err1 != nil { - return err1 - } - return err2 -} - -func (*conn) LocalAddr() net.Addr { return addr{} } -func (*conn) RemoteAddr() net.Addr { return addr{} } -func (c *conn) SetDeadline(t time.Time) error { return fmt.Errorf("unsupported") } -func (c *conn) SetReadDeadline(t time.Time) error { return fmt.Errorf("unsupported") } -func (c *conn) SetWriteDeadline(t time.Time) error { return fmt.Errorf("unsupported") } - -type addr struct{} - -func (addr) Network() string { return "bufconn" } -func (addr) String() string { return "bufconn" } diff --git a/vendor/google.golang.org/grpc/test/grpc_testing/test.pb.go b/vendor/google.golang.org/grpc/test/grpc_testing/test.pb.go deleted file mode 100644 index bb6efbe7d..000000000 --- a/vendor/google.golang.org/grpc/test/grpc_testing/test.pb.go +++ /dev/null @@ -1,788 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: grpc_testing/test.proto - -/* -Package grpc_testing is a generated protocol buffer package. - -It is generated from these files: - grpc_testing/test.proto - -It has these top-level messages: - Empty - Payload - SimpleRequest - SimpleResponse - StreamingInputCallRequest - StreamingInputCallResponse - ResponseParameters - StreamingOutputCallRequest - StreamingOutputCallResponse -*/ -package grpc_testing - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// The type of payload that should be returned. -type PayloadType int32 - -const ( - // Compressable text format. - PayloadType_COMPRESSABLE PayloadType = 0 - // Uncompressable binary format. - PayloadType_UNCOMPRESSABLE PayloadType = 1 - // Randomly chosen from all other formats defined in this enum. - PayloadType_RANDOM PayloadType = 2 -) - -var PayloadType_name = map[int32]string{ - 0: "COMPRESSABLE", - 1: "UNCOMPRESSABLE", - 2: "RANDOM", -} -var PayloadType_value = map[string]int32{ - "COMPRESSABLE": 0, - "UNCOMPRESSABLE": 1, - "RANDOM": 2, -} - -func (x PayloadType) Enum() *PayloadType { - p := new(PayloadType) - *p = x - return p -} -func (x PayloadType) String() string { - return proto.EnumName(PayloadType_name, int32(x)) -} -func (x *PayloadType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(PayloadType_value, data, "PayloadType") - if err != nil { - return err - } - *x = PayloadType(value) - return nil -} -func (PayloadType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -type Empty struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *Empty) Reset() { *m = Empty{} } -func (m *Empty) String() string { return proto.CompactTextString(m) } -func (*Empty) ProtoMessage() {} -func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -// A block of data, to simply increase gRPC message size. 
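Because test.proto is proto2 syntax, every scalar field in the generated structs below is a pointer, and every getter nil-checks both the receiver and the field. A hedged sketch of building a request with the github.com/golang/protobuf helper functions (proto.Int32, proto.Bool), which wrap literals in pointers:

    req := &grpc_testing.SimpleRequest{
        ResponseType: grpc_testing.PayloadType_COMPRESSABLE.Enum(),
        ResponseSize: proto.Int32(1024),
        FillUsername: proto.Bool(true),
    }
    // Getters are safe on nil receivers and unset fields alike.
    fmt.Println(req.GetResponseSize(), req.GetFillOauthScope()) // 1024 false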
-type Payload struct { - // The type of data in body. - Type *PayloadType `protobuf:"varint,1,opt,name=type,enum=grpc.testing.PayloadType" json:"type,omitempty"` - // Primary contents of payload. - Body []byte `protobuf:"bytes,2,opt,name=body" json:"body,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Payload) Reset() { *m = Payload{} } -func (m *Payload) String() string { return proto.CompactTextString(m) } -func (*Payload) ProtoMessage() {} -func (*Payload) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -func (m *Payload) GetType() PayloadType { - if m != nil && m.Type != nil { - return *m.Type - } - return PayloadType_COMPRESSABLE -} - -func (m *Payload) GetBody() []byte { - if m != nil { - return m.Body - } - return nil -} - -// Unary request. -type SimpleRequest struct { - // Desired payload type in the response from the server. - // If response_type is RANDOM, server randomly chooses one from other formats. - ResponseType *PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` - // Desired payload size in the response from the server. - // If response_type is COMPRESSABLE, this denotes the size before compression. - ResponseSize *int32 `protobuf:"varint,2,opt,name=response_size,json=responseSize" json:"response_size,omitempty"` - // Optional input payload sent along with the request. - Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` - // Whether SimpleResponse should include username. - FillUsername *bool `protobuf:"varint,4,opt,name=fill_username,json=fillUsername" json:"fill_username,omitempty"` - // Whether SimpleResponse should include OAuth scope. - FillOauthScope *bool `protobuf:"varint,5,opt,name=fill_oauth_scope,json=fillOauthScope" json:"fill_oauth_scope,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SimpleRequest) Reset() { *m = SimpleRequest{} } -func (m *SimpleRequest) String() string { return proto.CompactTextString(m) } -func (*SimpleRequest) ProtoMessage() {} -func (*SimpleRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -func (m *SimpleRequest) GetResponseType() PayloadType { - if m != nil && m.ResponseType != nil { - return *m.ResponseType - } - return PayloadType_COMPRESSABLE -} - -func (m *SimpleRequest) GetResponseSize() int32 { - if m != nil && m.ResponseSize != nil { - return *m.ResponseSize - } - return 0 -} - -func (m *SimpleRequest) GetPayload() *Payload { - if m != nil { - return m.Payload - } - return nil -} - -func (m *SimpleRequest) GetFillUsername() bool { - if m != nil && m.FillUsername != nil { - return *m.FillUsername - } - return false -} - -func (m *SimpleRequest) GetFillOauthScope() bool { - if m != nil && m.FillOauthScope != nil { - return *m.FillOauthScope - } - return false -} - -// Unary response, as configured by the request. -type SimpleResponse struct { - // Payload to increase message size. - Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` - // The user the request came from, for verifying authentication was - // successful when the client expected it. - Username *string `protobuf:"bytes,2,opt,name=username" json:"username,omitempty"` - // OAuth scope. 
- OauthScope *string `protobuf:"bytes,3,opt,name=oauth_scope,json=oauthScope" json:"oauth_scope,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SimpleResponse) Reset() { *m = SimpleResponse{} } -func (m *SimpleResponse) String() string { return proto.CompactTextString(m) } -func (*SimpleResponse) ProtoMessage() {} -func (*SimpleResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } - -func (m *SimpleResponse) GetPayload() *Payload { - if m != nil { - return m.Payload - } - return nil -} - -func (m *SimpleResponse) GetUsername() string { - if m != nil && m.Username != nil { - return *m.Username - } - return "" -} - -func (m *SimpleResponse) GetOauthScope() string { - if m != nil && m.OauthScope != nil { - return *m.OauthScope - } - return "" -} - -// Client-streaming request. -type StreamingInputCallRequest struct { - // Optional input payload sent along with the request. - Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StreamingInputCallRequest) Reset() { *m = StreamingInputCallRequest{} } -func (m *StreamingInputCallRequest) String() string { return proto.CompactTextString(m) } -func (*StreamingInputCallRequest) ProtoMessage() {} -func (*StreamingInputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } - -func (m *StreamingInputCallRequest) GetPayload() *Payload { - if m != nil { - return m.Payload - } - return nil -} - -// Client-streaming response. -type StreamingInputCallResponse struct { - // Aggregated size of payloads received from the client. - AggregatedPayloadSize *int32 `protobuf:"varint,1,opt,name=aggregated_payload_size,json=aggregatedPayloadSize" json:"aggregated_payload_size,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StreamingInputCallResponse) Reset() { *m = StreamingInputCallResponse{} } -func (m *StreamingInputCallResponse) String() string { return proto.CompactTextString(m) } -func (*StreamingInputCallResponse) ProtoMessage() {} -func (*StreamingInputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } - -func (m *StreamingInputCallResponse) GetAggregatedPayloadSize() int32 { - if m != nil && m.AggregatedPayloadSize != nil { - return *m.AggregatedPayloadSize - } - return 0 -} - -// Configuration for a particular response. -type ResponseParameters struct { - // Desired payload sizes in responses from the server. - // If response_type is COMPRESSABLE, this denotes the size before compression. - Size *int32 `protobuf:"varint,1,opt,name=size" json:"size,omitempty"` - // Desired interval between consecutive responses in the response stream in - // microseconds. - IntervalUs *int32 `protobuf:"varint,2,opt,name=interval_us,json=intervalUs" json:"interval_us,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ResponseParameters) Reset() { *m = ResponseParameters{} } -func (m *ResponseParameters) String() string { return proto.CompactTextString(m) } -func (*ResponseParameters) ProtoMessage() {} -func (*ResponseParameters) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } - -func (m *ResponseParameters) GetSize() int32 { - if m != nil && m.Size != nil { - return *m.Size - } - return 0 -} - -func (m *ResponseParameters) GetIntervalUs() int32 { - if m != nil && m.IntervalUs != nil { - return *m.IntervalUs - } - return 0 -} - -// Server-streaming request. -type StreamingOutputCallRequest struct { - // Desired payload type in the response from the server. 
- // If response_type is RANDOM, the payload from each response in the stream - // might be of different types. This is to simulate a mixed type of payload - // stream. - ResponseType *PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,enum=grpc.testing.PayloadType" json:"response_type,omitempty"` - // Configuration for each expected response message. - ResponseParameters []*ResponseParameters `protobuf:"bytes,2,rep,name=response_parameters,json=responseParameters" json:"response_parameters,omitempty"` - // Optional input payload sent along with the request. - Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StreamingOutputCallRequest) Reset() { *m = StreamingOutputCallRequest{} } -func (m *StreamingOutputCallRequest) String() string { return proto.CompactTextString(m) } -func (*StreamingOutputCallRequest) ProtoMessage() {} -func (*StreamingOutputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } - -func (m *StreamingOutputCallRequest) GetResponseType() PayloadType { - if m != nil && m.ResponseType != nil { - return *m.ResponseType - } - return PayloadType_COMPRESSABLE -} - -func (m *StreamingOutputCallRequest) GetResponseParameters() []*ResponseParameters { - if m != nil { - return m.ResponseParameters - } - return nil -} - -func (m *StreamingOutputCallRequest) GetPayload() *Payload { - if m != nil { - return m.Payload - } - return nil -} - -// Server-streaming response, as configured by the request and parameters. -type StreamingOutputCallResponse struct { - // Payload to increase response size. - Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StreamingOutputCallResponse) Reset() { *m = StreamingOutputCallResponse{} } -func (m *StreamingOutputCallResponse) String() string { return proto.CompactTextString(m) } -func (*StreamingOutputCallResponse) ProtoMessage() {} -func (*StreamingOutputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } - -func (m *StreamingOutputCallResponse) GetPayload() *Payload { - if m != nil { - return m.Payload - } - return nil -} - -func init() { - proto.RegisterType((*Empty)(nil), "grpc.testing.Empty") - proto.RegisterType((*Payload)(nil), "grpc.testing.Payload") - proto.RegisterType((*SimpleRequest)(nil), "grpc.testing.SimpleRequest") - proto.RegisterType((*SimpleResponse)(nil), "grpc.testing.SimpleResponse") - proto.RegisterType((*StreamingInputCallRequest)(nil), "grpc.testing.StreamingInputCallRequest") - proto.RegisterType((*StreamingInputCallResponse)(nil), "grpc.testing.StreamingInputCallResponse") - proto.RegisterType((*ResponseParameters)(nil), "grpc.testing.ResponseParameters") - proto.RegisterType((*StreamingOutputCallRequest)(nil), "grpc.testing.StreamingOutputCallRequest") - proto.RegisterType((*StreamingOutputCallResponse)(nil), "grpc.testing.StreamingOutputCallResponse") - proto.RegisterEnum("grpc.testing.PayloadType", PayloadType_name, PayloadType_value) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for TestService service - -type TestServiceClient interface { - // One empty request followed by one empty response. 
- EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) - // One request followed by one response. - // The server returns the client payload as-is. - UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) - // One request followed by a sequence of responses (streamed download). - // The server returns the payload with client desired type and sizes. - StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) - // A sequence of requests followed by one response (streamed upload). - // The server returns the aggregated size of client payload as the result. - StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) - // A sequence of requests with each request served by the server immediately. - // As one request could lead to multiple responses, this interface - // demonstrates the idea of full duplexing. - FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) - // A sequence of requests followed by a sequence of responses. - // The server buffers all the client requests and then serves them in order. A - // stream of responses are returned to the client when the server starts with - // first request. - HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) -} - -type testServiceClient struct { - cc *grpc.ClientConn -} - -func NewTestServiceClient(cc *grpc.ClientConn) TestServiceClient { - return &testServiceClient{cc} -} - -func (c *testServiceClient) EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { - out := new(Empty) - err := grpc.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { - out := new(SimpleResponse) - err := grpc.Invoke(ctx, "/grpc.testing.TestService/UnaryCall", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *testServiceClient) StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) { - stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[0], c.cc, "/grpc.testing.TestService/StreamingOutputCall", opts...) 
- if err != nil { - return nil, err - } - x := &testServiceStreamingOutputCallClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type TestService_StreamingOutputCallClient interface { - Recv() (*StreamingOutputCallResponse, error) - grpc.ClientStream -} - -type testServiceStreamingOutputCallClient struct { - grpc.ClientStream -} - -func (x *testServiceStreamingOutputCallClient) Recv() (*StreamingOutputCallResponse, error) { - m := new(StreamingOutputCallResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *testServiceClient) StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) { - stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[1], c.cc, "/grpc.testing.TestService/StreamingInputCall", opts...) - if err != nil { - return nil, err - } - x := &testServiceStreamingInputCallClient{stream} - return x, nil -} - -type TestService_StreamingInputCallClient interface { - Send(*StreamingInputCallRequest) error - CloseAndRecv() (*StreamingInputCallResponse, error) - grpc.ClientStream -} - -type testServiceStreamingInputCallClient struct { - grpc.ClientStream -} - -func (x *testServiceStreamingInputCallClient) Send(m *StreamingInputCallRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *testServiceStreamingInputCallClient) CloseAndRecv() (*StreamingInputCallResponse, error) { - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - m := new(StreamingInputCallResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *testServiceClient) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) { - stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[2], c.cc, "/grpc.testing.TestService/FullDuplexCall", opts...) - if err != nil { - return nil, err - } - x := &testServiceFullDuplexCallClient{stream} - return x, nil -} - -type TestService_FullDuplexCallClient interface { - Send(*StreamingOutputCallRequest) error - Recv() (*StreamingOutputCallResponse, error) - grpc.ClientStream -} - -type testServiceFullDuplexCallClient struct { - grpc.ClientStream -} - -func (x *testServiceFullDuplexCallClient) Send(m *StreamingOutputCallRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *testServiceFullDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { - m := new(StreamingOutputCallResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *testServiceClient) HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) { - stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[3], c.cc, "/grpc.testing.TestService/HalfDuplexCall", opts...) 
- if err != nil { - return nil, err - } - x := &testServiceHalfDuplexCallClient{stream} - return x, nil -} - -type TestService_HalfDuplexCallClient interface { - Send(*StreamingOutputCallRequest) error - Recv() (*StreamingOutputCallResponse, error) - grpc.ClientStream -} - -type testServiceHalfDuplexCallClient struct { - grpc.ClientStream -} - -func (x *testServiceHalfDuplexCallClient) Send(m *StreamingOutputCallRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *testServiceHalfDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { - m := new(StreamingOutputCallResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// Server API for TestService service - -type TestServiceServer interface { - // One empty request followed by one empty response. - EmptyCall(context.Context, *Empty) (*Empty, error) - // One request followed by one response. - // The server returns the client payload as-is. - UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) - // One request followed by a sequence of responses (streamed download). - // The server returns the payload with client desired type and sizes. - StreamingOutputCall(*StreamingOutputCallRequest, TestService_StreamingOutputCallServer) error - // A sequence of requests followed by one response (streamed upload). - // The server returns the aggregated size of client payload as the result. - StreamingInputCall(TestService_StreamingInputCallServer) error - // A sequence of requests with each request served by the server immediately. - // As one request could lead to multiple responses, this interface - // demonstrates the idea of full duplexing. - FullDuplexCall(TestService_FullDuplexCallServer) error - // A sequence of requests followed by a sequence of responses. - // The server buffers all the client requests and then serves them in order. A - // stream of responses are returned to the client when the server starts with - // first request. 
- HalfDuplexCall(TestService_HalfDuplexCallServer) error -} - -func RegisterTestServiceServer(s *grpc.Server, srv TestServiceServer) { - s.RegisterService(&_TestService_serviceDesc, srv) -} - -func _TestService_EmptyCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TestServiceServer).EmptyCall(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/grpc.testing.TestService/EmptyCall", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TestServiceServer).EmptyCall(ctx, req.(*Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SimpleRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TestServiceServer).UnaryCall(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/grpc.testing.TestService/UnaryCall", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TestServiceServer).UnaryCall(ctx, req.(*SimpleRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _TestService_StreamingOutputCall_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(StreamingOutputCallRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(TestServiceServer).StreamingOutputCall(m, &testServiceStreamingOutputCallServer{stream}) -} - -type TestService_StreamingOutputCallServer interface { - Send(*StreamingOutputCallResponse) error - grpc.ServerStream -} - -type testServiceStreamingOutputCallServer struct { - grpc.ServerStream -} - -func (x *testServiceStreamingOutputCallServer) Send(m *StreamingOutputCallResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _TestService_StreamingInputCall_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TestServiceServer).StreamingInputCall(&testServiceStreamingInputCallServer{stream}) -} - -type TestService_StreamingInputCallServer interface { - SendAndClose(*StreamingInputCallResponse) error - Recv() (*StreamingInputCallRequest, error) - grpc.ServerStream -} - -type testServiceStreamingInputCallServer struct { - grpc.ServerStream -} - -func (x *testServiceStreamingInputCallServer) SendAndClose(m *StreamingInputCallResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *testServiceStreamingInputCallServer) Recv() (*StreamingInputCallRequest, error) { - m := new(StreamingInputCallRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _TestService_FullDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TestServiceServer).FullDuplexCall(&testServiceFullDuplexCallServer{stream}) -} - -type TestService_FullDuplexCallServer interface { - Send(*StreamingOutputCallResponse) error - Recv() (*StreamingOutputCallRequest, error) - grpc.ServerStream -} - -type testServiceFullDuplexCallServer struct { - grpc.ServerStream -} - -func (x *testServiceFullDuplexCallServer) Send(m *StreamingOutputCallResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *testServiceFullDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { 
- m := new(StreamingOutputCallRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _TestService_HalfDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TestServiceServer).HalfDuplexCall(&testServiceHalfDuplexCallServer{stream}) -} - -type TestService_HalfDuplexCallServer interface { - Send(*StreamingOutputCallResponse) error - Recv() (*StreamingOutputCallRequest, error) - grpc.ServerStream -} - -type testServiceHalfDuplexCallServer struct { - grpc.ServerStream -} - -func (x *testServiceHalfDuplexCallServer) Send(m *StreamingOutputCallResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *testServiceHalfDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { - m := new(StreamingOutputCallRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _TestService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "grpc.testing.TestService", - HandlerType: (*TestServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "EmptyCall", - Handler: _TestService_EmptyCall_Handler, - }, - { - MethodName: "UnaryCall", - Handler: _TestService_UnaryCall_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "StreamingOutputCall", - Handler: _TestService_StreamingOutputCall_Handler, - ServerStreams: true, - }, - { - StreamName: "StreamingInputCall", - Handler: _TestService_StreamingInputCall_Handler, - ClientStreams: true, - }, - { - StreamName: "FullDuplexCall", - Handler: _TestService_FullDuplexCall_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "HalfDuplexCall", - Handler: _TestService_HalfDuplexCall_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "grpc_testing/test.proto", -} - -func init() { proto.RegisterFile("grpc_testing/test.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 582 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x54, 0xdd, 0x6e, 0xd3, 0x4c, - 0x10, 0xfd, 0xb6, 0x49, 0xbe, 0x34, 0x93, 0xd4, 0x8a, 0x36, 0xaa, 0xea, 0xba, 0x48, 0x58, 0xe6, - 0x02, 0x83, 0x44, 0x8a, 0x22, 0xc1, 0x25, 0xa8, 0xb4, 0xa9, 0xa8, 0x94, 0x26, 0xc1, 0x4e, 0xae, - 0xa3, 0x25, 0xd9, 0x1a, 0x4b, 0x8e, 0xbd, 0xac, 0xd7, 0x15, 0xe9, 0x05, 0x2f, 0xc6, 0xcb, 0xf0, - 0x10, 0x3c, 0x00, 0x5a, 0xff, 0x24, 0x4e, 0xe2, 0x8a, 0x14, 0x04, 0x57, 0xb6, 0x67, 0xce, 0x9c, - 0x39, 0xc7, 0x33, 0xbb, 0x70, 0xe4, 0x70, 0x36, 0x9d, 0x08, 0x1a, 0x0a, 0xd7, 0x77, 0x4e, 0xe5, - 0xb3, 0xcd, 0x78, 0x20, 0x02, 0xdc, 0x90, 0x89, 0x76, 0x9a, 0x30, 0xaa, 0x50, 0xe9, 0xce, 0x99, - 0x58, 0x18, 0x3d, 0xa8, 0x0e, 0xc9, 0xc2, 0x0b, 0xc8, 0x0c, 0xbf, 0x80, 0xb2, 0x58, 0x30, 0xaa, - 0x22, 0x1d, 0x99, 0x4a, 0xe7, 0xb8, 0x9d, 0x2f, 0x68, 0xa7, 0xa0, 0xd1, 0x82, 0x51, 0x2b, 0x86, - 0x61, 0x0c, 0xe5, 0x8f, 0xc1, 0x6c, 0xa1, 0xee, 0xe9, 0xc8, 0x6c, 0x58, 0xf1, 0xbb, 0xf1, 0x03, - 0xc1, 0x81, 0xed, 0xce, 0x99, 0x47, 0x2d, 0xfa, 0x39, 0xa2, 0xa1, 0xc0, 0x6f, 0xe0, 0x80, 0xd3, - 0x90, 0x05, 0x7e, 0x48, 0x27, 0xbb, 0xb1, 0x37, 0x32, 0xbc, 0xfc, 0xc2, 0x4f, 0x72, 0xf5, 0xa1, - 0x7b, 0x47, 0xe3, 0x76, 0x95, 0x15, 0xc8, 0x76, 0xef, 0x28, 0x3e, 0x85, 0x2a, 0x4b, 0x18, 0xd4, - 0x92, 0x8e, 0xcc, 0x7a, 0xe7, 0xb0, 0x90, 0xde, 0xca, 0x50, 0x92, 0xf5, 0xc6, 0xf5, 0xbc, 0x49, - 0x14, 0x52, 0xee, 0x93, 0x39, 0x55, 0xcb, 0x3a, 0x32, 0xf7, 0xad, 0x86, 0x0c, 0x8e, 0xd3, 0x18, - 0x36, 0xa1, 0x19, 0x83, 0x02, 0x12, 0x89, 0x4f, 0x93, 0x70, 0x1a, 0x30, 0xaa, 0x56, 0x62, 
0x9c, - 0x22, 0xe3, 0x03, 0x19, 0xb6, 0x65, 0xd4, 0xf8, 0x0a, 0x4a, 0xe6, 0x3a, 0x51, 0x95, 0x57, 0x84, - 0x76, 0x52, 0xa4, 0xc1, 0xfe, 0x52, 0x8c, 0xb4, 0x58, 0xb3, 0x96, 0xdf, 0xf8, 0x31, 0xd4, 0xf3, - 0x1a, 0x4a, 0x71, 0x1a, 0x82, 0x55, 0xff, 0x1e, 0x1c, 0xdb, 0x82, 0x53, 0x32, 0x77, 0x7d, 0xe7, - 0xca, 0x67, 0x91, 0x38, 0x27, 0x9e, 0x97, 0x4d, 0xe0, 0xa1, 0x52, 0x8c, 0x11, 0x68, 0x45, 0x6c, - 0xa9, 0xb3, 0xd7, 0x70, 0x44, 0x1c, 0x87, 0x53, 0x87, 0x08, 0x3a, 0x9b, 0xa4, 0x35, 0xc9, 0x68, - 0x50, 0x3c, 0x9a, 0xc3, 0x55, 0x3a, 0xa5, 0x96, 0x33, 0x32, 0xae, 0x00, 0x67, 0x1c, 0x43, 0xc2, - 0xc9, 0x9c, 0x0a, 0xca, 0x43, 0xb9, 0x44, 0xb9, 0xd2, 0xf8, 0x5d, 0xda, 0x75, 0x7d, 0x41, 0xf9, - 0x2d, 0x91, 0x03, 0x4a, 0x07, 0x0e, 0x59, 0x68, 0x1c, 0x1a, 0xdf, 0x51, 0x4e, 0xe1, 0x20, 0x12, - 0x1b, 0x86, 0xff, 0x74, 0xe5, 0x3e, 0x40, 0x6b, 0x59, 0xcf, 0x96, 0x52, 0xd5, 0x3d, 0xbd, 0x64, - 0xd6, 0x3b, 0xfa, 0x3a, 0xcb, 0xb6, 0x25, 0x0b, 0xf3, 0x6d, 0x9b, 0x0f, 0x5d, 0x50, 0xa3, 0x0f, - 0x27, 0x85, 0x0e, 0x7f, 0x73, 0xbd, 0x9e, 0xbf, 0x85, 0x7a, 0xce, 0x30, 0x6e, 0x42, 0xe3, 0x7c, - 0x70, 0x3d, 0xb4, 0xba, 0xb6, 0x7d, 0xf6, 0xae, 0xd7, 0x6d, 0xfe, 0x87, 0x31, 0x28, 0xe3, 0xfe, - 0x5a, 0x0c, 0x61, 0x80, 0xff, 0xad, 0xb3, 0xfe, 0xc5, 0xe0, 0xba, 0xb9, 0xd7, 0xf9, 0x56, 0x86, - 0xfa, 0x88, 0x86, 0xc2, 0xa6, 0xfc, 0xd6, 0x9d, 0x52, 0xfc, 0x0a, 0x6a, 0xf1, 0x05, 0x22, 0x65, - 0xe1, 0xd6, 0x7a, 0xf7, 0x38, 0xa1, 0x15, 0x05, 0xf1, 0x25, 0xd4, 0xc6, 0x3e, 0xe1, 0x49, 0xd9, - 0xc9, 0x3a, 0x62, 0xed, 0xe2, 0xd0, 0x1e, 0x15, 0x27, 0xd3, 0x1f, 0xe0, 0x41, 0xab, 0xe0, 0xff, - 0x60, 0x73, 0xa3, 0xe8, 0xde, 0x25, 0xd1, 0x9e, 0xed, 0x80, 0x4c, 0x7a, 0xbd, 0x44, 0xd8, 0x05, - 0xbc, 0x7d, 0x22, 0xf0, 0xd3, 0x7b, 0x28, 0x36, 0x4f, 0xa0, 0x66, 0xfe, 0x1a, 0x98, 0xb4, 0x32, - 0x65, 0x2b, 0xe5, 0x32, 0xf2, 0xbc, 0x8b, 0x88, 0x79, 0xf4, 0xcb, 0x5f, 0xf3, 0x64, 0xa2, 0xd8, - 0x95, 0xf2, 0x9e, 0x78, 0x37, 0xff, 0xa0, 0xd5, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb8, 0xa6, - 0x30, 0x01, 0x96, 0x06, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/grpc/test/grpc_testing/test.proto b/vendor/google.golang.org/grpc/test/grpc_testing/test.proto deleted file mode 100644 index fbd22dec9..000000000 --- a/vendor/google.golang.org/grpc/test/grpc_testing/test.proto +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright 2017 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// An integration test service that covers all the method signature permutations -// of unary/streaming requests/responses. -syntax = "proto2"; - -package grpc.testing; - -message Empty {} - -// The type of payload that should be returned. -enum PayloadType { - // Compressable text format. - COMPRESSABLE = 0; - - // Uncompressable binary format. - UNCOMPRESSABLE = 1; - - // Randomly chosen from all other formats defined in this enum. - RANDOM = 2; -} - -// A block of data, to simply increase gRPC message size. -message Payload { - // The type of data in body. - optional PayloadType type = 1; - // Primary contents of payload. 
- optional bytes body = 2; -} - -// Unary request. -message SimpleRequest { - // Desired payload type in the response from the server. - // If response_type is RANDOM, server randomly chooses one from other formats. - optional PayloadType response_type = 1; - - // Desired payload size in the response from the server. - // If response_type is COMPRESSABLE, this denotes the size before compression. - optional int32 response_size = 2; - - // Optional input payload sent along with the request. - optional Payload payload = 3; - - // Whether SimpleResponse should include username. - optional bool fill_username = 4; - - // Whether SimpleResponse should include OAuth scope. - optional bool fill_oauth_scope = 5; -} - -// Unary response, as configured by the request. -message SimpleResponse { - // Payload to increase message size. - optional Payload payload = 1; - - // The user the request came from, for verifying authentication was - // successful when the client expected it. - optional string username = 2; - - // OAuth scope. - optional string oauth_scope = 3; -} - -// Client-streaming request. -message StreamingInputCallRequest { - // Optional input payload sent along with the request. - optional Payload payload = 1; - - // Not expecting any payload from the response. -} - -// Client-streaming response. -message StreamingInputCallResponse { - // Aggregated size of payloads received from the client. - optional int32 aggregated_payload_size = 1; -} - -// Configuration for a particular response. -message ResponseParameters { - // Desired payload sizes in responses from the server. - // If response_type is COMPRESSABLE, this denotes the size before compression. - optional int32 size = 1; - - // Desired interval between consecutive responses in the response stream in - // microseconds. - optional int32 interval_us = 2; -} - -// Server-streaming request. -message StreamingOutputCallRequest { - // Desired payload type in the response from the server. - // If response_type is RANDOM, the payload from each response in the stream - // might be of different types. This is to simulate a mixed type of payload - // stream. - optional PayloadType response_type = 1; - - // Configuration for each expected response message. - repeated ResponseParameters response_parameters = 2; - - // Optional input payload sent along with the request. - optional Payload payload = 3; -} - -// Server-streaming response, as configured by the request and parameters. -message StreamingOutputCallResponse { - // Payload to increase response size. - optional Payload payload = 1; -} - -// A simple service to test the various types of RPCs and experiment with -// performance with various types of payload. -service TestService { - // One empty request followed by one empty response. - rpc EmptyCall(Empty) returns (Empty); - - // One request followed by one response. - // The server returns the client payload as-is. - rpc UnaryCall(SimpleRequest) returns (SimpleResponse); - - // One request followed by a sequence of responses (streamed download). - // The server returns the payload with client desired type and sizes. - rpc StreamingOutputCall(StreamingOutputCallRequest) - returns (stream StreamingOutputCallResponse); - - // A sequence of requests followed by one response (streamed upload). - // The server returns the aggregated size of client payload as the result. 
- rpc StreamingInputCall(stream StreamingInputCallRequest) - returns (StreamingInputCallResponse); - - // A sequence of requests with each request served by the server immediately. - // As one request could lead to multiple responses, this interface - // demonstrates the idea of full duplexing. - rpc FullDuplexCall(stream StreamingOutputCallRequest) - returns (stream StreamingOutputCallResponse); - - // A sequence of requests followed by a sequence of responses. - // The server buffers all the client requests and then serves them in order. A - // stream of responses are returned to the client when the server starts with - // first request. - rpc HalfDuplexCall(stream StreamingOutputCallRequest) - returns (stream StreamingOutputCallResponse); -} diff --git a/vendor/google.golang.org/grpc/test/leakcheck/leakcheck.go b/vendor/google.golang.org/grpc/test/leakcheck/leakcheck.go deleted file mode 100644 index 76f9fc541..000000000 --- a/vendor/google.golang.org/grpc/test/leakcheck/leakcheck.go +++ /dev/null @@ -1,118 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package leakcheck contains functions to check leaked goroutines. -// -// Call "defer leakcheck.Check(t)" at the beginning of tests. -package leakcheck - -import ( - "runtime" - "sort" - "strings" - "time" -) - -var goroutinesToIgnore = []string{ - "testing.Main(", - "testing.tRunner(", - "testing.(*M).", - "runtime.goexit", - "created by runtime.gc", - "created by runtime/trace.Start", - "interestingGoroutines", - "runtime.MHeap_Scavenger", - "signal.signal_recv", - "sigterm.handler", - "runtime_mcall", - "(*loggingT).flushDaemon", - "goroutine in C code", -} - -// RegisterIgnoreGoroutine appends s into the ignore goroutine list. The -// goroutines whose stack trace contains s will not be identified as leaked -// goroutines. Not thread-safe, only call this function in init(). -func RegisterIgnoreGoroutine(s string) { - goroutinesToIgnore = append(goroutinesToIgnore, s) -} - -func ignore(g string) bool { - sl := strings.SplitN(g, "\n", 2) - if len(sl) != 2 { - return true - } - stack := strings.TrimSpace(sl[1]) - if strings.HasPrefix(stack, "testing.RunTests") { - return true - } - - if stack == "" { - return true - } - - for _, s := range goroutinesToIgnore { - if strings.Contains(stack, s) { - return true - } - } - - return false -} - -// interestingGoroutines returns all goroutines we care about for the purpose of -// leak checking. It excludes testing or runtime ones. -func interestingGoroutines() (gs []string) { - buf := make([]byte, 2<<20) - buf = buf[:runtime.Stack(buf, true)] - for _, g := range strings.Split(string(buf), "\n\n") { - if !ignore(g) { - gs = append(gs, g) - } - } - sort.Strings(gs) - return -} - -// Errorfer is the interface that wraps the Errorf method. It's a subset of -// testing.TB to make it easy to use Check. 
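Since *testing.T satisfies Errorfer, the intended wiring is exactly what the package comment names: defer the check at the top of each test. A minimal sketch, with the ignore registration happening in init() as RegisterIgnoreGoroutine requires; the stack-trace substring "mypkg.backgroundPoller" is a hypothetical example:

    func init() {
        // A long-lived helper goroutine of this (hypothetical) test package
        // that should not be reported as a leak.
        leakcheck.RegisterIgnoreGoroutine("mypkg.backgroundPoller")
    }

    func TestUnaryRPC(t *testing.T) {
        defer leakcheck.Check(t) // fails the test if gRPC goroutines linger
        // ... start a server, run RPCs, tear everything down ...
    }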
-type Errorfer interface { - Errorf(format string, args ...interface{}) -} - -func check(efer Errorfer, timeout time.Duration) { - // Loop, waiting for goroutines to shut down. - // Wait up to timeout, but finish as quickly as possible. - deadline := time.Now().Add(timeout) - var leaked []string - for time.Now().Before(deadline) { - if leaked = interestingGoroutines(); len(leaked) == 0 { - return - } - time.Sleep(50 * time.Millisecond) - } - for _, g := range leaked { - efer.Errorf("Leaked goroutine: %v", g) - } -} - -// Check looks at the currently-running goroutines and checks if there are any -// interesting (created by gRPC) goroutines leaked. It waits up to 10 seconds -// in the error cases. -func Check(efer Errorfer) { - check(efer, 10*time.Second) -} diff --git a/vendor/google.golang.org/grpc/test/race.go b/vendor/google.golang.org/grpc/test/race.go deleted file mode 100644 index acfa0dfae..000000000 --- a/vendor/google.golang.org/grpc/test/race.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build race - -/* - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package test - -func init() { - raceMode = true -} diff --git a/vendor/google.golang.org/grpc/test/servertester.go b/vendor/google.golang.org/grpc/test/servertester.go deleted file mode 100644 index daeca0622..000000000 --- a/vendor/google.golang.org/grpc/test/servertester.go +++ /dev/null @@ -1,280 +0,0 @@ -/* - * Copyright 2016 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package test - -import ( - "bytes" - "errors" - "io" - "strings" - "testing" - "time" - - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" -) - -// This is a subset of http2's serverTester type. -// -// serverTester wraps an io.ReadWriter (acting like the underlying -// network connection) and provides utility methods to read and write -// http2 frames. -// -// NOTE(bradfitz): this could eventually be exported somewhere. Others -// have asked for it too. For now I'm still experimenting with the -// API and don't feel like maintaining a stable testing API.
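Taken together, the type below gives a test raw HTTP/2 access to a gRPC server. A hedged sketch of the usual call sequence, assuming clientConn is the client end of a connection whose other end the server under test is serving, and payload is a length-prefixed gRPC message built elsewhere:

    st := newServerTesterFromConn(t, clientConn)
    st.greet() // client preface + SETTINGS exchange
    st.writeHeadersGRPC(1, "/grpc.testing.TestService/UnaryCall")
    st.writeData(1, true, payload)
    _ = st.wantAnyFrame() // first server frame, e.g. HEADERS or WINDOW_UPDATE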
- -type serverTester struct { - cc io.ReadWriteCloser // client conn - t testing.TB - fr *http2.Framer - - // writing headers: - headerBuf bytes.Buffer - hpackEnc *hpack.Encoder - - // reading frames: - frc chan http2.Frame - frErrc chan error - readTimer *time.Timer -} - -func newServerTesterFromConn(t testing.TB, cc io.ReadWriteCloser) *serverTester { - st := &serverTester{ - t: t, - cc: cc, - frc: make(chan http2.Frame, 1), - frErrc: make(chan error, 1), - } - st.hpackEnc = hpack.NewEncoder(&st.headerBuf) - st.fr = http2.NewFramer(cc, cc) - st.fr.ReadMetaHeaders = hpack.NewDecoder(4096 /*initialHeaderTableSize*/, nil) - - return st -} - -func (st *serverTester) readFrame() (http2.Frame, error) { - go func() { - fr, err := st.fr.ReadFrame() - if err != nil { - st.frErrc <- err - } else { - st.frc <- fr - } - }() - t := time.NewTimer(2 * time.Second) - defer t.Stop() - select { - case f := <-st.frc: - return f, nil - case err := <-st.frErrc: - return nil, err - case <-t.C: - return nil, errors.New("timeout waiting for frame") - } -} - -// greet initiates the client's HTTP/2 connection into a state where -// frames may be sent. -func (st *serverTester) greet() { - st.writePreface() - st.writeInitialSettings() - st.wantSettings() - st.writeSettingsAck() - for { - f, err := st.readFrame() - if err != nil { - st.t.Fatal(err) - } - switch f := f.(type) { - case *http2.WindowUpdateFrame: - // grpc's transport/http2_server sends this - // before the settings ack. The Go http2 - // server uses a setting instead. - case *http2.SettingsFrame: - if f.IsAck() { - return - } - st.t.Fatalf("during greet, got non-ACK settings frame") - default: - st.t.Fatalf("during greet, unexpected frame type %T", f) - } - } -} - -func (st *serverTester) writePreface() { - n, err := st.cc.Write([]byte(http2.ClientPreface)) - if err != nil { - st.t.Fatalf("Error writing client preface: %v", err) - } - if n != len(http2.ClientPreface) { - st.t.Fatalf("Writing client preface, wrote %d bytes; want %d", n, len(http2.ClientPreface)) - } -} - -func (st *serverTester) writeInitialSettings() { - if err := st.fr.WriteSettings(); err != nil { - st.t.Fatalf("Error writing initial SETTINGS frame from client to server: %v", err) - } -} - -func (st *serverTester) writeSettingsAck() { - if err := st.fr.WriteSettingsAck(); err != nil { - st.t.Fatalf("Error writing ACK of server's SETTINGS: %v", err) - } -} - -func (st *serverTester) wantSettings() *http2.SettingsFrame { - f, err := st.readFrame() - if err != nil { - st.t.Fatalf("Error while expecting a SETTINGS frame: %v", err) - } - sf, ok := f.(*http2.SettingsFrame) - if !ok { - st.t.Fatalf("got a %T; want *SettingsFrame", f) - } - return sf -} - -func (st *serverTester) wantSettingsAck() { - f, err := st.readFrame() - if err != nil { - st.t.Fatal(err) - } - sf, ok := f.(*http2.SettingsFrame) - if !ok { - st.t.Fatalf("Wanting a settings ACK, received a %T", f) - } - if !sf.IsAck() { - st.t.Fatal("Settings Frame didn't have ACK set") - } -} - -// wait for any activity from the server -func (st *serverTester) wantAnyFrame() http2.Frame { - f, err := st.fr.ReadFrame() - if err != nil { - st.t.Fatal(err) - } - return f -} - -func (st *serverTester) encodeHeaderField(k, v string) { - err := st.hpackEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) - if err != nil { - st.t.Fatalf("HPACK encoding error for %q/%q: %v", k, v, err) - } -} - -// encodeHeader encodes headers and returns their HPACK bytes. headers -// must contain an even number of key/value pairs. 
There may be -// multiple pairs for keys (e.g. "cookie"). The :method, :path, and -// :scheme headers default to GET, / and https. -func (st *serverTester) encodeHeader(headers ...string) []byte { - if len(headers)%2 == 1 { - panic("odd number of kv args") - } - - st.headerBuf.Reset() - - if len(headers) == 0 { - // Fast path, mostly for benchmarks, so test code doesn't pollute - // profiles when we're looking to improve server allocations. - st.encodeHeaderField(":method", "GET") - st.encodeHeaderField(":path", "/") - st.encodeHeaderField(":scheme", "https") - return st.headerBuf.Bytes() - } - - if len(headers) == 2 && headers[0] == ":method" { - // Another fast path for benchmarks. - st.encodeHeaderField(":method", headers[1]) - st.encodeHeaderField(":path", "/") - st.encodeHeaderField(":scheme", "https") - return st.headerBuf.Bytes() - } - - pseudoCount := map[string]int{} - keys := []string{":method", ":path", ":scheme"} - vals := map[string][]string{ - ":method": {"GET"}, - ":path": {"/"}, - ":scheme": {"https"}, - } - for len(headers) > 0 { - k, v := headers[0], headers[1] - headers = headers[2:] - if _, ok := vals[k]; !ok { - keys = append(keys, k) - } - if strings.HasPrefix(k, ":") { - pseudoCount[k]++ - if pseudoCount[k] == 1 { - vals[k] = []string{v} - } else { - // Allows testing of invalid headers w/ dup pseudo fields. - vals[k] = append(vals[k], v) - } - } else { - vals[k] = append(vals[k], v) - } - } - for _, k := range keys { - for _, v := range vals[k] { - st.encodeHeaderField(k, v) - } - } - return st.headerBuf.Bytes() -} - -func (st *serverTester) writeHeadersGRPC(streamID uint32, path string) { - st.writeHeaders(http2.HeadersFrameParam{ - StreamID: streamID, - BlockFragment: st.encodeHeader( - ":method", "POST", - ":path", path, - "content-type", "application/grpc", - "te", "trailers", - ), - EndStream: false, - EndHeaders: true, - }) -} - -func (st *serverTester) writeHeaders(p http2.HeadersFrameParam) { - if err := st.fr.WriteHeaders(p); err != nil { - st.t.Fatalf("Error writing HEADERS: %v", err) - } -} - -func (st *serverTester) writeData(streamID uint32, endStream bool, data []byte) { - if err := st.fr.WriteData(streamID, endStream, data); err != nil { - st.t.Fatalf("Error writing DATA: %v", err) - } -} - -func (st *serverTester) writeRSTStream(streamID uint32, code http2.ErrCode) { - if err := st.fr.WriteRSTStream(streamID, code); err != nil { - st.t.Fatalf("Error writing RST_STREAM: %v", err) - } -} - -func (st *serverTester) writeDataPadded(streamID uint32, endStream bool, data, padding []byte) { - if err := st.fr.WriteDataPadded(streamID, endStream, data, padding); err != nil { - st.t.Fatalf("Error writing DATA with padding: %v", err) - } -} diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh deleted file mode 100755 index d006a4263..000000000 --- a/vendor/google.golang.org/grpc/vet.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/bin/bash - -set -ex # Exit on error; debugging enabled. -set -o pipefail # Fail a pipe if any sub-command fails. - -die() { - echo "$@" >&2 - exit 1 -} - -# TODO: Remove this check and the mangling below once "context" is imported -# directly. -if git status --porcelain | read; then - die "Uncommitted or untracked files found; commit changes first" -fi - -PATH="$GOPATH/bin:$GOROOT/bin:$PATH" - -# Check proto in manual runs or cron runs. -if [[ "$TRAVIS" != "true" || "$TRAVIS_EVENT_TYPE" = "cron" ]]; then - check_proto="true" -fi - -if [ "$1" = "-install" ]; then - go get -d \ - google.golang.org/grpc/... 
- go get -u \ - github.com/golang/lint/golint \ - golang.org/x/tools/cmd/goimports \ - honnef.co/go/tools/cmd/staticcheck \ - github.com/golang/protobuf/protoc-gen-go \ - golang.org/x/tools/cmd/stringer - if [[ "$check_proto" = "true" ]]; then - if [[ "$TRAVIS" = "true" ]]; then - PROTOBUF_VERSION=3.3.0 - PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip - pushd /home/travis - wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} - unzip ${PROTOC_FILENAME} - bin/protoc --version - popd - elif ! which protoc > /dev/null; then - die "Please install protoc into your path" - fi - fi - exit 0 -elif [[ "$#" -ne 0 ]]; then - die "Unknown argument(s): $*" -fi - -git ls-files "*.go" | xargs grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" 2>&1 | tee /dev/stderr | (! read) -gofmt -s -d -l . 2>&1 | tee /dev/stderr | (! read) -goimports -l . 2>&1 | tee /dev/stderr | (! read) -golint ./... 2>&1 | (grep -vE "(_mock|_string|grpc_lb_v1/doc|\.pb)\.go:" || true) | tee /dev/stderr | (! read) - -# Undo any edits made by this script. -cleanup() { - git reset --hard HEAD -} -trap cleanup EXIT - -# Rewrite golang.org/x/net/context -> context imports (see grpc/grpc-go#1484). -# TODO: Remove this mangling once "context" is imported directly (grpc/grpc-go#711). -git ls-files "*.go" | xargs sed -i 's:"golang.org/x/net/context":"context":' -set +o pipefail -# TODO: Stop filtering pb.go files once golang/protobuf#214 is fixed. -go tool vet -all . 2>&1 | grep -vF '.pb.go:' | tee /dev/stderr | (! read) -set -o pipefail -git reset --hard HEAD - -if [[ "$check_proto" = "true" ]]; then - PATH="/home/travis/bin:$PATH" make proto && \ - git status --porcelain 2>&1 | (! read) || \ - (git status; git --no-pager diff; exit 1) -fi - -# TODO(menghanl): fix errors in transport_test. -staticcheck -ignore google.golang.org/grpc/transport/transport_test.go:SA2002 ./... diff --git a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/register.go b/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/register.go deleted file mode 100644 index 6eb299eca..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/register.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cr - -const ( - GroupName = "cr.example.apiextensions.k8s.io" -) diff --git a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1/doc.go b/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1/doc.go deleted file mode 100644 index 73d79a45d..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package - -// Package v1 is the v1 version of the API. -// +groupName=cr.example.apiextensions.k8s.io -package v1 diff --git a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1/register.go b/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1/register.go deleted file mode 100644 index 65b65fb32..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1/register.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - - cr "k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr" -) - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: cr.GroupName, Version: "v1"} - -// Kind takes an unqualified kind and returns back a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -// Adds the list of known types to Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &Example{}, - &ExampleList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1/types.go b/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1/types.go deleted file mode 100644 index 274b30f61..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1/types.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:noStatus -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Example is a specification for an Example resource -type Example struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` - - Spec ExampleSpec `json:"spec"` - Status ExampleStatus `json:"status,omitempty"` -} - -// ExampleSpec is the spec for an Example resource -type ExampleSpec struct { - Foo string `json:"foo"` - Bar bool `json:"bar"` -} - -// ExampleStatus is the status for an Example resource -type ExampleStatus struct { - State ExampleState `json:"state,omitempty"` - Message string `json:"message,omitempty"` -} - -type ExampleState string - -const ( - ExampleStateCreated ExampleState = "Created" - ExampleStateProcessed ExampleState = "Processed" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ExampleList is a list of Example resources -type ExampleList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata"` - - Items []Example `json:"items"` -} diff --git a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1/zz_generated.deepcopy.go deleted file mode 100644 index 50828809d..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,118 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Example) DeepCopyInto(out *Example) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Example. -func (in *Example) DeepCopy() *Example { - if in == nil { - return nil - } - out := new(Example) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Example) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
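The generated deepcopy methods exist so callers can mutate API objects without corrupting shared copies, for example objects held in an informer cache. A minimal sketch, assuming an *Example obtained from such a cache; the package and helper names are illustrative:

    package controller

    import (
        crv1 "k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1"
    )

    // mutateSafely updates the status on a private deep copy, leaving the
    // (possibly shared) input object untouched.
    func mutateSafely(in *crv1.Example) *crv1.Example {
        out := in.DeepCopy()
        out.Status.State = crv1.ExampleStateProcessed
        out.Status.Message = "processed"
        return out
    }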
-func (in *ExampleList) DeepCopyInto(out *ExampleList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Example, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExampleList. -func (in *ExampleList) DeepCopy() *ExampleList { - if in == nil { - return nil - } - out := new(ExampleList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ExampleList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExampleSpec) DeepCopyInto(out *ExampleSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExampleSpec. -func (in *ExampleSpec) DeepCopy() *ExampleSpec { - if in == nil { - return nil - } - out := new(ExampleSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExampleStatus) DeepCopyInto(out *ExampleStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExampleStatus. -func (in *ExampleStatus) DeepCopy() *ExampleStatus { - if in == nil { - return nil - } - out := new(ExampleStatus) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/clientset.go b/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/clientset.go deleted file mode 100644 index 92ff15819..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/clientset.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package versioned - -import ( - crv1 "k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1" - discovery "k8s.io/client-go/discovery" - rest "k8s.io/client-go/rest" - flowcontrol "k8s.io/client-go/util/flowcontrol" -) - -type Interface interface { - Discovery() discovery.DiscoveryInterface - CrV1() crv1.CrV1Interface - // Deprecated: please explicitly pick a version if possible. - Cr() crv1.CrV1Interface -} - -// Clientset contains the clients for groups. Each group has exactly one -// version included in a Clientset. 
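A hedged sketch of typical use of this generated clientset, relying only on the constructors and accessors visible in the deleted file; clientcmd.BuildConfigFromFlags is standard client-go, and the kubeconfig path and namespace are illustrative:

    package main

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/tools/clientcmd"

        versioned "k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned"
    )

    func listExamples(kubeconfig string) error {
        // Build a rest.Config from a kubeconfig file (path is illustrative).
        cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
        if err != nil {
            return err
        }
        cs, err := versioned.NewForConfig(cfg)
        if err != nil {
            return err
        }
        // Typed access through the CrV1 group client defined below.
        _, err = cs.CrV1().Examples("default").List(metav1.ListOptions{})
        return err
    }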
-type Clientset struct { - *discovery.DiscoveryClient - crV1 *crv1.CrV1Client -} - -// CrV1 retrieves the CrV1Client -func (c *Clientset) CrV1() crv1.CrV1Interface { - return c.crV1 -} - -// Deprecated: Cr retrieves the default version of CrClient. -// Please explicitly pick a version. -func (c *Clientset) Cr() crv1.CrV1Interface { - return c.crV1 -} - -// Discovery retrieves the DiscoveryClient -func (c *Clientset) Discovery() discovery.DiscoveryInterface { - if c == nil { - return nil - } - return c.DiscoveryClient -} - -// NewForConfig creates a new Clientset for the given config. -func NewForConfig(c *rest.Config) (*Clientset, error) { - configShallowCopy := *c - if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { - configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) - } - var cs Clientset - var err error - cs.crV1, err = crv1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - - cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - return &cs, nil -} - -// NewForConfigOrDie creates a new Clientset for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *Clientset { - var cs Clientset - cs.crV1 = crv1.NewForConfigOrDie(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) - return &cs -} - -// New creates a new Clientset for the given RESTClient. -func New(c rest.Interface) *Clientset { - var cs Clientset - cs.crV1 = crv1.New(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClient(c) - return &cs -} diff --git a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/doc.go b/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/doc.go deleted file mode 100644 index 41721ca52..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated clientset. -package versioned diff --git a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/fake/clientset_generated.go b/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/fake/clientset_generated.go deleted file mode 100644 index 28089890d..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/fake/clientset_generated.go +++ /dev/null @@ -1,82 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - clientset "k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned" - crv1 "k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1" - fakecrv1 "k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/fake" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/discovery" - fakediscovery "k8s.io/client-go/discovery/fake" - "k8s.io/client-go/testing" -) - -// NewSimpleClientset returns a clientset that will respond with the provided objects. -// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, -// without applying any validations and/or defaults. It shouldn't be considered a replacement -// for a real clientset and is mostly useful in simple unit tests. -func NewSimpleClientset(objects ...runtime.Object) *Clientset { - o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) - for _, obj := range objects { - if err := o.Add(obj); err != nil { - panic(err) - } - } - - cs := &Clientset{} - cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} - cs.AddReactor("*", "*", testing.ObjectReaction(o)) - cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { - gvr := action.GetResource() - ns := action.GetNamespace() - watch, err := o.Watch(gvr, ns) - if err != nil { - return false, nil, err - } - return true, watch, nil - }) - - return cs -} - -// Clientset implements clientset.Interface. Meant to be embedded into a -// struct to get a default implementation. This makes faking out just the method -// you want to test easier. -type Clientset struct { - testing.Fake - discovery *fakediscovery.FakeDiscovery -} - -func (c *Clientset) Discovery() discovery.DiscoveryInterface { - return c.discovery -} - -var _ clientset.Interface = &Clientset{} - -// CrV1 retrieves the CrV1Client -func (c *Clientset) CrV1() crv1.CrV1Interface { - return &fakecrv1.FakeCrV1{Fake: &c.Fake} -} - -// Cr retrieves the CrV1Client -func (c *Clientset) Cr() crv1.CrV1Interface { - return &fakecrv1.FakeCrV1{Fake: &c.Fake} -} diff --git a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/fake/doc.go b/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/fake/doc.go deleted file mode 100644 index 9b99e7167..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated fake clientset. -package fake diff --git a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/fake/register.go b/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/fake/register.go deleted file mode 100644 index 1260d0d3a..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/fake/register.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - crv1 "k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" -) - -var scheme = runtime.NewScheme() -var codecs = serializer.NewCodecFactory(scheme) -var parameterCodec = runtime.NewParameterCodec(scheme) -var localSchemeBuilder = runtime.SchemeBuilder{ - crv1.AddToScheme, -} - -// AddToScheme adds all types of this clientset into the given scheme. This allows composition -// of clientsets, like in: -// -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) -// -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) -// -// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types -// correctly. -var AddToScheme = localSchemeBuilder.AddToScheme - -func init() { - v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) - utilruntime.Must(AddToScheme(scheme)) -} diff --git a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/scheme/doc.go b/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/scheme/doc.go deleted file mode 100644 index 7dc375616..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/scheme/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package contains the scheme of the automatically generated clientset. -package scheme diff --git a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/scheme/register.go b/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/scheme/register.go deleted file mode 100644 index 5743937a9..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/scheme/register.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package scheme - -import ( - crv1 "k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" -) - -var Scheme = runtime.NewScheme() -var Codecs = serializer.NewCodecFactory(Scheme) -var ParameterCodec = runtime.NewParameterCodec(Scheme) -var localSchemeBuilder = runtime.SchemeBuilder{ - crv1.AddToScheme, -} - -// AddToScheme adds all types of this clientset into the given scheme. This allows composition -// of clientsets, like in: -// -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) -// -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) -// -// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types -// correctly. -var AddToScheme = localSchemeBuilder.AddToScheme - -func init() { - v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) - utilruntime.Must(AddToScheme(Scheme)) -} diff --git a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/cr_client.go b/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/cr_client.go deleted file mode 100644 index 04e3930e1..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/cr_client.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1" - "k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/scheme" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - rest "k8s.io/client-go/rest" -) - -type CrV1Interface interface { - RESTClient() rest.Interface - ExamplesGetter -} - -// CrV1Client is used to interact with features provided by the cr.example.apiextensions.k8s.io group. -type CrV1Client struct { - restClient rest.Interface -} - -func (c *CrV1Client) Examples(namespace string) ExampleInterface { - return newExamples(c, namespace) -} - -// NewForConfig creates a new CrV1Client for the given config. -func NewForConfig(c *rest.Config) (*CrV1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &CrV1Client{client}, nil -} - -// NewForConfigOrDie creates a new CrV1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *CrV1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new CrV1Client for the given RESTClient. -func New(c rest.Interface) *CrV1Client { - return &CrV1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with the API server by this client implementation. -func (c *CrV1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/doc.go b/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/doc.go deleted file mode 100644 index 3af5d054f..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients.
-package v1 diff --git a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/example.go b/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/example.go deleted file mode 100644 index b9408f1c5..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/example.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1" - scheme "k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/scheme" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// ExamplesGetter has a method to return an ExampleInterface. -// A group's client should implement this interface. -type ExamplesGetter interface { - Examples(namespace string) ExampleInterface -} - -// ExampleInterface has methods to work with Example resources. -type ExampleInterface interface { - Create(*v1.Example) (*v1.Example, error) - Update(*v1.Example) (*v1.Example, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.Example, error) - List(opts metav1.ListOptions) (*v1.ExampleList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Example, err error) - ExampleExpansion -} - -// examples implements ExampleInterface -type examples struct { - client rest.Interface - ns string -} - -// newExamples returns an Examples -func newExamples(c *CrV1Client, namespace string) *examples { - return &examples{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the example, and returns the corresponding example object, and an error if there is any. -func (c *examples) Get(name string, options metav1.GetOptions) (result *v1.Example, err error) { - result = &v1.Example{} - err = c.client.Get(). - Namespace(c.ns). - Resource("examples"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Examples that match those selectors. -func (c *examples) List(opts metav1.ListOptions) (result *v1.ExampleList, err error) { - result = &v1.ExampleList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("examples"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested examples. -func (c *examples) Watch(opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns).
- Resource("examples"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a example and creates it. Returns the server's representation of the example, and an error, if there is any. -func (c *examples) Create(example *v1.Example) (result *v1.Example, err error) { - result = &v1.Example{} - err = c.client.Post(). - Namespace(c.ns). - Resource("examples"). - Body(example). - Do(). - Into(result) - return -} - -// Update takes the representation of a example and updates it. Returns the server's representation of the example, and an error, if there is any. -func (c *examples) Update(example *v1.Example) (result *v1.Example, err error) { - result = &v1.Example{} - err = c.client.Put(). - Namespace(c.ns). - Resource("examples"). - Name(example.Name). - Body(example). - Do(). - Into(result) - return -} - -// Delete takes name of the example and deletes it. Returns an error if one occurs. -func (c *examples) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("examples"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *examples) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("examples"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched example. -func (c *examples) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Example, err error) { - result = &v1.Example{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("examples"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/fake/doc.go b/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/fake/doc.go deleted file mode 100644 index 16f443990..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/fake/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// Package fake has the automatically generated clients. -package fake diff --git a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/fake/fake_cr_client.go b/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/fake/fake_cr_client.go deleted file mode 100644 index 5ce902313..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/fake/fake_cr_client.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - v1 "k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1" - rest "k8s.io/client-go/rest" - testing "k8s.io/client-go/testing" -) - -type FakeCrV1 struct { - *testing.Fake -} - -func (c *FakeCrV1) Examples(namespace string) v1.ExampleInterface { - return &FakeExamples{c, namespace} -} - -// RESTClient returns a RESTClient that is used to communicate -// with the API server by this client implementation. -func (c *FakeCrV1) RESTClient() rest.Interface { - var ret *rest.RESTClient - return ret -} diff --git a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/fake/fake_example.go b/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/fake/fake_example.go deleted file mode 100644 index f7f5691ee..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/fake/fake_example.go +++ /dev/null @@ -1,128 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - crv1 "k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeExamples implements ExampleInterface -type FakeExamples struct { - Fake *FakeCrV1 - ns string -} - -var examplesResource = schema.GroupVersionResource{Group: "cr.example.apiextensions.k8s.io", Version: "v1", Resource: "examples"} - -var examplesKind = schema.GroupVersionKind{Group: "cr.example.apiextensions.k8s.io", Version: "v1", Kind: "Example"} - -// Get takes name of the example, and returns the corresponding example object, and an error if there is any. -func (c *FakeExamples) Get(name string, options v1.GetOptions) (result *crv1.Example, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(examplesResource, c.ns, name), &crv1.Example{}) - - if obj == nil { - return nil, err - } - return obj.(*crv1.Example), err -} - -// List takes label and field selectors, and returns the list of Examples that match those selectors.
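A short sketch of how the fake clientset removed in this patch supported unit tests, using only NewSimpleClientset and the typed accessors visible in these deleted files; the test name and seeded object are illustrative:

    package fake_test

    import (
        "testing"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

        crv1 "k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1"
        "k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/fake"
    )

    func TestGetExample(t *testing.T) {
        seed := &crv1.Example{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}}
        // The object tracker behind NewSimpleClientset serves seeded objects back.
        cs := fake.NewSimpleClientset(seed)
        got, err := cs.CrV1().Examples("default").Get("foo", metav1.GetOptions{})
        if err != nil {
            t.Fatal(err)
        }
        if got.Name != "foo" {
            t.Fatalf("got %q, want %q", got.Name, "foo")
        }
    }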
-func (c *FakeExamples) List(opts v1.ListOptions) (result *crv1.ExampleList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(examplesResource, examplesKind, c.ns, opts), &crv1.ExampleList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &crv1.ExampleList{ListMeta: obj.(*crv1.ExampleList).ListMeta} - for _, item := range obj.(*crv1.ExampleList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested examples. -func (c *FakeExamples) Watch(opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(examplesResource, c.ns, opts)) - -} - -// Create takes the representation of an example and creates it. Returns the server's representation of the example, and an error, if there is any. -func (c *FakeExamples) Create(example *crv1.Example) (result *crv1.Example, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(examplesResource, c.ns, example), &crv1.Example{}) - - if obj == nil { - return nil, err - } - return obj.(*crv1.Example), err -} - -// Update takes the representation of an example and updates it. Returns the server's representation of the example, and an error, if there is any. -func (c *FakeExamples) Update(example *crv1.Example) (result *crv1.Example, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(examplesResource, c.ns, example), &crv1.Example{}) - - if obj == nil { - return nil, err - } - return obj.(*crv1.Example), err -} - -// Delete takes name of the example and deletes it. Returns an error if one occurs. -func (c *FakeExamples) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteAction(examplesResource, c.ns, name), &crv1.Example{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeExamples) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(examplesResource, c.ns, listOptions) - - _, err := c.Fake.Invokes(action, &crv1.ExampleList{}) - return err -} - -// Patch applies the patch and returns the patched example. -func (c *FakeExamples) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *crv1.Example, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(examplesResource, c.ns, name, data, subresources...), &crv1.Example{}) - - if obj == nil { - return nil, err - } - return obj.(*crv1.Example), err -} diff --git a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/generated_expansion.go b/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/generated_expansion.go deleted file mode 100644 index 755021ec4..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned/typed/cr/v1/generated_expansion.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License.
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -type ExampleExpansion interface{} diff --git a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/cr/interface.go b/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/cr/interface.go deleted file mode 100644 index 0c633e3ef..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/cr/interface.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package cr - -import ( - v1 "k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/cr/v1" - internalinterfaces "k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/internalinterfaces" -) - -// Interface provides access to each of this group's versions. -type Interface interface { - // V1 provides access to shared informers for resources in V1. - V1() v1.Interface -} - -type group struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// V1 returns a new v1.Interface. -func (g *group) V1() v1.Interface { - return v1.New(g.factory, g.namespace, g.tweakListOptions) -} diff --git a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/cr/v1/example.go b/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/cr/v1/example.go deleted file mode 100644 index 2496cd2ce..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/cr/v1/example.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1 - -import ( - time "time" - - crv1 "k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1" - versioned "k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned" - internalinterfaces "k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/internalinterfaces" - v1 "k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/listers/cr/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// ExampleInformer provides access to a shared informer and lister for -// Examples. -type ExampleInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1.ExampleLister -} - -type exampleInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewExampleInformer constructs a new informer for Example type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewExampleInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredExampleInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredExampleInformer constructs a new informer for Example type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
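A minimal sketch of constructing the standalone informer described above (a shared factory remains the preferred route, per the generated comment); the namespace, resync period, and handler body are illustrative:

    package controller

    import (
        "time"

        versioned "k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned"
        crinformers "k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/cr/v1"
        "k8s.io/client-go/tools/cache"
    )

    func watchExamples(client versioned.Interface, stopCh <-chan struct{}) {
        // Standalone informer with a 30s resync and no extra indexers.
        inf := crinformers.NewExampleInformer(client, "default", 30*time.Second, cache.Indexers{})
        inf.AddEventHandler(cache.ResourceEventHandlerFuncs{
            AddFunc: func(obj interface{}) { /* react to a new Example */ },
        })
        inf.Run(stopCh) // blocks until stopCh is closed
    }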
-func NewFilteredExampleInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.CrV1().Examples(namespace).List(options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.CrV1().Examples(namespace).Watch(options) - }, - }, - &crv1.Example{}, - resyncPeriod, - indexers, - ) -} - -func (f *exampleInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredExampleInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *exampleInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&crv1.Example{}, f.defaultInformer) -} - -func (f *exampleInformer) Lister() v1.ExampleLister { - return v1.NewExampleLister(f.Informer().GetIndexer()) -} diff --git a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/cr/v1/interface.go b/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/cr/v1/interface.go deleted file mode 100644 index 96ee1514c..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/cr/v1/interface.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1 - -import ( - internalinterfaces "k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/internalinterfaces" -) - -// Interface provides access to all the informers in this group version. -type Interface interface { - // Examples returns a ExampleInformer. - Examples() ExampleInformer -} - -type version struct { - factory internalinterfaces.SharedInformerFactory - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// New returns a new Interface. -func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { - return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} -} - -// Examples returns a ExampleInformer. 
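Reviewer note on the informer removed above: NewFilteredExampleInformer threads its tweakListOptions hook into both the List and Watch calls of the ListWatch, which lets a caller filter server-side without touching the generated code. A minimal sketch of that hook, assuming the deleted versioned.Interface clientset and an illustrative app=demo label (client, label, and function name here are hypothetical, not part of the removed file):

    // Sketch only: a label-filtered standalone Example informer.
    // Imports assumed: time, metav1 "k8s.io/apimachinery/pkg/apis/meta/v1",
    // "k8s.io/client-go/tools/cache", and the deleted versioned clientset.
    func newDemoInformer(client versioned.Interface) cache.SharedIndexInformer {
        return NewFilteredExampleInformer(
            client,
            metav1.NamespaceAll, // "" == all namespaces
            30*time.Second,      // resync period
            cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
            func(options *metav1.ListOptions) {
                // Runs before every List/Watch issued by the ListWatch above.
                options.LabelSelector = "app=demo" // illustrative label
            },
        )
    }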
-func (v *version) Examples() ExampleInformer { - return &exampleInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} diff --git a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/factory.go b/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/factory.go deleted file mode 100644 index 67c55af39..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/factory.go +++ /dev/null @@ -1,180 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package externalversions - -import ( - reflect "reflect" - sync "sync" - time "time" - - versioned "k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned" - cr "k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/cr" - internalinterfaces "k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/internalinterfaces" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - cache "k8s.io/client-go/tools/cache" -) - -// SharedInformerOption defines the functional option type for SharedInformerFactory. -type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory - -type sharedInformerFactory struct { - client versioned.Interface - namespace string - tweakListOptions internalinterfaces.TweakListOptionsFunc - lock sync.Mutex - defaultResync time.Duration - customResync map[reflect.Type]time.Duration - - informers map[reflect.Type]cache.SharedIndexInformer - // startedInformers is used for tracking which informers have been started. - // This allows Start() to be called multiple times safely. - startedInformers map[reflect.Type]bool -} - -// WithCustomResyncConfig sets a custom resync period for the specified informer types. -func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { - return func(factory *sharedInformerFactory) *sharedInformerFactory { - for k, v := range resyncConfig { - factory.customResync[reflect.TypeOf(k)] = v - } - return factory - } -} - -// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. -func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { - return func(factory *sharedInformerFactory) *sharedInformerFactory { - factory.tweakListOptions = tweakListOptions - return factory - } -} - -// WithNamespace limits the SharedInformerFactory to the specified namespace. -func WithNamespace(namespace string) SharedInformerOption { - return func(factory *sharedInformerFactory) *sharedInformerFactory { - factory.namespace = namespace - return factory - } -} - -// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. 
-func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { - return NewSharedInformerFactoryWithOptions(client, defaultResync) -} - -// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. -// Listers obtained via this SharedInformerFactory will be subject to the same filters -// as specified here. -// Deprecated: Please use NewSharedInformerFactoryWithOptions instead -func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { - return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) -} - -// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. -func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { - factory := &sharedInformerFactory{ - client: client, - namespace: v1.NamespaceAll, - defaultResync: defaultResync, - informers: make(map[reflect.Type]cache.SharedIndexInformer), - startedInformers: make(map[reflect.Type]bool), - customResync: make(map[reflect.Type]time.Duration), - } - - // Apply all options - for _, opt := range options { - factory = opt(factory) - } - - return factory -} - -// Start initializes all requested informers. -func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { - f.lock.Lock() - defer f.lock.Unlock() - - for informerType, informer := range f.informers { - if !f.startedInformers[informerType] { - go informer.Run(stopCh) - f.startedInformers[informerType] = true - } - } -} - -// WaitForCacheSync waits for all started informers' cache were synced. -func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { - informers := func() map[reflect.Type]cache.SharedIndexInformer { - f.lock.Lock() - defer f.lock.Unlock() - - informers := map[reflect.Type]cache.SharedIndexInformer{} - for informerType, informer := range f.informers { - if f.startedInformers[informerType] { - informers[informerType] = informer - } - } - return informers - }() - - res := map[reflect.Type]bool{} - for informType, informer := range informers { - res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) - } - return res -} - -// InternalInformerFor returns the SharedIndexInformer for obj using an internal -// client. -func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { - f.lock.Lock() - defer f.lock.Unlock() - - informerType := reflect.TypeOf(obj) - informer, exists := f.informers[informerType] - if exists { - return informer - } - - resyncPeriod, exists := f.customResync[informerType] - if !exists { - resyncPeriod = f.defaultResync - } - - informer = newFunc(f.client, resyncPeriod) - f.informers[informerType] = informer - - return informer -} - -// SharedInformerFactory provides shared informers for resources in all known -// API group versions. 
-type SharedInformerFactory interface { - internalinterfaces.SharedInformerFactory - ForResource(resource schema.GroupVersionResource) (GenericInformer, error) - WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool - - Cr() cr.Interface -} - -func (f *sharedInformerFactory) Cr() cr.Interface { - return cr.New(f, f.namespace, f.tweakListOptions) -} diff --git a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/generic.go b/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/generic.go deleted file mode 100644 index 2bf6c5a47..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/generic.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package externalversions - -import ( - "fmt" - - v1 "k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1" - schema "k8s.io/apimachinery/pkg/runtime/schema" - cache "k8s.io/client-go/tools/cache" -) - -// GenericInformer is type of SharedIndexInformer which will locate and delegate to other -// sharedInformers based on type -type GenericInformer interface { - Informer() cache.SharedIndexInformer - Lister() cache.GenericLister -} - -type genericInformer struct { - informer cache.SharedIndexInformer - resource schema.GroupResource -} - -// Informer returns the SharedIndexInformer. -func (f *genericInformer) Informer() cache.SharedIndexInformer { - return f.informer -} - -// Lister returns the GenericLister. -func (f *genericInformer) Lister() cache.GenericLister { - return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) -} - -// ForResource gives generic access to a shared informer of the matching type -// TODO extend this to unknown resources with a client pool -func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { - switch resource { - // Group=cr.example.apiextensions.k8s.io, Version=v1 - case v1.SchemeGroupVersion.WithResource("examples"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Cr().V1().Examples().Informer()}, nil - - } - - return nil, fmt.Errorf("no informer found for %v", resource) -} diff --git a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go deleted file mode 100644 index 5b81f037b..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
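The factory deleted above follows the usual informer-factory lifecycle: construct it with functional options, request the informers you need (InformerFor registers them lazily, deduplicated by reflect.Type), then Start and WaitForCacheSync. A usage sketch against the deleted API; the client parameter, namespace value, and wrapper function are assumptions for illustration:

    // Sketch only: driving the deleted SharedInformerFactory.
    // Imports assumed: fmt, time, "k8s.io/client-go/tools/cache",
    // and the deleted versioned clientset.
    func startExampleInformers(client versioned.Interface, stopCh <-chan struct{}) (cache.SharedIndexInformer, error) {
        factory := NewSharedInformerFactoryWithOptions(
            client,
            10*time.Minute,        // default resync
            WithNamespace("demo"), // hypothetical namespace scope
        )
        informer := factory.Cr().V1().Examples().Informer() // registers with the factory
        factory.Start(stopCh) // idempotent: only informers not yet started are run
        for typ, synced := range factory.WaitForCacheSync(stopCh) {
            if !synced {
                return nil, fmt.Errorf("cache for %v never synced", typ)
            }
        }
        return informer, nil
    }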
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package internalinterfaces - -import ( - time "time" - - versioned "k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/clientset/versioned" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - cache "k8s.io/client-go/tools/cache" -) - -type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer - -// SharedInformerFactory a small interface to allow for adding an informer without an import cycle -type SharedInformerFactory interface { - Start(stopCh <-chan struct{}) - InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer -} - -type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/listers/cr/v1/example.go b/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/listers/cr/v1/example.go deleted file mode 100644 index 8a64d09cc..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/listers/cr/v1/example.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/apiextensions-apiserver/examples/client-go/pkg/apis/cr/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ExampleLister helps list Examples. -type ExampleLister interface { - // List lists all Examples in the indexer. - List(selector labels.Selector) (ret []*v1.Example, err error) - // Examples returns an object that can list and get Examples. - Examples(namespace string) ExampleNamespaceLister - ExampleListerExpansion -} - -// exampleLister implements the ExampleLister interface. -type exampleLister struct { - indexer cache.Indexer -} - -// NewExampleLister returns a new ExampleLister. -func NewExampleLister(indexer cache.Indexer) ExampleLister { - return &exampleLister{indexer: indexer} -} - -// List lists all Examples in the indexer. -func (s *exampleLister) List(selector labels.Selector) (ret []*v1.Example, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Example)) - }) - return ret, err -} - -// Examples returns an object that can list and get Examples. -func (s *exampleLister) Examples(namespace string) ExampleNamespaceLister { - return exampleNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// ExampleNamespaceLister helps list and get Examples. 
-type ExampleNamespaceLister interface { - // List lists all Examples in the indexer for a given namespace. - List(selector labels.Selector) (ret []*v1.Example, err error) - // Get retrieves the Example from the indexer for a given namespace and name. - Get(name string) (*v1.Example, error) - ExampleNamespaceListerExpansion -} - -// exampleNamespaceLister implements the ExampleNamespaceLister -// interface. -type exampleNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Examples in the indexer for a given namespace. -func (s exampleNamespaceLister) List(selector labels.Selector) (ret []*v1.Example, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Example)) - }) - return ret, err -} - -// Get retrieves the Example from the indexer for a given namespace and name. -func (s exampleNamespaceLister) Get(name string) (*v1.Example, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("example"), name) - } - return obj.(*v1.Example), nil -} diff --git a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/listers/cr/v1/expansion_generated.go b/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/listers/cr/v1/expansion_generated.go deleted file mode 100644 index 5291ebaec..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/examples/client-go/pkg/client/listers/cr/v1/expansion_generated.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1 - -// ExampleListerExpansion allows custom methods to be added to -// ExampleLister. -type ExampleListerExpansion interface{} - -// ExampleNamespaceListerExpansion allows custom methods to be added to -// ExampleNamespaceLister. -type ExampleNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/apiserver/pkg/apis/example/doc.go b/vendor/k8s.io/apiserver/pkg/apis/example/doc.go deleted file mode 100644 index 2676eee81..000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/example/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +groupName=example.k8s.io -// -// package example contains an example API used to demonstrate how to create api groups. Moreover, this is -// used within tests. 
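Worth noting for reviewers: the listers deleted above never contact the API server; List and Get read only the informer's indexer, which is why a miss returns a locally constructed NewNotFound built from v1.Resource("example"). A read sketch, assuming a lister obtained from the factory and a hypothetical object name:

    // Sketch only: reading through the deleted listers.
    // Imports assumed: "k8s.io/apimachinery/pkg/api/errors",
    // "k8s.io/apimachinery/pkg/labels", and the deleted lister package as v1.
    func readExamples(lister v1.ExampleLister) error {
        all, err := lister.Examples("demo").List(labels.Everything())
        if err != nil {
            return err
        }
        got, err := lister.Examples("demo").Get("example-a") // hypothetical name
        if errors.IsNotFound(err) {
            got = nil // cache miss: not created yet, or informer not synced
        } else if err != nil {
            return err
        }
        _, _ = all, got
        return nil
    }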
-package example // import "k8s.io/apiserver/pkg/apis/example" diff --git a/vendor/k8s.io/apiserver/pkg/apis/example/fuzzer/fuzzer.go b/vendor/k8s.io/apiserver/pkg/apis/example/fuzzer/fuzzer.go deleted file mode 100644 index 58c085406..000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/example/fuzzer/fuzzer.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fuzzer - -import ( - "fmt" - - "github.com/google/gofuzz" - - apitesting "k8s.io/apimachinery/pkg/api/apitesting" - "k8s.io/apimachinery/pkg/api/apitesting/fuzzer" - "k8s.io/apimachinery/pkg/runtime" - runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/apiserver/pkg/apis/example" - examplev1 "k8s.io/apiserver/pkg/apis/example/v1" -) - -// overrideMetaFuncs override some generic fuzzer funcs from k8s.io/apiserver in order to have more realistic -// values in a Kubernetes context. -func overrideMetaFuncs(codecs runtimeserializer.CodecFactory) []interface{} { - return []interface{}{ - func(j *runtime.Object, c fuzz.Continue) { - // TODO: uncomment when round trip starts from a versioned object - if true { //c.RandBool() { - *j = &runtime.Unknown{ - // We do not set TypeMeta here because it is not carried through a round trip - Raw: []byte(`{"apiVersion":"unknown.group/unknown","kind":"Something","someKey":"someValue"}`), - ContentType: runtime.ContentTypeJSON, - } - } else { - types := []runtime.Object{&example.Pod{}} - t := types[c.Rand.Intn(len(types))] - c.Fuzz(t) - *j = t - } - }, - func(r *runtime.RawExtension, c fuzz.Continue) { - // Pick an arbitrary type and fuzz it - types := []runtime.Object{&example.Pod{}} - obj := types[c.Rand.Intn(len(types))] - c.Fuzz(obj) - - // Convert the object to raw bytes - bytes, err := runtime.Encode(apitesting.TestCodec(codecs, examplev1.SchemeGroupVersion), obj) - if err != nil { - panic(fmt.Sprintf("Failed to encode object: %v", err)) - } - - // Set the bytes field on the RawExtension - r.Raw = bytes - }, - } -} - -func exampleFuncs(codecs runtimeserializer.CodecFactory) []interface{} { - return []interface{}{ - func(s *example.PodSpec, c fuzz.Continue) { - c.FuzzNoCustom(s) - // has a default value - ttl := int64(30) - if c.RandBool() { - ttl = int64(c.Uint32()) - } - s.TerminationGracePeriodSeconds = &ttl - - if s.SchedulerName == "" { - s.SchedulerName = "default-scheduler" - } - }, - func(j *example.PodPhase, c fuzz.Continue) { - statuses := []example.PodPhase{"Pending", "Running", "Succeeded", "Failed", "Unknown"} - *j = statuses[c.Rand.Intn(len(statuses))] - }, - func(rp *example.RestartPolicy, c fuzz.Continue) { - policies := []example.RestartPolicy{"Always", "Never", "OnFailure"} - *rp = policies[c.Rand.Intn(len(policies))] - }, - } -} - -// Funcs returns the fuzzer functions for the example api group. 
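The fuzzer file above plugs custom value generators into gofuzz so round-trip tests see realistic objects; any field without an override falls back to generic fuzzing via FuzzNoCustom. A sketch of the same override pattern in isolation, using the public gofuzz API rather than the deleted wiring (nothing below is taken from the removed file beyond the PodSpec default it encodes):

    // Sketch only: the gofuzz override pattern used above.
    // Import assumed: fuzz "github.com/google/gofuzz".
    func fuzzedPodSpec() example.PodSpec {
        f := fuzz.New().Funcs(
            func(s *example.PodSpec, c fuzz.Continue) {
                c.FuzzNoCustom(s) // fill all fields generically first
                if s.SchedulerName == "" {
                    s.SchedulerName = "default-scheduler" // mirror the default above
                }
            },
        )
        var spec example.PodSpec
        f.Fuzz(&spec)
        return spec
    }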
-var Funcs = fuzzer.MergeFuzzerFuncs( - overrideMetaFuncs, - exampleFuncs, -) diff --git a/vendor/k8s.io/apiserver/pkg/apis/example/install/install.go b/vendor/k8s.io/apiserver/pkg/apis/example/install/install.go deleted file mode 100644 index 77cceff36..000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/example/install/install.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package install installs the example API group, making it available as -// an option to all of the API encoding/decoding machinery. -package install - -import ( - "k8s.io/apimachinery/pkg/runtime" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apiserver/pkg/apis/example" - examplev1 "k8s.io/apiserver/pkg/apis/example/v1" -) - -// Install registers the API group and adds types to a scheme -func Install(scheme *runtime.Scheme) { - utilruntime.Must(example.AddToScheme(scheme)) - utilruntime.Must(examplev1.AddToScheme(scheme)) - utilruntime.Must(scheme.SetVersionPriority(examplev1.SchemeGroupVersion)) -} diff --git a/vendor/k8s.io/apiserver/pkg/apis/example/register.go b/vendor/k8s.io/apiserver/pkg/apis/example/register.go deleted file mode 100644 index d25456b0c..000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/example/register.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package example - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -// GroupName is the group name use in this package -const GroupName = "example.apiserver.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -// Adds the list of known types to the given scheme. 
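Install above is the entry point that makes the group usable: it is typically invoked from an init-time import before any codec encodes or decodes these types, and SetVersionPriority marks v1 as the preferred external version. A minimal wiring sketch, assuming only the deleted install and example packages plus k8s.io/apimachinery:

    // Sketch only: wiring the example group into a fresh scheme.
    // Import assumed: "k8s.io/apimachinery/pkg/runtime".
    func newExampleScheme() *runtime.Scheme {
        scheme := runtime.NewScheme()
        install.Install(scheme) // panics via utilruntime.Must on registration failure

        // The Kind/Resource helpers qualify bare names with the group:
        gk := example.Kind("Pod")      // example.apiserver.k8s.io, Kind=Pod
        gr := example.Resource("pods") // example.apiserver.k8s.io, Resource=pods
        _, _ = gk, gr
        return scheme
    }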
-func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &Pod{}, - &ReplicaSet{}, - ) - return nil -} diff --git a/vendor/k8s.io/apiserver/pkg/apis/example/types.go b/vendor/k8s.io/apiserver/pkg/apis/example/types.go deleted file mode 100644 index 243c1c033..000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/example/types.go +++ /dev/null @@ -1,170 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package example - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type ( - ConditionStatus string - PodConditionType string - PodPhase string - RestartPolicy string -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Pod is a collection of containers, used as either input (create, update) or as output (list, get). -type Pod struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the behavior of a pod. - // +optional - Spec PodSpec - - // Status represents the current information about a pod. This data may not be up - // to date. - // +optional - Status PodStatus -} - -// PodStatus represents information about the status of a pod. Status may trail the actual -// state of a system. -type PodStatus struct { - // +optional - Phase PodPhase - // +optional - Conditions []PodCondition - // A human readable message indicating details about why the pod is in this state. - // +optional - Message string - // A brief CamelCase message indicating details about why the pod is in this state. e.g. 'OutOfDisk' - // +optional - Reason string - - // +optional - HostIP string - // +optional - PodIP string - - // Date and time at which the object was acknowledged by the Kubelet. - // This is before the Kubelet pulled the container image(s) for the pod. - // +optional - StartTime *metav1.Time -} - -type PodCondition struct { - Type PodConditionType - Status ConditionStatus - // +optional - LastProbeTime metav1.Time - // +optional - LastTransitionTime metav1.Time - // +optional - Reason string - // +optional - Message string -} - -// PodSpec is a description of a pod -type PodSpec struct { - // +optional - RestartPolicy RestartPolicy - // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. - // Value must be non-negative integer. The value zero indicates delete immediately. - // If this value is nil, the default grace period will be used instead. - // The grace period is the duration in seconds after the processes running in the pod are sent - // a termination signal and the time when the processes are forcibly halted with a kill signal. - // Set this value longer than the expected cleanup time for your process. 
- // +optional - TerminationGracePeriodSeconds *int64 - // Optional duration in seconds relative to the StartTime that the pod may be active on a node - // before the system actively tries to terminate the pod; value must be positive integer - // +optional - ActiveDeadlineSeconds *int64 - // NodeSelector is a selector which must be true for the pod to fit on a node - // +optional - NodeSelector map[string]string - - // ServiceAccountName is the name of the ServiceAccount to use to run this pod - // The pod will be allowed to use secrets referenced by the ServiceAccount - ServiceAccountName string - - // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, - // the scheduler simply schedules this pod onto that node, assuming that it fits resource - // requirements. - // +optional - NodeName string - // Specifies the hostname of the Pod. - // If not specified, the pod's hostname will be set to a system-defined value. - // +optional - Hostname string - // If specified, the fully qualified Pod hostname will be "...svc.". - // If not specified, the pod will not have a domainname at all. - // +optional - Subdomain string - // If specified, the pod will be dispatched by specified scheduler. - // If not specified, the pod will be dispatched by default scheduler. - // +optional - SchedulerName string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodList is a list of Pods. -type PodList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Pod -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ReplicaSet ensures that a specified number of pod replicas are running at any given time. -type ReplicaSet struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the desired behavior of this ReplicaSet. - // +optional - Spec ReplicaSetSpec - - // Status is the current status of this ReplicaSet. This data may be - // out of date by some window of time. - // +optional - Status ReplicaSetStatus -} - -// ReplicaSetSpec is the specification of a ReplicaSet. -// As the internal representation of a ReplicaSet, it must have -// a Template set. -type ReplicaSetSpec struct { - // Replicas is the number of desired replicas. - Replicas int32 -} - -// ReplicaSetStatus represents the current status of a ReplicaSet. -type ReplicaSetStatus struct { - // Replicas is the number of actual replicas. - Replicas int32 -} diff --git a/vendor/k8s.io/apiserver/pkg/apis/example/v1/conversion.go b/vendor/k8s.io/apiserver/pkg/apis/example/v1/conversion.go deleted file mode 100644 index 3e8448f4c..000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/example/v1/conversion.go +++ /dev/null @@ -1,26 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/apimachinery/pkg/runtime" -) - -func addConversionFuncs(scheme *runtime.Scheme) error { - // Add non-generated conversion functions here. Currently there are none. 
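Because the internal types above carry no JSON tags and no defaulting logic, a hand-built object must set whatever the apiserver would otherwise default; the values below mirror the defaults the deleted fuzzer assumes (a 30-second grace period and "default-scheduler"). A construction sketch with hypothetical names:

    // Sketch only: building the deleted internal Pod type by hand.
    // Import assumed: metav1 "k8s.io/apimachinery/pkg/apis/meta/v1".
    func demoPod() example.Pod {
        grace := int64(30)
        return example.Pod{
            ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}, // hypothetical
            Spec: example.PodSpec{
                RestartPolicy:                 "Always",
                TerminationGracePeriodSeconds: &grace,
                SchedulerName:                 "default-scheduler",
            },
            Status: example.PodStatus{Phase: "Pending"},
        }
    }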
- return nil -} diff --git a/vendor/k8s.io/apiserver/pkg/apis/example/v1/defaults.go b/vendor/k8s.io/apiserver/pkg/apis/example/v1/defaults.go deleted file mode 100644 index 436ccde29..000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/example/v1/defaults.go +++ /dev/null @@ -1,26 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/apimachinery/pkg/runtime" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - // return RegisterDefaults(scheme) - return nil -} diff --git a/vendor/k8s.io/apiserver/pkg/apis/example/v1/doc.go b/vendor/k8s.io/apiserver/pkg/apis/example/v1/doc.go deleted file mode 100644 index 4b22d37fb..000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/example/v1/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:conversion-gen=k8s.io/apiserver/pkg/apis/example -// +k8s:openapi-gen=false -// +k8s:defaulter-gen=TypeMeta - -// +groupName=example.apiserver.k8s.io -package v1 // import "k8s.io/apiserver/pkg/apis/example/v1" diff --git a/vendor/k8s.io/apiserver/pkg/apis/example/v1/generated.pb.go b/vendor/k8s.io/apiserver/pkg/apis/example/v1/generated.pb.go deleted file mode 100644 index b2f517e55..000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/example/v1/generated.pb.go +++ /dev/null @@ -1,2008 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/example/v1/generated.proto -// DO NOT EDIT! - -/* - Package v1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/example/v1/generated.proto - - It has these top-level messages: - Pod - PodCondition - PodList - PodSpec - PodStatus -*/ -package v1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *Pod) Reset() { *m = Pod{} } -func (*Pod) ProtoMessage() {} -func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *PodCondition) Reset() { *m = PodCondition{} } -func (*PodCondition) ProtoMessage() {} -func (*PodCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *PodList) Reset() { *m = PodList{} } -func (*PodList) ProtoMessage() {} -func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *PodSpec) Reset() { *m = PodSpec{} } -func (*PodSpec) ProtoMessage() {} -func (*PodSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *PodStatus) Reset() { *m = PodStatus{} } -func (*PodStatus) ProtoMessage() {} -func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func init() { - proto.RegisterType((*Pod)(nil), "k8s.io.apiserver.pkg.apis.example.v1.Pod") - proto.RegisterType((*PodCondition)(nil), "k8s.io.apiserver.pkg.apis.example.v1.PodCondition") - proto.RegisterType((*PodList)(nil), "k8s.io.apiserver.pkg.apis.example.v1.PodList") - proto.RegisterType((*PodSpec)(nil), "k8s.io.apiserver.pkg.apis.example.v1.PodSpec") - proto.RegisterType((*PodStatus)(nil), "k8s.io.apiserver.pkg.apis.example.v1.PodStatus") -} -func (m *Pod) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Pod) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - return i, nil -} - -func (m *PodCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) - n4, err := m.LastProbeTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n5, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil -} - -func (m *PodList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *PodSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RestartPolicy))) - i += copy(dAtA[i:], m.RestartPolicy) - if m.TerminationGracePeriodSeconds != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminationGracePeriodSeconds)) - } - if m.ActiveDeadlineSeconds != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) - } - if len(m.NodeSelector) > 0 { - keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) - for k := range m.NodeSelector { - keysForNodeSelector = append(keysForNodeSelector, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) - for _, k := range keysForNodeSelector { - dAtA[i] = 0x3a - i++ - v := m.NodeSelector[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) - i += copy(dAtA[i:], m.ServiceAccountName) - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedServiceAccount))) - i += copy(dAtA[i:], m.DeprecatedServiceAccount) - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) - i += copy(dAtA[i:], m.NodeName) - dAtA[i] = 0x58 - i++ - if m.HostNetwork { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x60 - i++ - if m.HostPID { - dAtA[i] = 1 
- } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x68 - i++ - if m.HostIPC { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x82 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) - i += copy(dAtA[i:], m.Hostname) - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subdomain))) - i += copy(dAtA[i:], m.Subdomain) - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SchedulerName))) - i += copy(dAtA[i:], m.SchedulerName) - return i, nil -} - -func (m *PodStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i += copy(dAtA[i:], m.Phase) - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP))) - i += copy(dAtA[i:], m.HostIP) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodIP))) - i += copy(dAtA[i:], m.PodIP) - if m.StartTime != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.StartTime.Size())) - n7, err := m.StartTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *Pod) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastProbeTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for 
_, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodSpec) Size() (n int) { - var l int - _ = l - l = len(m.RestartPolicy) - n += 1 + l + sovGenerated(uint64(l)) - if m.TerminationGracePeriodSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TerminationGracePeriodSeconds)) - } - if m.ActiveDeadlineSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) - } - if len(m.NodeSelector) > 0 { - for k, v := range m.NodeSelector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = len(m.ServiceAccountName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DeprecatedServiceAccount) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.NodeName) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - n += 2 - n += 2 - l = len(m.Hostname) - n += 2 + l + sovGenerated(uint64(l)) - l = len(m.Subdomain) - n += 2 + l + sovGenerated(uint64(l)) - l = len(m.SchedulerName) - n += 2 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodStatus) Size() (n int) { - var l int - _ = l - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.HostIP) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.PodIP) - n += 1 + l + sovGenerated(uint64(l)) - if m.StartTime != nil { - l = m.StartTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Pod) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Pod{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *PodList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Pod", "Pod", 1), `&`, ``, 1) + `,`, 
- `}`, - }, "") - return s -} -func (this *PodSpec) String() string { - if this == nil { - return "nil" - } - keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) - for k := range this.NodeSelector { - keysForNodeSelector = append(keysForNodeSelector, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) - mapStringForNodeSelector := "map[string]string{" - for _, k := range keysForNodeSelector { - mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) - } - mapStringForNodeSelector += "}" - s := strings.Join([]string{`&PodSpec{`, - `RestartPolicy:` + fmt.Sprintf("%v", this.RestartPolicy) + `,`, - `TerminationGracePeriodSeconds:` + valueToStringGenerated(this.TerminationGracePeriodSeconds) + `,`, - `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, - `NodeSelector:` + mapStringForNodeSelector + `,`, - `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, - `DeprecatedServiceAccount:` + fmt.Sprintf("%v", this.DeprecatedServiceAccount) + `,`, - `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, - `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, - `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, - `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, - `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, - `Subdomain:` + fmt.Sprintf("%v", this.Subdomain) + `,`, - `SchedulerName:` + fmt.Sprintf("%v", this.SchedulerName) + `,`, - `}`, - }, "") - return s -} -func (this *PodStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodStatus{`, - `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "PodCondition", "PodCondition", 1), `&`, ``, 1) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, - `PodIP:` + fmt.Sprintf("%v", this.PodIP) + `,`, - `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Pod) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Pod: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Pod: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if 
err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = PodConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } 
- m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastProbeTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodList: illegal tag %d 
(wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Pod{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RestartPolicy = RestartPolicy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TerminationGracePeriodSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.TerminationGracePeriodSeconds = &v - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) - } - 
var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ActiveDeadlineSeconds = &v - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.NodeSelector == nil { - m.NodeSelector = make(map[string]string) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.NodeSelector[mapkey] = mapvalue - } else { - var mapvalue string - m.NodeSelector[mapkey] = mapvalue - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServiceAccountName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
DeprecatedServiceAccount", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DeprecatedServiceAccount = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NodeName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.HostNetwork = bool(v != 0) - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.HostPID = bool(v != 0) - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.HostIPC = bool(v != 0) - case 16: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hostname = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 17: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subdomain", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + 
intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Subdomain = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 19: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SchedulerName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SchedulerName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Phase = PodPhase(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, PodCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 
0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HostIP = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodIP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PodIP = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.StartTime == nil { - m.StartTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} - } - if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType 
{ - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/example/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1052 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xcf, 0x6e, 0xdb, 0xc6, - 0x13, 0x36, 0x2d, 0xcb, 0x92, 0xd6, 0x56, 0x62, 0x6f, 0x62, 0x80, 0x31, 0x10, 0xc9, 0xf1, 0xef, - 0x07, 0xc3, 0x29, 0x1a, 0xb2, 0x36, 0xd2, 0x22, 0x6d, 0x0f, 0x41, 0x68, 0x17, 0xb5, 0x0b, 0xff, - 0x21, 0x56, 0x06, 0x02, 0x14, 0x3d, 0x74, 0x45, 0x4e, 0x64, 0x56, 0x22, 0x97, 0x58, 0xae, 0xd4, - 0xea, 0xd6, 0x47, 0x68, 0x1f, 0xa0, 0x4f, 0xd1, 0x43, 0x81, 0x3e, 0x81, 0x8f, 0x39, 0xe6, 0x24, - 0xd4, 0xea, 0x5b, 0xf8, 0x54, 0xec, 0xf2, 0x8f, 0x48, 0x4b, 0x75, 0xe5, 0xdb, 0xee, 0xcc, 0xf7, - 0x7d, 0x33, 0x9c, 0x1d, 0xce, 0xa0, 0xd3, 0xee, 0xab, 0xc8, 0xf0, 0x98, 0xd9, 0xed, 0xb7, 0x81, - 0x07, 0x20, 0x20, 0x32, 0x07, 0x10, 0xb8, 0x8c, 0x9b, 0x89, 0x83, 0x86, 0x5e, 0x04, 0x7c, 0x00, - 0xdc, 0x0c, 0xbb, 0x1d, 0x75, 0x33, 0xe1, 0x27, 0xea, 0x87, 0x3d, 0x30, 0x07, 0x7b, 0x66, 0x07, - 0x02, 0xe0, 0x54, 0x80, 0x6b, 0x84, 0x9c, 0x09, 0x86, 0xff, 0x1f, 0xb3, 0x8c, 0x8c, 0x65, 0x84, - 0xdd, 0x8e, 0xba, 0x19, 0x09, 0xcb, 0x18, 0xec, 0x6d, 0xbe, 0xe8, 0x78, 0xe2, 0xb2, 0xdf, 0x36, - 0x1c, 0xe6, 0x9b, 0x1d, 0xd6, 0x61, 0xa6, 0x22, 0xb7, 0xfb, 0xef, 0xd4, 0x4d, 0x5d, 0xd4, 0x29, - 0x16, 0xdd, 0x7c, 0x39, 0x49, 0xc5, 0xa7, 0xce, 0xa5, 0x17, 0x00, 0x1f, 0x4e, 0xb2, 0xf1, 0x41, - 0xd0, 0x19, 0xa9, 0x6c, 0x9a, 0xff, 0xc6, 0xe2, 0xfd, 0x40, 0x78, 0x3e, 0x4c, 0x11, 0x3e, 0xfb, - 0x2f, 0x42, 0xe4, 0x5c, 0x82, 0x4f, 0x6f, 0xf3, 0xb6, 0x7f, 0x5d, 0x44, 0x25, 0x9b, 0xb9, 0xf8, - 0x7b, 0x54, 0x95, 0xb9, 0xb8, 0x54, 0x50, 0x5d, 0xdb, 0xd2, 0x76, 0x57, 0xf6, 0x3f, 0x31, 0x26, - 0xe5, 0xc8, 0x24, 0x27, 0x15, 0x91, 0x68, 0x63, 0xb0, 0x67, 0x9c, 0xb7, 0x7f, 0x00, 0x47, 0x9c, - 0x82, 0xa0, 0x16, 0xbe, 0x1a, 0x35, 0x17, 0xc6, 0xa3, 0x26, 0x9a, 0xd8, 0x48, 0xa6, 0x8a, 0xcf, - 0xd1, 0x52, 
0x14, 0x82, 0xa3, 0x2f, 0x2a, 0xf5, 0x17, 0xc6, 0x3c, 0xc5, 0x36, 0x6c, 0xe6, 0xb6, - 0x42, 0x70, 0xac, 0xd5, 0x44, 0x7a, 0x49, 0xde, 0x88, 0x12, 0xc2, 0x6f, 0xd1, 0x72, 0x24, 0xa8, - 0xe8, 0x47, 0x7a, 0x49, 0x49, 0x9a, 0xf3, 0x4b, 0x2a, 0x9a, 0xf5, 0x20, 0x11, 0x5d, 0x8e, 0xef, - 0x24, 0x91, 0xdb, 0xfe, 0xbd, 0x84, 0x56, 0x6d, 0xe6, 0x1e, 0xb0, 0xc0, 0xf5, 0x84, 0xc7, 0x02, - 0xfc, 0x12, 0x2d, 0x89, 0x61, 0x08, 0xaa, 0x30, 0x35, 0x6b, 0x2b, 0xcd, 0xe5, 0x62, 0x18, 0xc2, - 0xcd, 0xa8, 0xb9, 0x96, 0xc7, 0x4a, 0x1b, 0x51, 0x68, 0xfc, 0x79, 0x96, 0xdf, 0xa2, 0xe2, 0x3d, - 0x2b, 0x86, 0xbb, 0x19, 0x35, 0x1f, 0x66, 0xb4, 0x62, 0x06, 0xb8, 0x83, 0xea, 0x3d, 0x1a, 0x09, - 0x9b, 0xb3, 0x36, 0x5c, 0x78, 0x3e, 0x24, 0x5f, 0xf8, 0xd1, 0x7c, 0x4f, 0x22, 0x19, 0xd6, 0x46, - 0x12, 0xad, 0x7e, 0x92, 0x17, 0x22, 0x45, 0x5d, 0x3c, 0x40, 0x58, 0x1a, 0x2e, 0x38, 0x0d, 0xa2, - 0x38, 0x7f, 0x19, 0x6d, 0xe9, 0xde, 0xd1, 0x36, 0x93, 0x68, 0xf8, 0x64, 0x4a, 0x8d, 0xcc, 0x88, - 0x80, 0x77, 0xd0, 0x32, 0x07, 0x1a, 0xb1, 0x40, 0x2f, 0xab, 0xda, 0x64, 0x4f, 0x41, 0x94, 0x95, - 0x24, 0x5e, 0xfc, 0x1c, 0x55, 0x7c, 0x88, 0x22, 0xda, 0x01, 0x7d, 0x59, 0x01, 0x1f, 0x26, 0xc0, - 0xca, 0x69, 0x6c, 0x26, 0xa9, 0x7f, 0xfb, 0x0f, 0x0d, 0x55, 0x6c, 0xe6, 0x9e, 0x78, 0x91, 0xc0, - 0xdf, 0x4d, 0x75, 0xb3, 0x31, 0xdf, 0xc7, 0x48, 0xb6, 0xea, 0xe5, 0xb5, 0x24, 0x4e, 0x35, 0xb5, - 0xe4, 0x3a, 0xf9, 0x0c, 0x95, 0x3d, 0x01, 0xbe, 0x7c, 0xd7, 0xd2, 0xee, 0xca, 0xfe, 0xf3, 0xb9, - 0xfb, 0xce, 0xaa, 0x27, 0xaa, 0xe5, 0x63, 0xc9, 0x27, 0xb1, 0xcc, 0xf6, 0x9f, 0x15, 0x95, 0xb9, - 0x6c, 0x6d, 0x7c, 0x82, 0xea, 0x1c, 0x22, 0x41, 0xb9, 0xb0, 0x59, 0xcf, 0x73, 0x86, 0xea, 0xe5, - 0x6b, 0xd6, 0x4e, 0xfa, 0x9a, 0x24, 0xef, 0xbc, 0xb9, 0x6d, 0x20, 0x45, 0x32, 0xee, 0xa0, 0xa7, - 0x02, 0xb8, 0xef, 0x05, 0x54, 0x56, 0xfe, 0x6b, 0x4e, 0x1d, 0xb0, 0x81, 0x7b, 0xcc, 0x6d, 0x81, - 0xc3, 0x02, 0x37, 0x52, 0x2f, 0x5d, 0xb2, 0x9e, 0x8d, 0x47, 0xcd, 0xa7, 0x17, 0x77, 0x01, 0xc9, - 0xdd, 0x3a, 0xf8, 0x1c, 0x6d, 0x50, 0x47, 0x78, 0x03, 0x38, 0x04, 0xea, 0xf6, 0xbc, 0x00, 0xd2, - 0x00, 0x65, 0x15, 0xe0, 0xc9, 0x78, 0xd4, 0xdc, 0x78, 0x33, 0x0b, 0x40, 0x66, 0xf3, 0xf0, 0x10, - 0xad, 0x06, 0xcc, 0x85, 0x16, 0xf4, 0xc0, 0x11, 0x8c, 0xeb, 0x15, 0x55, 0xea, 0xd7, 0xf7, 0x9a, - 0x1a, 0xc6, 0x59, 0x4e, 0xe1, 0xab, 0x40, 0xf0, 0xa1, 0xf5, 0x38, 0xa9, 0xe3, 0x6a, 0xde, 0x45, - 0x0a, 0xa1, 0xf0, 0x37, 0x08, 0x4b, 0x6d, 0xcf, 0x81, 0x37, 0x8e, 0xc3, 0xfa, 0x81, 0x38, 0xa3, - 0x3e, 0xe8, 0x55, 0xf5, 0x0e, 0x59, 0x9f, 0xb7, 0xa6, 0x10, 0x64, 0x06, 0x0b, 0x1f, 0xa1, 0x07, - 0x45, 0xab, 0x5e, 0x2b, 0xcc, 0x10, 0xfd, 0x10, 0x42, 0x0e, 0x8e, 0x1c, 0xc8, 0x45, 0x45, 0x72, - 0x8b, 0x87, 0x3f, 0x46, 0x55, 0x99, 0xa5, 0xca, 0x05, 0x29, 0x8d, 0xac, 0x45, 0xcf, 0x12, 0x3b, - 0xc9, 0x10, 0xf8, 0x53, 0xb4, 0x72, 0xc9, 0x22, 0x71, 0x06, 0xe2, 0x47, 0xc6, 0xbb, 0xfa, 0xca, - 0x96, 0xb6, 0x5b, 0xb5, 0x1e, 0x25, 0x84, 0x95, 0xa3, 0x89, 0x8b, 0xe4, 0x71, 0xf2, 0x77, 0x93, - 0x57, 0xfb, 0xf8, 0x50, 0x5f, 0x55, 0x94, 0xec, 0x77, 0x3b, 0x8a, 0xcd, 0x24, 0xf5, 0xa7, 0xd0, - 0x63, 0xfb, 0x40, 0xaf, 0x4f, 0x43, 0x8f, 0xed, 0x03, 0x92, 0xfa, 0x65, 0xea, 0xf2, 0x18, 0xc8, - 0xd4, 0xd7, 0x8a, 0xa9, 0x1f, 0x25, 0x76, 0x92, 0x21, 0xb0, 0x89, 0x6a, 0x51, 0xbf, 0xed, 0x32, - 0x9f, 0x7a, 0x81, 0xbe, 0xae, 0xe0, 0xeb, 0x09, 0xbc, 0xd6, 0x4a, 0x1d, 0x64, 0x82, 0xc1, 0x5f, - 0xa2, 0xba, 0x5c, 0x6e, 0x6e, 0xbf, 0x07, 0x5c, 0xc5, 0x78, 0xa4, 0x48, 0xd9, 0x00, 0x6c, 0xa5, - 0x4e, 0x55, 0xa3, 0x22, 0x76, 0xf3, 0x35, 0x5a, 0x9f, 0xea, 0x12, 0xbc, 0x86, 0x4a, 0x5d, 0x18, - 0xc6, 0xe3, 0x9e, 0xc8, 0x23, 0x7e, 
0x8c, 0xca, 0x03, 0xda, 0xeb, 0x43, 0x3c, 0xca, 0x49, 0x7c, - 0xf9, 0x62, 0xf1, 0x95, 0xb6, 0xfd, 0x5b, 0x09, 0xd5, 0xb2, 0x95, 0x82, 0x4d, 0x54, 0x0e, 0x2f, - 0x69, 0x94, 0xae, 0x8a, 0x27, 0xe9, 0xff, 0x6e, 0x4b, 0xe3, 0xcd, 0xa8, 0x59, 0xb5, 0x99, 0xab, - 0xce, 0x24, 0xc6, 0xe1, 0x77, 0x08, 0x39, 0xe9, 0x12, 0x48, 0x07, 0xca, 0xfe, 0xdc, 0x5d, 0x9e, - 0xed, 0x8f, 0xc9, 0xee, 0xcd, 0x4c, 0x11, 0xc9, 0x29, 0xe7, 0x07, 0x69, 0xe9, 0xee, 0x41, 0x9a, - 0x9b, 0xcd, 0x4b, 0x77, 0xce, 0xe6, 0x1d, 0xb4, 0x1c, 0xbf, 0xf0, 0xed, 0x19, 0x1e, 0x37, 0x00, - 0x49, 0xbc, 0xf8, 0x7f, 0xa8, 0x1c, 0x32, 0xf7, 0xd8, 0x4e, 0x26, 0x78, 0x36, 0x03, 0x6d, 0x69, - 0x24, 0xb1, 0x0f, 0xbf, 0x45, 0x35, 0x35, 0xb8, 0xd4, 0xfe, 0xa9, 0xdc, 0x7b, 0xff, 0xd4, 0x55, - 0x77, 0xa4, 0x02, 0x64, 0xa2, 0x65, 0xed, 0x5e, 0x5d, 0x37, 0x16, 0xde, 0x5f, 0x37, 0x16, 0x3e, - 0x5c, 0x37, 0x16, 0x7e, 0x1e, 0x37, 0xb4, 0xab, 0x71, 0x43, 0x7b, 0x3f, 0x6e, 0x68, 0x1f, 0xc6, - 0x0d, 0xed, 0xaf, 0x71, 0x43, 0xfb, 0xe5, 0xef, 0xc6, 0xc2, 0xb7, 0x8b, 0x83, 0xbd, 0x7f, 0x02, - 0x00, 0x00, 0xff, 0xff, 0xdf, 0x7a, 0x1b, 0x54, 0x4e, 0x0a, 0x00, 0x00, -} diff --git a/vendor/k8s.io/apiserver/pkg/apis/example/v1/register.go b/vendor/k8s.io/apiserver/pkg/apis/example/v1/register.go deleted file mode 100644 index cfb74eea4..000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/example/v1/register.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "example.apiserver.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder runtime.SchemeBuilder - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addKnownTypes, addConversionFuncs, addDefaultingFuncs) -} - -// Adds the list of known types to the given scheme. 
-func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &Pod{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/apiserver/pkg/apis/example/v1/types.go b/vendor/k8s.io/apiserver/pkg/apis/example/v1/types.go deleted file mode 100644 index 06c3f9f88..000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/example/v1/types.go +++ /dev/null @@ -1,196 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type ( - ConditionStatus string - PodConditionType string - PodPhase string - RestartPolicy string -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Pod is a collection of containers, used as either input (create, update) or as output (list, get). -type Pod struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the desired behavior of the pod. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Most recently observed status of the pod. - // This data may not be up to date. - // Populated by the system. - // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Status PodStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// PodStatus represents information about the status of a pod. Status may trail the actual -// state of a system. -type PodStatus struct { - // Current condition of the pod. - // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-phase - // +optional - Phase PodPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PodPhase"` - // Current service state of pod. - // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions - // +optional - Conditions []PodCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` - // A human readable message indicating details about why the pod is in this condition. - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` - // A brief CamelCase message indicating details about why the pod is in this state. - // e.g. 'OutOfDisk' - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - - // IP address of the host to which the pod is assigned. Empty if not yet scheduled. - // +optional - HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"` - // IP address allocated to the pod. Routable at least within the cluster. - // Empty if not yet allocated. 
- // +optional - PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"` - - // RFC 3339 date and time at which the object was acknowledged by the Kubelet. - // This is before the Kubelet pulled the container image(s) for the pod. - // +optional - StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,7,opt,name=startTime"` -} - -type PodCondition struct { - // Type is the type of the condition. - // Currently only Ready. - // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions - Type PodConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PodConditionType"` - // Status is the status of the condition. - // Can be True, False, Unknown. - // More info: http://kubernetes.io/docs/user-guide/pod-states#pod-conditions - Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` - // Last time we probed the condition. - // +optional - LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` - // Last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` - // Unique, one-word, CamelCase reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` - // Human-readable message indicating details about last transition. - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` -} - -// PodSpec is a description of a pod -type PodSpec struct { - // Restart policy for all containers within the pod. - // One of Always, OnFailure, Never. - // Default to Always. - // More info: http://kubernetes.io/docs/user-guide/pod-states#restartpolicy - // +optional - RestartPolicy RestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,3,opt,name=restartPolicy,casttype=RestartPolicy"` - // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. - // Value must be non-negative integer. The value zero indicates delete immediately. - // If this value is nil, the default grace period will be used instead. - // The grace period is the duration in seconds after the processes running in the pod are sent - // a termination signal and the time when the processes are forcibly halted with a kill signal. - // Set this value longer than the expected cleanup time for your process. - // Defaults to 30 seconds. - // +optional - TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,4,opt,name=terminationGracePeriodSeconds"` - // Optional duration in seconds the pod may be active on the node relative to - // StartTime before the system will actively try to mark it failed and kill associated containers. - // Value must be a positive integer. - // +optional - ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=activeDeadlineSeconds"` - // NodeSelector is a selector which must be true for the pod to fit on a node. - // Selector which must match a node's labels for the pod to be scheduled on that node. - // More info: http://kubernetes.io/docs/user-guide/node-selection/README - // +optional - NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"` - - // ServiceAccountName is the name of the ServiceAccount to use to run this pod. 
- // More info: https://git.k8s.io/community/contributors/design-proposals/auth/service_accounts.md - // +optional - ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"` - // DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. - // Deprecated: Use serviceAccountName instead. - // +k8s:conversion-gen=false - // +optional - DeprecatedServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,9,opt,name=serviceAccount"` - - // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, - // the scheduler simply schedules this pod onto that node, assuming that it fits resource - // requirements. - // +optional - NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"` - // Host networking requested for this pod. Use the host's network namespace. - // If this option is set, the ports that will be used must be specified. - // Default to false. - // +k8s:conversion-gen=false - // +optional - HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,11,opt,name=hostNetwork"` - // Use the host's pid namespace. - // Optional: Default to false. - // +k8s:conversion-gen=false - // +optional - HostPID bool `json:"hostPID,omitempty" protobuf:"varint,12,opt,name=hostPID"` - // Use the host's ipc namespace. - // Optional: Default to false. - // +k8s:conversion-gen=false - // +optional - HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,13,opt,name=hostIPC"` - // Specifies the hostname of the Pod - // If not specified, the pod's hostname will be set to a system-defined value. - // +optional - Hostname string `json:"hostname,omitempty" protobuf:"bytes,16,opt,name=hostname"` - // If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>". - // If not specified, the pod will not have a domainname at all. - // +optional - Subdomain string `json:"subdomain,omitempty" protobuf:"bytes,17,opt,name=subdomain"` - // If specified, the pod will be dispatched by specified scheduler. - // If not specified, the pod will be dispatched by default scheduler. - // +optional - SchedulerName string `json:"schedulername,omitempty" protobuf:"bytes,19,opt,name=schedulername"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodList is a list of Pods. -type PodList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of pods. - // More info: http://kubernetes.io/docs/user-guide/pods - Items []Pod `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.conversion.go deleted file mode 100644 index bcc2d24dc..000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.conversion.go +++ /dev/null @@ -1,266 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License.
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by conversion-gen. DO NOT EDIT. - -package v1 - -import ( - unsafe "unsafe" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - example "k8s.io/apiserver/pkg/apis/example" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*Pod)(nil), (*example.Pod)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Pod_To_example_Pod(a.(*Pod), b.(*example.Pod), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*example.Pod)(nil), (*Pod)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_example_Pod_To_v1_Pod(a.(*example.Pod), b.(*Pod), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*PodCondition)(nil), (*example.PodCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodCondition_To_example_PodCondition(a.(*PodCondition), b.(*example.PodCondition), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*example.PodCondition)(nil), (*PodCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_example_PodCondition_To_v1_PodCondition(a.(*example.PodCondition), b.(*PodCondition), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*PodList)(nil), (*example.PodList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodList_To_example_PodList(a.(*PodList), b.(*example.PodList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*example.PodList)(nil), (*PodList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_example_PodList_To_v1_PodList(a.(*example.PodList), b.(*PodList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*PodSpec)(nil), (*example.PodSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodSpec_To_example_PodSpec(a.(*PodSpec), b.(*example.PodSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*example.PodSpec)(nil), (*PodSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_example_PodSpec_To_v1_PodSpec(a.(*example.PodSpec), b.(*PodSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*PodStatus)(nil), (*example.PodStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodStatus_To_example_PodStatus(a.(*PodStatus), b.(*example.PodStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*example.PodStatus)(nil), (*PodStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_example_PodStatus_To_v1_PodStatus(a.(*example.PodStatus), b.(*PodStatus), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_v1_Pod_To_example_Pod(in *Pod, out *example.Pod, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_PodSpec_To_example_PodSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_PodStatus_To_example_PodStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_Pod_To_example_Pod is an autogenerated conversion function. -func Convert_v1_Pod_To_example_Pod(in *Pod, out *example.Pod, s conversion.Scope) error { - return autoConvert_v1_Pod_To_example_Pod(in, out, s) -} - -func autoConvert_example_Pod_To_v1_Pod(in *example.Pod, out *Pod, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_example_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_example_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_example_Pod_To_v1_Pod is an autogenerated conversion function. -func Convert_example_Pod_To_v1_Pod(in *example.Pod, out *Pod, s conversion.Scope) error { - return autoConvert_example_Pod_To_v1_Pod(in, out, s) -} - -func autoConvert_v1_PodCondition_To_example_PodCondition(in *PodCondition, out *example.PodCondition, s conversion.Scope) error { - out.Type = example.PodConditionType(in.Type) - out.Status = example.ConditionStatus(in.Status) - out.LastProbeTime = in.LastProbeTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_v1_PodCondition_To_example_PodCondition is an autogenerated conversion function. -func Convert_v1_PodCondition_To_example_PodCondition(in *PodCondition, out *example.PodCondition, s conversion.Scope) error { - return autoConvert_v1_PodCondition_To_example_PodCondition(in, out, s) -} - -func autoConvert_example_PodCondition_To_v1_PodCondition(in *example.PodCondition, out *PodCondition, s conversion.Scope) error { - out.Type = PodConditionType(in.Type) - out.Status = ConditionStatus(in.Status) - out.LastProbeTime = in.LastProbeTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_example_PodCondition_To_v1_PodCondition is an autogenerated conversion function. -func Convert_example_PodCondition_To_v1_PodCondition(in *example.PodCondition, out *PodCondition, s conversion.Scope) error { - return autoConvert_example_PodCondition_To_v1_PodCondition(in, out, s) -} - -func autoConvert_v1_PodList_To_example_PodList(in *PodList, out *example.PodList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]example.Pod, len(*in)) - for i := range *in { - if err := Convert_v1_Pod_To_example_Pod(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1_PodList_To_example_PodList is an autogenerated conversion function. 
-func Convert_v1_PodList_To_example_PodList(in *PodList, out *example.PodList, s conversion.Scope) error { - return autoConvert_v1_PodList_To_example_PodList(in, out, s) -} - -func autoConvert_example_PodList_To_v1_PodList(in *example.PodList, out *PodList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Pod, len(*in)) - for i := range *in { - if err := Convert_example_Pod_To_v1_Pod(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_example_PodList_To_v1_PodList is an autogenerated conversion function. -func Convert_example_PodList_To_v1_PodList(in *example.PodList, out *PodList, s conversion.Scope) error { - return autoConvert_example_PodList_To_v1_PodList(in, out, s) -} - -func autoConvert_v1_PodSpec_To_example_PodSpec(in *PodSpec, out *example.PodSpec, s conversion.Scope) error { - out.RestartPolicy = example.RestartPolicy(in.RestartPolicy) - out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds)) - out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) - out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector)) - out.ServiceAccountName = in.ServiceAccountName - // INFO: in.DeprecatedServiceAccount opted out of conversion generation - out.NodeName = in.NodeName - // INFO: in.HostNetwork opted out of conversion generation - // INFO: in.HostPID opted out of conversion generation - // INFO: in.HostIPC opted out of conversion generation - out.Hostname = in.Hostname - out.Subdomain = in.Subdomain - out.SchedulerName = in.SchedulerName - return nil -} - -// Convert_v1_PodSpec_To_example_PodSpec is an autogenerated conversion function. -func Convert_v1_PodSpec_To_example_PodSpec(in *PodSpec, out *example.PodSpec, s conversion.Scope) error { - return autoConvert_v1_PodSpec_To_example_PodSpec(in, out, s) -} - -func autoConvert_example_PodSpec_To_v1_PodSpec(in *example.PodSpec, out *PodSpec, s conversion.Scope) error { - out.RestartPolicy = RestartPolicy(in.RestartPolicy) - out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds)) - out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) - out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector)) - out.ServiceAccountName = in.ServiceAccountName - out.NodeName = in.NodeName - out.Hostname = in.Hostname - out.Subdomain = in.Subdomain - out.SchedulerName = in.SchedulerName - return nil -} - -// Convert_example_PodSpec_To_v1_PodSpec is an autogenerated conversion function. -func Convert_example_PodSpec_To_v1_PodSpec(in *example.PodSpec, out *PodSpec, s conversion.Scope) error { - return autoConvert_example_PodSpec_To_v1_PodSpec(in, out, s) -} - -func autoConvert_v1_PodStatus_To_example_PodStatus(in *PodStatus, out *example.PodStatus, s conversion.Scope) error { - out.Phase = example.PodPhase(in.Phase) - out.Conditions = *(*[]example.PodCondition)(unsafe.Pointer(&in.Conditions)) - out.Message = in.Message - out.Reason = in.Reason - out.HostIP = in.HostIP - out.PodIP = in.PodIP - out.StartTime = (*metav1.Time)(unsafe.Pointer(in.StartTime)) - return nil -} - -// Convert_v1_PodStatus_To_example_PodStatus is an autogenerated conversion function. 
-func Convert_v1_PodStatus_To_example_PodStatus(in *PodStatus, out *example.PodStatus, s conversion.Scope) error { - return autoConvert_v1_PodStatus_To_example_PodStatus(in, out, s) -} - -func autoConvert_example_PodStatus_To_v1_PodStatus(in *example.PodStatus, out *PodStatus, s conversion.Scope) error { - out.Phase = PodPhase(in.Phase) - out.Conditions = *(*[]PodCondition)(unsafe.Pointer(&in.Conditions)) - out.Message = in.Message - out.Reason = in.Reason - out.HostIP = in.HostIP - out.PodIP = in.PodIP - out.StartTime = (*metav1.Time)(unsafe.Pointer(in.StartTime)) - return nil -} - -// Convert_example_PodStatus_To_v1_PodStatus is an autogenerated conversion function. -func Convert_example_PodStatus_To_v1_PodStatus(in *example.PodStatus, out *PodStatus, s conversion.Scope) error { - return autoConvert_example_PodStatus_To_v1_PodStatus(in, out, s) -} diff --git a/vendor/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.deepcopy.go deleted file mode 100644 index 758aa3055..000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,164 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Pod) DeepCopyInto(out *Pod) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pod. -func (in *Pod) DeepCopy() *Pod { - if in == nil { - return nil - } - out := new(Pod) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Pod) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodCondition) DeepCopyInto(out *PodCondition) { - *out = *in - in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCondition. -func (in *PodCondition) DeepCopy() *PodCondition { - if in == nil { - return nil - } - out := new(PodCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodList) DeepCopyInto(out *PodList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Pod, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodList. -func (in *PodList) DeepCopy() *PodList { - if in == nil { - return nil - } - out := new(PodList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSpec) DeepCopyInto(out *PodSpec) { - *out = *in - if in.TerminationGracePeriodSeconds != nil { - in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds - *out = new(int64) - **out = **in - } - if in.ActiveDeadlineSeconds != nil { - in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds - *out = new(int64) - **out = **in - } - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSpec. -func (in *PodSpec) DeepCopy() *PodSpec { - if in == nil { - return nil - } - out := new(PodSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodStatus) DeepCopyInto(out *PodStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]PodCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.StartTime != nil { - in, out := &in.StartTime, &out.StartTime - *out = (*in).DeepCopy() - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatus. -func (in *PodStatus) DeepCopy() *PodStatus { - if in == nil { - return nil - } - out := new(PodStatus) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.defaults.go deleted file mode 100644 index cce2e603a..000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.defaults.go +++ /dev/null @@ -1,32 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by defaulter-gen. DO NOT EDIT. - -package v1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// RegisterDefaults adds defaulters functions to the given scheme. 
-// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. -func RegisterDefaults(scheme *runtime.Scheme) error { - return nil -} diff --git a/vendor/k8s.io/apiserver/pkg/apis/example/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/example/zz_generated.deepcopy.go deleted file mode 100644 index c37c0aacd..000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/example/zz_generated.deepcopy.go +++ /dev/null @@ -1,224 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package example - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Pod) DeepCopyInto(out *Pod) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pod. -func (in *Pod) DeepCopy() *Pod { - if in == nil { - return nil - } - out := new(Pod) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Pod) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodCondition) DeepCopyInto(out *PodCondition) { - *out = *in - in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCondition. -func (in *PodCondition) DeepCopy() *PodCondition { - if in == nil { - return nil - } - out := new(PodCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodList) DeepCopyInto(out *PodList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Pod, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodList. -func (in *PodList) DeepCopy() *PodList { - if in == nil { - return nil - } - out := new(PodList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *PodList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSpec) DeepCopyInto(out *PodSpec) { - *out = *in - if in.TerminationGracePeriodSeconds != nil { - in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds - *out = new(int64) - **out = **in - } - if in.ActiveDeadlineSeconds != nil { - in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds - *out = new(int64) - **out = **in - } - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSpec. -func (in *PodSpec) DeepCopy() *PodSpec { - if in == nil { - return nil - } - out := new(PodSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodStatus) DeepCopyInto(out *PodStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]PodCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.StartTime != nil { - in, out := &in.StartTime, &out.StartTime - *out = (*in).DeepCopy() - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatus. -func (in *PodStatus) DeepCopy() *PodStatus { - if in == nil { - return nil - } - out := new(PodStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSet. -func (in *ReplicaSet) DeepCopy() *ReplicaSet { - if in == nil { - return nil - } - out := new(ReplicaSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ReplicaSet) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaSetSpec) DeepCopyInto(out *ReplicaSetSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetSpec. -func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec { - if in == nil { - return nil - } - out := new(ReplicaSetSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetStatus. 
-func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus { - if in == nil { - return nil - } - out := new(ReplicaSetStatus) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/apiserver/pkg/apis/example2/doc.go b/vendor/k8s.io/apiserver/pkg/apis/example2/doc.go deleted file mode 100644 index ae0ecc109..000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/example2/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +groupName=example2.k8s.io -// -// Package example2 contains an example API whose internal version is defined in -// another group ("example"). This happens if a type is moved to a different -// group. It's not recommended to move types across groups, though Kubernetes -// has a few cases for historical reasons. This package is for tests. -package example2 // import "k8s.io/apiserver/pkg/apis/example2" diff --git a/vendor/k8s.io/apiserver/pkg/apis/example2/install/install.go b/vendor/k8s.io/apiserver/pkg/apis/example2/install/install.go deleted file mode 100644 index 473895998..000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/example2/install/install.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package install installs the example2 API group, making it available as -// an option to all of the API encoding/decoding machinery. -package install - -import ( - "k8s.io/apimachinery/pkg/runtime" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apiserver/pkg/apis/example" - example2v1 "k8s.io/apiserver/pkg/apis/example2/v1" -) - -// Install registers the API group and adds types to a scheme -func Install(scheme *runtime.Scheme) { - utilruntime.Must(example.AddToScheme(scheme)) - utilruntime.Must(example2v1.AddToScheme(scheme)) - utilruntime.Must(scheme.SetVersionPriority(example2v1.SchemeGroupVersion)) -} diff --git a/vendor/k8s.io/apiserver/pkg/apis/example2/register.go b/vendor/k8s.io/apiserver/pkg/apis/example2/register.go deleted file mode 100644 index c9b2fd79a..000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/example2/register.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License.
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package example2 - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apiserver/pkg/apis/example" -) - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -// GroupName is the group name used in this package -const GroupName = "example2.apiserver.k8s.io" - -// SchemeGroupVersion is the group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &example.ReplicaSet{}, - ) - return nil -} diff --git a/vendor/k8s.io/apiserver/pkg/apis/example2/v1/conversion.go b/vendor/k8s.io/apiserver/pkg/apis/example2/v1/conversion.go deleted file mode 100644 index 21abdefd5..000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/example2/v1/conversion.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - conversion "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" - example "k8s.io/apiserver/pkg/apis/example" -) - -func addConversionFuncs(scheme *runtime.Scheme) error { - // Add non-generated conversion functions to handle the *int32 -> int32 - // conversion. A pointer is useful in the versioned type so we can default - // it, but a plain int32 is more convenient in the internal type. These - // functions are the same as the autogenerated ones in every other way.
- err := scheme.AddConversionFuncs( - Convert_example_ReplicaSetSpec_To_v1_ReplicaSetSpec, - Convert_v1_ReplicaSetSpec_To_example_ReplicaSetSpec, - ) - if err != nil { - return err - } - return nil -} - -func Convert_example_ReplicaSetSpec_To_v1_ReplicaSetSpec(in *example.ReplicaSetSpec, out *ReplicaSetSpec, s conversion.Scope) error { - out.Replicas = new(int32) - *out.Replicas = int32(in.Replicas) - return nil -} - -func Convert_v1_ReplicaSetSpec_To_example_ReplicaSetSpec(in *ReplicaSetSpec, out *example.ReplicaSetSpec, s conversion.Scope) error { - if in.Replicas != nil { - out.Replicas = *in.Replicas - } - return nil -} diff --git a/vendor/k8s.io/apiserver/pkg/apis/example2/v1/defaults.go b/vendor/k8s.io/apiserver/pkg/apis/example2/v1/defaults.go deleted file mode 100644 index 436ccde29..000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/example2/v1/defaults.go +++ /dev/null @@ -1,26 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/apimachinery/pkg/runtime" -) - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - // return RegisterDefaults(scheme) - return nil -} diff --git a/vendor/k8s.io/apiserver/pkg/apis/example2/v1/doc.go b/vendor/k8s.io/apiserver/pkg/apis/example2/v1/doc.go deleted file mode 100644 index 5784d44f3..000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/example2/v1/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:conversion-gen=k8s.io/apiserver/pkg/apis/example2 -// +k8s:conversion-gen=k8s.io/apiserver/pkg/apis/example -// +k8s:openapi-gen=false -// +k8s:defaulter-gen=TypeMeta - -// +groupName=example2.apiserver.k8s.io -package v1 // import "k8s.io/apiserver/pkg/apis/example2/v1" diff --git a/vendor/k8s.io/apiserver/pkg/apis/example2/v1/generated.pb.go b/vendor/k8s.io/apiserver/pkg/apis/example2/v1/generated.pb.go deleted file mode 100644 index f1b385b1d..000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/example2/v1/generated.pb.go +++ /dev/null @@ -1,680 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/example2/v1/generated.proto -// DO NOT EDIT! - -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/example2/v1/generated.proto - - It has these top-level messages: - ReplicaSet - ReplicaSetSpec - ReplicaSetStatus -*/ -package v1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } -func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } -func (*ReplicaSetSpec) ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } -func (*ReplicaSetStatus) ProtoMessage() {} -func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func init() { - proto.RegisterType((*ReplicaSet)(nil), "k8s.io.apiserver.pkg.apis.example2.v1.ReplicaSet") - proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.apiserver.pkg.apis.example2.v1.ReplicaSetSpec") - proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.apiserver.pkg.apis.example2.v1.ReplicaSetStatus") -} -func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - return i, nil -} - -func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if 
m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - } - return i, nil -} - -func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *ReplicaSet) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ReplicaSetSpec) Size() (n int) { - var l int - _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) - } - return n -} - -func (m *ReplicaSetStatus) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ReplicaSet) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSetSpec{`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSetStatus{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *ReplicaSet) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicaSet: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Replicas = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - m.Replicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 
0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apiserver/pkg/apis/example2/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 400 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x90, 0x3f, 0x8f, 0xda, 0x30, - 0x18, 0xc6, 0x13, 0x4a, 0x11, 0x72, 0x11, 0x42, 0x99, 0x10, 0x83, 0xa9, 0x90, 0x2a, 0x31, 0xb4, - 0x76, 0x83, 0xfa, 0x4f, 0x9d, 0xaa, 0xec, 0x6d, 0xa5, 0x30, 0x54, 0xea, 0xd2, 0x3a, 0xe1, 0x6d, - 0x48, 0x43, 0x12, 0xcb, 0x76, 0xa2, 0x76, 0xeb, 0x47, 0xe8, 0xc7, 0xb8, 0x8f, 0xc2, 0xc8, 0xc8, - 0x84, 0x8e, 0xdc, 0x17, 0x39, 0xe1, 0xe4, 0x12, 0x1d, 0x70, 0xba, 0xbb, 0xcd, 0x8f, 0xed, 0xdf, - 0xef, 0x7d, 0xf4, 0xa2, 0x2f, 0xd1, 0x07, 0x49, 0xc2, 0x94, 0x46, 0x99, 0x07, 0x22, 0x01, 0x05, - 0x92, 0xe6, 0x90, 0x2c, 0x52, 0x41, 0xab, 0x07, 0xc6, 0x43, 0x09, 0x22, 0x07, 0x41, 0x79, 0x14, - 0xe8, 0x44, 0xe1, 0x0f, 0x8b, 0xf9, 0x0a, 0x66, 0x34, 0xb7, 0x69, 0x00, 0x09, 0x08, 0xa6, 0x60, - 0x41, 0xb8, 0x48, 0x55, 0x6a, 0xbd, 0x28, 0x31, 0x52, 0x63, 0x84, 0x47, 0x81, 0x4e, 0xe4, 0x06, - 0x23, 0xb9, 0x3d, 0x7a, 0x15, 0x84, 0x6a, 0x99, 0x79, 0xc4, 0x4f, 0x63, 0x1a, 0xa4, 0x41, 0x4a, - 0x35, 0xed, 0x65, 0xbf, 0x74, 0xd2, 0x41, 0x9f, 0x4a, 0xeb, 0xe8, 0x4d, 0x53, 0x26, 0x66, 0xfe, - 0x32, 0x4c, 0x40, 0xfc, 0x6d, 0xfa, 0xc4, 0xa0, 0xd8, 0x99, 0x2e, 0x23, 0x7a, 0x17, 0x25, 0xb2, - 0x44, 0x85, 0x31, 0x9c, 0x00, 0xef, 0xee, 0x03, 0xa4, 0xbf, 0x84, 0x98, 0x1d, 0x73, 0x93, 0x8b, - 0x16, 0x42, 0x2e, 0xf0, 0x55, 0xe8, 0xb3, 0x39, 0x28, 0xeb, 0x27, 0xea, 0x1e, 0x2a, 0x2d, 0x98, - 0x62, 0x43, 0xf3, 0xb9, 0x39, 0x7d, 0x36, 0x7b, 0x4d, 0x9a, 0xb5, 0xd4, 0xe6, 0x66, 0x33, 0x87, - 0xdf, 0x24, 0xb7, 0xc9, 0x57, 0xef, 0x37, 0xf8, 0xea, 0x33, 0x28, 0xe6, 0x58, 0xeb, 0xdd, 0xd8, - 0x28, 0x76, 0x63, 0xd4, 0xdc, 0xb9, 0xb5, 0xd5, 0xfa, 0x86, 0xda, 0x92, 0x83, 0x3f, 0x6c, 0x69, - 0xfb, 0x5b, 0xf2, 0xa0, 0xa5, 0x93, 0xa6, 0xe2, 0x9c, 0x83, 0xef, 0xf4, 0xaa, 0x11, 0xed, 0x43, - 0x72, 0xb5, 0xd0, 0xfa, 0x81, 0x3a, 0x52, 0x31, 0x95, 0xc9, 0xe1, 0x13, 0xad, 0x7e, 0xff, 0x78, - 0xb5, 0xc6, 0x9d, 0x7e, 0x25, 0xef, 0x94, 0xd9, 0xad, 0xb4, 0x93, 0x8f, 0xa8, 0x7f, 0xbb, 0x86, - 0x35, 0x45, 0x5d, 0x51, 0xde, 0x48, 0xbd, 0xad, 0xa7, 0x4e, 0xaf, 0xd8, 0x8d, 0xbb, 0xd5, 0x2f, - 0xe9, 0xd6, 0xaf, 0x93, 0x4f, 0x68, 0x70, 0x3c, 0xc7, 0x7a, 0x79, 0x42, 0x0f, 0xaa, 0xc9, 0x67, - 0x0c, 0xce, 0x74, 0xbd, 0xc7, 0xc6, 0x66, 0x8f, 0x8d, 0xed, 0x1e, 0x1b, 0xff, 0x0a, 0x6c, 0xae, - 0x0b, 0x6c, 0x6e, 0x0a, 0x6c, 0x6e, 0x0b, 0x6c, 0x5e, 0x16, 0xd8, 0xfc, 0x7f, 0x85, 0x8d, 0xef, - 0xad, 0xdc, 0xbe, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x03, 0x3d, 0xdb, 0x1b, 0x18, 0x03, 0x00, 0x00, -} diff --git a/vendor/k8s.io/apiserver/pkg/apis/example2/v1/register.go b/vendor/k8s.io/apiserver/pkg/apis/example2/v1/register.go deleted file mode 100644 index 1cb0f5eb8..000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/example2/v1/register.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name used in this package -const GroupName = "example2.apiserver.k8s.io" - -// SchemeGroupVersion is the group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder runtime.SchemeBuilder - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addKnownTypes, addConversionFuncs, addDefaultingFuncs) -} - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &ReplicaSet{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/apiserver/pkg/apis/example2/v1/types.go b/vendor/k8s.io/apiserver/pkg/apis/example2/v1/types.go deleted file mode 100644 index e6e6fb00c..000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/example2/v1/types.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ReplicaSet ensures that a specified number of pod replicas are running at any given time. -type ReplicaSet struct { - metav1.TypeMeta `json:",inline"` - - // If the Labels of a ReplicaSet are empty, they are defaulted to - // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata.
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines the specification of the desired behavior of the ReplicaSet. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status is the most recently observed status of the ReplicaSet. - // This data may be out of date by some window of time. - // Populated by the system. - // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// ReplicaSetSpec is the specification of a ReplicaSet. -type ReplicaSetSpec struct { - // Replicas is the number of desired replicas. - // This is a pointer to distinguish between explicit zero and unspecified. - // Defaults to 1. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller - // +optional - Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` -} - -// ReplicaSetStatus represents the current status of a ReplicaSet. -type ReplicaSetStatus struct { - // Replicas is the most recently observed number of replicas. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller - Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` -} diff --git a/vendor/k8s.io/apiserver/pkg/apis/example2/v1/zz_generated.conversion.go b/vendor/k8s.io/apiserver/pkg/apis/example2/v1/zz_generated.conversion.go deleted file mode 100644 index 91884503c..000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/example2/v1/zz_generated.conversion.go +++ /dev/null @@ -1,144 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by conversion-gen. DO NOT EDIT. - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - example "k8s.io/apiserver/pkg/apis/example" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes.
-func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*ReplicaSet)(nil), (*example.ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicaSet_To_example_ReplicaSet(a.(*ReplicaSet), b.(*example.ReplicaSet), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*example.ReplicaSet)(nil), (*ReplicaSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_example_ReplicaSet_To_v1_ReplicaSet(a.(*example.ReplicaSet), b.(*ReplicaSet), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*ReplicaSetSpec)(nil), (*example.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicaSetSpec_To_example_ReplicaSetSpec(a.(*ReplicaSetSpec), b.(*example.ReplicaSetSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*example.ReplicaSetSpec)(nil), (*ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_example_ReplicaSetSpec_To_v1_ReplicaSetSpec(a.(*example.ReplicaSetSpec), b.(*ReplicaSetSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*ReplicaSetStatus)(nil), (*example.ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicaSetStatus_To_example_ReplicaSetStatus(a.(*ReplicaSetStatus), b.(*example.ReplicaSetStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*example.ReplicaSetStatus)(nil), (*ReplicaSetStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_example_ReplicaSetStatus_To_v1_ReplicaSetStatus(a.(*example.ReplicaSetStatus), b.(*ReplicaSetStatus), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*example.ReplicaSetSpec)(nil), (*ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_example_ReplicaSetSpec_To_v1_ReplicaSetSpec(a.(*example.ReplicaSetSpec), b.(*ReplicaSetSpec), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*ReplicaSetSpec)(nil), (*example.ReplicaSetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ReplicaSetSpec_To_example_ReplicaSetSpec(a.(*ReplicaSetSpec), b.(*example.ReplicaSetSpec), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_v1_ReplicaSet_To_example_ReplicaSet(in *ReplicaSet, out *example.ReplicaSet, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_ReplicaSetSpec_To_example_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_ReplicaSetStatus_To_example_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_ReplicaSet_To_example_ReplicaSet is an autogenerated conversion function. 
-func Convert_v1_ReplicaSet_To_example_ReplicaSet(in *ReplicaSet, out *example.ReplicaSet, s conversion.Scope) error { - return autoConvert_v1_ReplicaSet_To_example_ReplicaSet(in, out, s) -} - -func autoConvert_example_ReplicaSet_To_v1_ReplicaSet(in *example.ReplicaSet, out *ReplicaSet, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_example_ReplicaSetSpec_To_v1_ReplicaSetSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_example_ReplicaSetStatus_To_v1_ReplicaSetStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_example_ReplicaSet_To_v1_ReplicaSet is an autogenerated conversion function. -func Convert_example_ReplicaSet_To_v1_ReplicaSet(in *example.ReplicaSet, out *ReplicaSet, s conversion.Scope) error { - return autoConvert_example_ReplicaSet_To_v1_ReplicaSet(in, out, s) -} - -func autoConvert_v1_ReplicaSetSpec_To_example_ReplicaSetSpec(in *ReplicaSetSpec, out *example.ReplicaSetSpec, s conversion.Scope) error { - if err := metav1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { - return err - } - return nil -} - -func autoConvert_example_ReplicaSetSpec_To_v1_ReplicaSetSpec(in *example.ReplicaSetSpec, out *ReplicaSetSpec, s conversion.Scope) error { - if err := metav1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1_ReplicaSetStatus_To_example_ReplicaSetStatus(in *ReplicaSetStatus, out *example.ReplicaSetStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - return nil -} - -// Convert_v1_ReplicaSetStatus_To_example_ReplicaSetStatus is an autogenerated conversion function. -func Convert_v1_ReplicaSetStatus_To_example_ReplicaSetStatus(in *ReplicaSetStatus, out *example.ReplicaSetStatus, s conversion.Scope) error { - return autoConvert_v1_ReplicaSetStatus_To_example_ReplicaSetStatus(in, out, s) -} - -func autoConvert_example_ReplicaSetStatus_To_v1_ReplicaSetStatus(in *example.ReplicaSetStatus, out *ReplicaSetStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - return nil -} - -// Convert_example_ReplicaSetStatus_To_v1_ReplicaSetStatus is an autogenerated conversion function. -func Convert_example_ReplicaSetStatus_To_v1_ReplicaSetStatus(in *example.ReplicaSetStatus, out *ReplicaSetStatus, s conversion.Scope) error { - return autoConvert_example_ReplicaSetStatus_To_v1_ReplicaSetStatus(in, out, s) -} diff --git a/vendor/k8s.io/apiserver/pkg/apis/example2/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/example2/v1/zz_generated.deepcopy.go deleted file mode 100644 index 754ac2f9e..000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/example2/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,90 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. 
- -package v1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSet. -func (in *ReplicaSet) DeepCopy() *ReplicaSet { - if in == nil { - return nil - } - out := new(ReplicaSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ReplicaSet) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaSetSpec) DeepCopyInto(out *ReplicaSetSpec) { - *out = *in - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetSpec. -func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec { - if in == nil { - return nil - } - out := new(ReplicaSetSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetStatus. -func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus { - if in == nil { - return nil - } - out := new(ReplicaSetStatus) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/apiserver/pkg/apis/example2/v1/zz_generated.defaults.go b/vendor/k8s.io/apiserver/pkg/apis/example2/v1/zz_generated.defaults.go deleted file mode 100644 index cce2e603a..000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/example2/v1/zz_generated.defaults.go +++ /dev/null @@ -1,32 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by defaulter-gen. DO NOT EDIT. - -package v1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// RegisterDefaults adds defaulter functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters.
-func RegisterDefaults(scheme *runtime.Scheme) error { - return nil -} diff --git a/vendor/k8s.io/apiserver/pkg/apis/example2/zz_generated.deepcopy.go b/vendor/k8s.io/apiserver/pkg/apis/example2/zz_generated.deepcopy.go deleted file mode 100644 index 9c3b4480e..000000000 --- a/vendor/k8s.io/apiserver/pkg/apis/example2/zz_generated.deepcopy.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package example2 diff --git a/vendor/k8s.io/client-go/deprecated-dynamic/bad_debt.go b/vendor/k8s.io/client-go/deprecated-dynamic/bad_debt.go deleted file mode 100644 index 51e4a5830..000000000 --- a/vendor/k8s.io/client-go/deprecated-dynamic/bad_debt.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package deprecated_dynamic - -import ( - "encoding/json" - "io" - "strings" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" -) - -// dynamicCodec is a codec that wraps the standard unstructured codec -// with special handling for Status objects. -// Deprecated: only used by test code, and it's wrong. -type dynamicCodec struct{} - -func (dynamicCodec) Decode(data []byte, gvk *schema.GroupVersionKind, obj runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { - obj, gvk, err := unstructured.UnstructuredJSONScheme.Decode(data, gvk, obj) - if err != nil { - return nil, nil, err - } - - if _, ok := obj.(*metav1.Status); !ok && strings.ToLower(gvk.Kind) == "status" { - obj = &metav1.Status{} - err := json.Unmarshal(data, obj) - if err != nil { - return nil, nil, err - } - } - - return obj, gvk, nil -} - -func (dynamicCodec) Encode(obj runtime.Object, w io.Writer) error { - return unstructured.UnstructuredJSONScheme.Encode(obj, w) -} - -// ContentConfig returns a rest.ContentConfig for dynamic types.
-// Deprecated: only used by test code, and it's wrong. -func ContentConfig() rest.ContentConfig { - var jsonInfo runtime.SerializerInfo - // TODO: scheme.Codecs here should become "pkg/apis/server/scheme" which is the minimal core you need - // to talk to a kubernetes server - for _, info := range scheme.Codecs.SupportedMediaTypes() { - if info.MediaType == runtime.ContentTypeJSON { - jsonInfo = info - break - } - } - - jsonInfo.Serializer = dynamicCodec{} - jsonInfo.PrettySerializer = nil - return rest.ContentConfig{ - AcceptContentTypes: runtime.ContentTypeJSON, - ContentType: runtime.ContentTypeJSON, - NegotiatedSerializer: serializer.NegotiatedSerializerWrapper(jsonInfo), - } -} diff --git a/vendor/k8s.io/client-go/deprecated-dynamic/client.go b/vendor/k8s.io/client-go/deprecated-dynamic/client.go deleted file mode 100644 index 3b8efffab..000000000 --- a/vendor/k8s.io/client-go/deprecated-dynamic/client.go +++ /dev/null @@ -1,131 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package deprecated_dynamic provides a client interface to arbitrary Kubernetes -// APIs, exposing common high-level operations and common -// metadata. -package deprecated_dynamic - -import ( - "strings" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/dynamic" - restclient "k8s.io/client-go/rest" -) - -// Interface is a Kubernetes client that allows you to access metadata -// and manipulate metadata of a Kubernetes API group. -type Interface interface { - // Resource returns an API interface to the specified resource for this client's - // group and version. If resource is not a namespaced resource, then namespace - // is ignored. The ResourceInterface inherits the parameter codec of this client. - Resource(resource *metav1.APIResource, namespace string) ResourceInterface -} - -// ResourceInterface is an API interface to a specific resource under a -// dynamic client. -type ResourceInterface interface { - // List returns a list of objects for this resource. - List(opts metav1.ListOptions) (runtime.Object, error) - // Get gets the resource with the specified name. - Get(name string, opts metav1.GetOptions) (*unstructured.Unstructured, error) - // Delete deletes the resource with the specified name. - Delete(name string, opts *metav1.DeleteOptions) error - // DeleteCollection deletes a collection of objects. - DeleteCollection(deleteOptions *metav1.DeleteOptions, listOptions metav1.ListOptions) error - // Create creates the provided resource. - Create(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) - // Update updates the provided resource. - Update(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) - // Watch returns a watch.Interface that watches the resource.
- Watch(opts metav1.ListOptions) (watch.Interface, error) - // Patch patches the provided resource. - Patch(name string, pt types.PatchType, data []byte) (*unstructured.Unstructured, error) -} - -// Client is a Kubernetes client that allows you to access metadata -// and manipulate metadata of a Kubernetes API group, and implements Interface. -type Client struct { - version schema.GroupVersion - delegate dynamic.Interface -} - -// NewClient returns a new client based on the passed-in config. The -// codec is ignored, as the dynamic client uses its own codec. -func NewClient(conf *restclient.Config, version schema.GroupVersion) (*Client, error) { - delegate, err := dynamic.NewForConfig(conf) - if err != nil { - return nil, err - } - - return &Client{version: version, delegate: delegate}, nil -} - -// Resource returns an API interface to the specified resource for this client's -// group and version. If resource is not a namespaced resource, then namespace -// is ignored. The ResourceInterface inherits the parameter codec of c. -func (c *Client) Resource(resource *metav1.APIResource, namespace string) ResourceInterface { - resourceTokens := strings.SplitN(resource.Name, "/", 2) - subresources := []string{} - if len(resourceTokens) > 1 { - subresources = strings.Split(resourceTokens[1], "/") - } - - if len(namespace) == 0 { - return oldResourceShim(c.delegate.Resource(c.version.WithResource(resourceTokens[0])), subresources) - } - return oldResourceShim(c.delegate.Resource(c.version.WithResource(resourceTokens[0])).Namespace(namespace), subresources) -} - -// The old interfaces used the wrong type for lists; this shim fixes that. -func oldResourceShim(in dynamic.ResourceInterface, subresources []string) ResourceInterface { - return oldResourceShimType{ResourceInterface: in, subresources: subresources} -} - -type oldResourceShimType struct { - dynamic.ResourceInterface - subresources []string -} - -func (s oldResourceShimType) Create(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) { - return s.ResourceInterface.Create(obj, metav1.CreateOptions{}, s.subresources...) -} - -func (s oldResourceShimType) Update(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) { - return s.ResourceInterface.Update(obj, metav1.UpdateOptions{}, s.subresources...) -} - -func (s oldResourceShimType) Delete(name string, opts *metav1.DeleteOptions) error { - return s.ResourceInterface.Delete(name, opts, s.subresources...) -} - -func (s oldResourceShimType) Get(name string, opts metav1.GetOptions) (*unstructured.Unstructured, error) { - return s.ResourceInterface.Get(name, opts, s.subresources...) -} - -func (s oldResourceShimType) List(opts metav1.ListOptions) (runtime.Object, error) { - return s.ResourceInterface.List(opts) -} - -func (s oldResourceShimType) Patch(name string, pt types.PatchType, data []byte) (*unstructured.Unstructured, error) { - return s.ResourceInterface.Patch(name, pt, data, metav1.UpdateOptions{}, s.subresources...) -} diff --git a/vendor/k8s.io/client-go/deprecated-dynamic/client_pool.go b/vendor/k8s.io/client-go/deprecated-dynamic/client_pool.go deleted file mode 100644 index 36dc54ce4..000000000 --- a/vendor/k8s.io/client-go/deprecated-dynamic/client_pool.go +++ /dev/null @@ -1,122 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License.
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package deprecated_dynamic - -import ( - "sync" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime/schema" - restclient "k8s.io/client-go/rest" -) - -// ClientPool manages a pool of dynamic clients. -type ClientPool interface { - // ClientForGroupVersionResource returns a client configured for the specified groupVersionResource. - // Resource may be empty. - ClientForGroupVersionResource(resource schema.GroupVersionResource) (Interface, error) - // ClientForGroupVersionKind returns a client configured for the specified groupVersionKind. - // Kind may be empty. - ClientForGroupVersionKind(kind schema.GroupVersionKind) (Interface, error) -} - -// APIPathResolverFunc knows how to convert a groupVersion to its API path. The Kind field is -// optional. -type APIPathResolverFunc func(kind schema.GroupVersionKind) string - -// LegacyAPIPathResolverFunc can resolve paths properly with the legacy API. -func LegacyAPIPathResolverFunc(kind schema.GroupVersionKind) string { - if len(kind.Group) == 0 { - return "/api" - } - return "/apis" -} - -// clientPoolImpl implements ClientPool and caches clients for the resource group versions -// is asked to retrieve. This type is thread safe. -type clientPoolImpl struct { - lock sync.RWMutex - config *restclient.Config - clients map[schema.GroupVersion]*Client - apiPathResolverFunc APIPathResolverFunc - mapper meta.RESTMapper -} - -// NewClientPool returns a ClientPool from the specified config. It reuses clients for the same -// group version. It is expected this type may be wrapped by specific logic that special cases certain -// resources or groups. -func NewClientPool(config *restclient.Config, mapper meta.RESTMapper, apiPathResolverFunc APIPathResolverFunc) ClientPool { - confCopy := *config - - return &clientPoolImpl{ - config: &confCopy, - clients: map[schema.GroupVersion]*Client{}, - apiPathResolverFunc: apiPathResolverFunc, - mapper: mapper, - } -} - -// Instantiates a new dynamic client pool with the given config. -func NewDynamicClientPool(cfg *restclient.Config) ClientPool { - // restMapper is not needed when using LegacyAPIPathResolverFunc - emptyMapper := meta.MultiRESTMapper{} - return NewClientPool(cfg, emptyMapper, LegacyAPIPathResolverFunc) -} - -// ClientForGroupVersionResource uses the provided RESTMapper to identify the appropriate resource. Resource may -// be empty. If no matching kind is found the underlying client for that group is still returned. -func (c *clientPoolImpl) ClientForGroupVersionResource(resource schema.GroupVersionResource) (Interface, error) { - kinds, err := c.mapper.KindsFor(resource) - if err != nil { - if meta.IsNoMatchError(err) { - return c.ClientForGroupVersionKind(schema.GroupVersionKind{Group: resource.Group, Version: resource.Version}) - } - return nil, err - } - return c.ClientForGroupVersionKind(kinds[0]) -} - -// ClientForGroupVersion returns a client for the specified groupVersion, creates one if none exists. Kind -// in the GroupVersionKind may be empty. 
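For context on what replaced this pool: the non-deprecated k8s.io/client-go/dynamic package needs no per-GroupVersion caching, because a single dynamic.Interface serves every GroupVersionResource. A minimal sketch, assuming a hypothetical kubeconfig path and the pre-context-argument method signatures this vendored snapshot uses (the "deployments" resource is only an example):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Hypothetical kubeconfig path; any *rest.Config works here.
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}

	// One dynamic.Interface covers every group/version/resource,
	// which is why no client pool is required anymore.
	client, err := dynamic.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	gvr := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
	list, err := client.Resource(gvr).Namespace("default").List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, item := range list.Items {
		fmt.Println(item.GetName())
	}
}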
-func (c *clientPoolImpl) ClientForGroupVersionKind(kind schema.GroupVersionKind) (Interface, error) { - c.lock.Lock() - defer c.lock.Unlock() - - gv := kind.GroupVersion() - - // do we have a client already configured? - if existingClient, found := c.clients[gv]; found { - return existingClient, nil - } - - // avoid changing the original config - confCopy := *c.config - conf := &confCopy - - // we need to set the api path based on group version, if no group, default to legacy path - conf.APIPath = c.apiPathResolverFunc(kind) - - // we need to make a client - conf.GroupVersion = &gv - - dynamicClient, err := NewClient(conf, gv) - if err != nil { - return nil, err - } - c.clients[gv] = dynamicClient - return dynamicClient, nil -} diff --git a/vendor/k8s.io/client-go/examples/create-update-delete-deployment/main.go b/vendor/k8s.io/client-go/examples/create-update-delete-deployment/main.go deleted file mode 100644 index 333ada018..000000000 --- a/vendor/k8s.io/client-go/examples/create-update-delete-deployment/main.go +++ /dev/null @@ -1,171 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Note: the example only works with the code within the same release/branch. -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "path/filepath" - - appsv1 "k8s.io/api/apps/v1" - apiv1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/util/homedir" - "k8s.io/client-go/util/retry" - // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). 
- // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" -) - -func main() { - var kubeconfig *string - if home := homedir.HomeDir(); home != "" { - kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file") - } else { - kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file") - } - flag.Parse() - - config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig) - if err != nil { - panic(err) - } - clientset, err := kubernetes.NewForConfig(config) - if err != nil { - panic(err) - } - - deploymentsClient := clientset.AppsV1().Deployments(apiv1.NamespaceDefault) - - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: "demo-deployment", - }, - Spec: appsv1.DeploymentSpec{ - Replicas: int32Ptr(2), - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "app": "demo", - }, - }, - Template: apiv1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "app": "demo", - }, - }, - Spec: apiv1.PodSpec{ - Containers: []apiv1.Container{ - { - Name: "web", - Image: "nginx:1.12", - Ports: []apiv1.ContainerPort{ - { - Name: "http", - Protocol: apiv1.ProtocolTCP, - ContainerPort: 80, - }, - }, - }, - }, - }, - }, - }, - } - - // Create Deployment - fmt.Println("Creating deployment...") - result, err := deploymentsClient.Create(deployment) - if err != nil { - panic(err) - } - fmt.Printf("Created deployment %q.\n", result.GetObjectMeta().GetName()) - - // Update Deployment - prompt() - fmt.Println("Updating deployment...") - // You have two options to Update() this Deployment: - // - // 1. Modify the "deployment" variable and call: Update(deployment). - // This works like the "kubectl replace" command and it overwrites/loses changes - // made by other clients between you Create() and Update() the object. - // 2. Modify the "result" returned by Get() and retry Update(result) until - // you no longer get a conflict error. This way, you can preserve changes made - // by other clients between Create() and Update(). This is implemented below - // using the retry utility package included with client-go. 
(RECOMMENDED) - // - // More Info: - // https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#concurrency-control-and-consistency - - retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { - // Retrieve the latest version of Deployment before attempting update - // RetryOnConflict uses exponential backoff to avoid exhausting the apiserver - result, getErr := deploymentsClient.Get("demo-deployment", metav1.GetOptions{}) - if getErr != nil { - panic(fmt.Errorf("Failed to get latest version of Deployment: %v", getErr)) - } - - result.Spec.Replicas = int32Ptr(1) // reduce replica count - result.Spec.Template.Spec.Containers[0].Image = "nginx:1.13" // change nginx version - _, updateErr := deploymentsClient.Update(result) - return updateErr - }) - if retryErr != nil { - panic(fmt.Errorf("Update failed: %v", retryErr)) - } - fmt.Println("Updated deployment...") - - // List Deployments - prompt() - fmt.Printf("Listing deployments in namespace %q:\n", apiv1.NamespaceDefault) - list, err := deploymentsClient.List(metav1.ListOptions{}) - if err != nil { - panic(err) - } - for _, d := range list.Items { - fmt.Printf(" * %s (%d replicas)\n", d.Name, *d.Spec.Replicas) - } - - // Delete Deployment - prompt() - fmt.Println("Deleting deployment...") - deletePolicy := metav1.DeletePropagationForeground - if err := deploymentsClient.Delete("demo-deployment", &metav1.DeleteOptions{ - PropagationPolicy: &deletePolicy, - }); err != nil { - panic(err) - } - fmt.Println("Deleted deployment.") -} - -func prompt() { - fmt.Printf("-> Press Return key to continue.") - scanner := bufio.NewScanner(os.Stdin) - for scanner.Scan() { - break - } - if err := scanner.Err(); err != nil { - panic(err) - } - fmt.Println() -} - -func int32Ptr(i int32) *int32 { return &i } diff --git a/vendor/k8s.io/client-go/examples/fake-client/doc.go b/vendor/k8s.io/client-go/examples/fake-client/doc.go deleted file mode 100644 index 1c02e5ea5..000000000 --- a/vendor/k8s.io/client-go/examples/fake-client/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package fakeclient contains examples on how to use fakeclient in tests. -// Note: This file is here to avoid warnings on go build since there are no -// non-test files in this package. -package fakeclient diff --git a/vendor/k8s.io/client-go/examples/in-cluster-client-configuration/main.go b/vendor/k8s.io/client-go/examples/in-cluster-client-configuration/main.go deleted file mode 100644 index f71506817..000000000 --- a/vendor/k8s.io/client-go/examples/in-cluster-client-configuration/main.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Note: the example only works with the code within the same release/branch. -package main - -import ( - "fmt" - "time" - - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" -) - -func main() { - // creates the in-cluster config - config, err := rest.InClusterConfig() - if err != nil { - panic(err.Error()) - } - // creates the clientset - clientset, err := kubernetes.NewForConfig(config) - if err != nil { - panic(err.Error()) - } - for { - pods, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{}) - if err != nil { - panic(err.Error()) - } - fmt.Printf("There are %d pods in the cluster\n", len(pods.Items)) - - // Examples for error handling: - // - Use helper functions like e.g. errors.IsNotFound() - // - And/or cast to StatusError and use its properties like e.g. ErrStatus.Message - _, err = clientset.CoreV1().Pods("default").Get("example-xxxxx", metav1.GetOptions{}) - if errors.IsNotFound(err) { - fmt.Printf("Pod not found\n") - } else if statusError, isStatus := err.(*errors.StatusError); isStatus { - fmt.Printf("Error getting pod %v\n", statusError.ErrStatus.Message) - } else if err != nil { - panic(err.Error()) - } else { - fmt.Printf("Found pod\n") - } - - time.Sleep(10 * time.Second) - } -} diff --git a/vendor/k8s.io/client-go/examples/out-of-cluster-client-configuration/main.go b/vendor/k8s.io/client-go/examples/out-of-cluster-client-configuration/main.go deleted file mode 100644 index 9d79b9776..000000000 --- a/vendor/k8s.io/client-go/examples/out-of-cluster-client-configuration/main.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Note: the example only works with the code within the same release/branch. -package main - -import ( - "flag" - "fmt" - "os" - "path/filepath" - "time" - - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/clientcmd" - // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters). 
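The in-cluster example above and the out-of-cluster example being introduced here differ only in how they obtain a *rest.Config. A small sketch of combining the two (not part of the vendored examples), so one binary works in both environments:

package sketch

import (
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// buildConfig prefers the in-cluster service-account configuration and
// falls back to the given kubeconfig path when not running inside a pod.
func buildConfig(kubeconfig string) (*rest.Config, error) {
	if config, err := rest.InClusterConfig(); err == nil {
		return config, nil
	}
	return clientcmd.BuildConfigFromFlags("", kubeconfig)
}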
- // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" -) - -func main() { - var kubeconfig *string - if home := homeDir(); home != "" { - kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file") - } else { - kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file") - } - flag.Parse() - - // use the current context in kubeconfig - config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig) - if err != nil { - panic(err.Error()) - } - - // create the clientset - clientset, err := kubernetes.NewForConfig(config) - if err != nil { - panic(err.Error()) - } - for { - pods, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{}) - if err != nil { - panic(err.Error()) - } - fmt.Printf("There are %d pods in the cluster\n", len(pods.Items)) - - // Examples for error handling: - // - Use helper functions like e.g. errors.IsNotFound() - // - And/or cast to StatusError and use its properties like e.g. ErrStatus.Message - namespace := "default" - pod := "example-xxxxx" - _, err = clientset.CoreV1().Pods(namespace).Get(pod, metav1.GetOptions{}) - if errors.IsNotFound(err) { - fmt.Printf("Pod %s in namespace %s not found\n", pod, namespace) - } else if statusError, isStatus := err.(*errors.StatusError); isStatus { - fmt.Printf("Error getting pod %s in namespace %s: %v\n", - pod, namespace, statusError.ErrStatus.Message) - } else if err != nil { - panic(err.Error()) - } else { - fmt.Printf("Found pod %s in namespace %s\n", pod, namespace) - } - - time.Sleep(10 * time.Second) - } -} - -func homeDir() string { - if h := os.Getenv("HOME"); h != "" { - return h - } - return os.Getenv("USERPROFILE") // windows -} diff --git a/vendor/k8s.io/client-go/examples/workqueue/main.go b/vendor/k8s.io/client-go/examples/workqueue/main.go deleted file mode 100644 index 6768f5d91..000000000 --- a/vendor/k8s.io/client-go/examples/workqueue/main.go +++ /dev/null @@ -1,217 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "flag" - "fmt" - "time" - - "github.com/golang/glog" - - "k8s.io/api/core/v1" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/util/workqueue" -) - -type Controller struct { - indexer cache.Indexer - queue workqueue.RateLimitingInterface - informer cache.Controller -} - -func NewController(queue workqueue.RateLimitingInterface, indexer cache.Indexer, informer cache.Controller) *Controller { - return &Controller{ - informer: informer, - indexer: indexer, - queue: queue, - } -} - -func (c *Controller) processNextItem() bool { - // Wait until there is a new item in the working queue - key, quit := c.queue.Get() - if quit { - return false - } - // Tell the queue that we are done with processing this key. 
This unblocks the key for other workers - // This allows safe parallel processing because two pods with the same key are never processed in - // parallel. - defer c.queue.Done(key) - - // Invoke the method containing the business logic - err := c.syncToStdout(key.(string)) - // Handle the error if something went wrong during the execution of the business logic - c.handleErr(err, key) - return true -} - -// syncToStdout is the business logic of the controller. In this controller it simply prints -// information about the pod to stdout. In case an error happened, it has to simply return the error. -// The retry logic should not be part of the business logic. -func (c *Controller) syncToStdout(key string) error { - obj, exists, err := c.indexer.GetByKey(key) - if err != nil { - glog.Errorf("Fetching object with key %s from store failed with %v", key, err) - return err - } - - if !exists { - // Below we will warm up our cache with a Pod, so that we will see a delete for one pod - fmt.Printf("Pod %s does not exist anymore\n", key) - } else { - // Note that you also have to check the uid if you have a local controlled resource, which - // is dependent on the actual instance, to detect that a Pod was recreated with the same name - fmt.Printf("Sync/Add/Update for Pod %s\n", obj.(*v1.Pod).GetName()) - } - return nil -} - -// handleErr checks if an error happened and makes sure we will retry later. -func (c *Controller) handleErr(err error, key interface{}) { - if err == nil { - // Forget about the #AddRateLimited history of the key on every successful synchronization. - // This ensures that future processing of updates for this key is not delayed because of - // an outdated error history. - c.queue.Forget(key) - return - } - - // This controller retries 5 times if something goes wrong. After that, it stops trying. - if c.queue.NumRequeues(key) < 5 { - glog.Infof("Error syncing pod %v: %v", key, err) - - // Re-enqueue the key rate limited. Based on the rate limiter on the - // queue and the re-enqueue history, the key will be processed later again. 
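The pacing of those retries is a property of the queue, not of handleErr. A sketch of tuning it, using only the exported workqueue constructors (the delay values are illustrative):

package sketch

import (
	"time"

	"k8s.io/client-go/util/workqueue"
)

// newTunedQueue builds a queue whose AddRateLimited backs off per key:
// 5ms, 10ms, 20ms, ... capped at 30s. Forget(key) resets that history.
func newTunedQueue() workqueue.RateLimitingInterface {
	limiter := workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 30*time.Second)
	return workqueue.NewRateLimitingQueue(limiter)
}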
- c.queue.AddRateLimited(key) - return - } - - c.queue.Forget(key) - // Report to an external entity that, even after several retries, we could not successfully process this key - runtime.HandleError(err) - glog.Infof("Dropping pod %q out of the queue: %v", key, err) -} - -func (c *Controller) Run(threadiness int, stopCh chan struct{}) { - defer runtime.HandleCrash() - - // Let the workers stop when we are done - defer c.queue.ShutDown() - glog.Info("Starting Pod controller") - - go c.informer.Run(stopCh) - - // Wait for all involved caches to be synced, before processing items from the queue is started - if !cache.WaitForCacheSync(stopCh, c.informer.HasSynced) { - runtime.HandleError(fmt.Errorf("Timed out waiting for caches to sync")) - return - } - - for i := 0; i < threadiness; i++ { - go wait.Until(c.runWorker, time.Second, stopCh) - } - - <-stopCh - glog.Info("Stopping Pod controller") -} - -func (c *Controller) runWorker() { - for c.processNextItem() { - } -} - -func main() { - var kubeconfig string - var master string - - flag.StringVar(&kubeconfig, "kubeconfig", "", "absolute path to the kubeconfig file") - flag.StringVar(&master, "master", "", "master url") - flag.Parse() - - // creates the connection - config, err := clientcmd.BuildConfigFromFlags(master, kubeconfig) - if err != nil { - glog.Fatal(err) - } - - // creates the clientset - clientset, err := kubernetes.NewForConfig(config) - if err != nil { - glog.Fatal(err) - } - - // create the pod watcher - podListWatcher := cache.NewListWatchFromClient(clientset.CoreV1().RESTClient(), "pods", v1.NamespaceDefault, fields.Everything()) - - // create the workqueue - queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()) - - // Bind the workqueue to a cache with the help of an informer. This way we make sure that - // whenever the cache is updated, the pod key is added to the workqueue. - // Note that when we finally process the item from the workqueue, we might see a newer version - // of the Pod than the version which was responsible for triggering the update. - indexer, informer := cache.NewIndexerInformer(podListWatcher, &v1.Pod{}, 0, cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - key, err := cache.MetaNamespaceKeyFunc(obj) - if err == nil { - queue.Add(key) - } - }, - UpdateFunc: func(old interface{}, new interface{}) { - key, err := cache.MetaNamespaceKeyFunc(new) - if err == nil { - queue.Add(key) - } - }, - DeleteFunc: func(obj interface{}) { - // IndexerInformer uses a delta queue, therefore for deletes we have to use this - // key function. - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) - if err == nil { - queue.Add(key) - } - }, - }, cache.Indexers{}) - - controller := NewController(queue, indexer, informer) - - // We can now warm up the cache for initial synchronization. - // Let's suppose that we knew about a pod "mypod" on our last run, therefore add it to the cache. - // If this pod is not there anymore, the controller will be notified about the removal after the - // cache has synchronized. 
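The keys flowing through this queue are the "namespace/name" strings produced by cache.MetaNamespaceKeyFunc ("default/mypod" for the pod warmed up just below). A sketch of recovering the parts with the standard helper:

package sketch

import "k8s.io/client-go/tools/cache"

// splitKey turns "default/mypod" back into ("default", "mypod", nil);
// cluster-scoped objects yield an empty namespace.
func splitKey(key string) (namespace, name string, err error) {
	return cache.SplitMetaNamespaceKey(key)
}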
- indexer.Add(&v1.Pod{ - ObjectMeta: meta_v1.ObjectMeta{ - Name: "mypod", - Namespace: v1.NamespaceDefault, - }, - }) - - // Now let's start the controller - stop := make(chan struct{}) - defer close(stop) - go controller.Run(1, stop) - - // Wait forever - select {} -} diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/azure.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/azure.go deleted file mode 100644 index 60304b0f3..000000000 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/azure/azure.go +++ /dev/null @@ -1,359 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package azure - -import ( - "errors" - "fmt" - "net/http" - "os" - "sync" - - "github.com/Azure/go-autorest/autorest" - "github.com/Azure/go-autorest/autorest/adal" - "github.com/Azure/go-autorest/autorest/azure" - "github.com/golang/glog" - - "k8s.io/apimachinery/pkg/util/net" - restclient "k8s.io/client-go/rest" -) - -const ( - azureTokenKey = "azureTokenKey" - tokenType = "Bearer" - authHeader = "Authorization" - - cfgClientID = "client-id" - cfgTenantID = "tenant-id" - cfgAccessToken = "access-token" - cfgRefreshToken = "refresh-token" - cfgExpiresIn = "expires-in" - cfgExpiresOn = "expires-on" - cfgEnvironment = "environment" - cfgApiserverID = "apiserver-id" -) - -func init() { - if err := restclient.RegisterAuthProviderPlugin("azure", newAzureAuthProvider); err != nil { - glog.Fatalf("Failed to register azure auth plugin: %v", err) - } -} - -var cache = newAzureTokenCache() - -type azureTokenCache struct { - lock sync.Mutex - cache map[string]*azureToken -} - -func newAzureTokenCache() *azureTokenCache { - return &azureTokenCache{cache: make(map[string]*azureToken)} -} - -func (c *azureTokenCache) getToken(tokenKey string) *azureToken { - c.lock.Lock() - defer c.lock.Unlock() - return c.cache[tokenKey] -} - -func (c *azureTokenCache) setToken(tokenKey string, token *azureToken) { - c.lock.Lock() - defer c.lock.Unlock() - c.cache[tokenKey] = token -} - -func newAzureAuthProvider(_ string, cfg map[string]string, persister restclient.AuthProviderConfigPersister) (restclient.AuthProvider, error) { - var ts tokenSource - - environment, err := azure.EnvironmentFromName(cfg[cfgEnvironment]) - if err != nil { - environment = azure.PublicCloud - } - ts, err = newAzureTokenSourceDeviceCode(environment, cfg[cfgClientID], cfg[cfgTenantID], cfg[cfgApiserverID]) - if err != nil { - return nil, fmt.Errorf("creating a new azure token source for device code authentication: %v", err) - } - cacheSource := newAzureTokenSource(ts, cache, cfg, persister) - - return &azureAuthProvider{ - tokenSource: cacheSource, - }, nil -} - -type azureAuthProvider struct { - tokenSource tokenSource -} - -func (p *azureAuthProvider) Login() error { - return errors.New("not yet implemented") -} - -func (p *azureAuthProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper { - return &azureRoundTripper{ - tokenSource: p.tokenSource, - roundTripper: rt, - } -} - -type azureRoundTripper struct 
{ - tokenSource tokenSource - roundTripper http.RoundTripper -} - -var _ net.RoundTripperWrapper = &azureRoundTripper{} - -func (r *azureRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - if len(req.Header.Get(authHeader)) != 0 { - return r.roundTripper.RoundTrip(req) - } - - token, err := r.tokenSource.Token() - if err != nil { - glog.Errorf("Failed to acquire a token: %v", err) - return nil, fmt.Errorf("acquiring a token for authorization header: %v", err) - } - - // clone the request in order to avoid modifying the headers of the original request - req2 := new(http.Request) - *req2 = *req - req2.Header = make(http.Header, len(req.Header)) - for k, s := range req.Header { - req2.Header[k] = append([]string(nil), s...) - } - - req2.Header.Set(authHeader, fmt.Sprintf("%s %s", tokenType, token.token.AccessToken)) - - return r.roundTripper.RoundTrip(req2) -} - -func (r *azureRoundTripper) WrappedRoundTripper() http.RoundTripper { return r.roundTripper } - -type azureToken struct { - token adal.Token - clientID string - tenantID string - apiserverID string -} - -type tokenSource interface { - Token() (*azureToken, error) -} - -type azureTokenSource struct { - source tokenSource - cache *azureTokenCache - lock sync.Mutex - cfg map[string]string - persister restclient.AuthProviderConfigPersister -} - -func newAzureTokenSource(source tokenSource, cache *azureTokenCache, cfg map[string]string, persister restclient.AuthProviderConfigPersister) tokenSource { - return &azureTokenSource{ - source: source, - cache: cache, - cfg: cfg, - persister: persister, - } -} - -// Token fetches a token from the cache of configuration if present otherwise -// acquires a new token from the configured source. Automatically refreshes -// the token if expired. 
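The clone-then-mutate dance in azureRoundTripper.RoundTrip above is the standard way to keep a RoundTripper from modifying the caller's request. The same pattern in isolation, as a generic sketch (the bearerRoundTripper type is hypothetical):

package sketch

import (
	"fmt"
	"net/http"
)

type bearerRoundTripper struct {
	token string
	next  http.RoundTripper
}

// RoundTrip injects an Authorization header without mutating req:
// it shallow-copies the request and deep-copies the header map first.
func (b *bearerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	if req.Header.Get("Authorization") != "" {
		return b.next.RoundTrip(req) // respect a caller-supplied header
	}
	clone := new(http.Request)
	*clone = *req
	clone.Header = make(http.Header, len(req.Header))
	for k, v := range req.Header {
		clone.Header[k] = append([]string(nil), v...)
	}
	clone.Header.Set("Authorization", fmt.Sprintf("Bearer %s", b.token))
	return b.next.RoundTrip(clone)
}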
-func (ts *azureTokenSource) Token() (*azureToken, error) { - ts.lock.Lock() - defer ts.lock.Unlock() - - var err error - token := ts.cache.getToken(azureTokenKey) - if token == nil { - token, err = ts.retrieveTokenFromCfg() - if err != nil { - token, err = ts.source.Token() - if err != nil { - return nil, fmt.Errorf("acquiring a new fresh token: %v", err) - } - } - if !token.token.IsExpired() { - ts.cache.setToken(azureTokenKey, token) - err = ts.storeTokenInCfg(token) - if err != nil { - return nil, fmt.Errorf("storing the token in configuration: %v", err) - } - } - } - if token.token.IsExpired() { - token, err = ts.refreshToken(token) - if err != nil { - return nil, fmt.Errorf("refreshing the expired token: %v", err) - } - ts.cache.setToken(azureTokenKey, token) - err = ts.storeTokenInCfg(token) - if err != nil { - return nil, fmt.Errorf("storing the refreshed token in configuration: %v", err) - } - } - return token, nil -} - -func (ts *azureTokenSource) retrieveTokenFromCfg() (*azureToken, error) { - accessToken := ts.cfg[cfgAccessToken] - if accessToken == "" { - return nil, fmt.Errorf("no access token in cfg: %s", cfgAccessToken) - } - refreshToken := ts.cfg[cfgRefreshToken] - if refreshToken == "" { - return nil, fmt.Errorf("no refresh token in cfg: %s", cfgRefreshToken) - } - clientID := ts.cfg[cfgClientID] - if clientID == "" { - return nil, fmt.Errorf("no client ID in cfg: %s", cfgClientID) - } - tenantID := ts.cfg[cfgTenantID] - if tenantID == "" { - return nil, fmt.Errorf("no tenant ID in cfg: %s", cfgTenantID) - } - apiserverID := ts.cfg[cfgApiserverID] - if apiserverID == "" { - return nil, fmt.Errorf("no apiserver ID in cfg: %s", apiserverID) - } - expiresIn := ts.cfg[cfgExpiresIn] - if expiresIn == "" { - return nil, fmt.Errorf("no expiresIn in cfg: %s", cfgExpiresIn) - } - expiresOn := ts.cfg[cfgExpiresOn] - if expiresOn == "" { - return nil, fmt.Errorf("no expiresOn in cfg: %s", cfgExpiresOn) - } - - return &azureToken{ - token: adal.Token{ - AccessToken: accessToken, - RefreshToken: refreshToken, - ExpiresIn: expiresIn, - ExpiresOn: expiresOn, - NotBefore: expiresOn, - Resource: fmt.Sprintf("spn:%s", apiserverID), - Type: tokenType, - }, - clientID: clientID, - tenantID: tenantID, - apiserverID: apiserverID, - }, nil -} - -func (ts *azureTokenSource) storeTokenInCfg(token *azureToken) error { - newCfg := make(map[string]string) - newCfg[cfgAccessToken] = token.token.AccessToken - newCfg[cfgRefreshToken] = token.token.RefreshToken - newCfg[cfgClientID] = token.clientID - newCfg[cfgTenantID] = token.tenantID - newCfg[cfgApiserverID] = token.apiserverID - newCfg[cfgExpiresIn] = token.token.ExpiresIn - newCfg[cfgExpiresOn] = token.token.ExpiresOn - - err := ts.persister.Persist(newCfg) - if err != nil { - return fmt.Errorf("persisting the configuration: %v", err) - } - ts.cfg = newCfg - return nil -} - -func (ts *azureTokenSource) refreshToken(token *azureToken) (*azureToken, error) { - oauthConfig, err := adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, token.tenantID) - if err != nil { - return nil, fmt.Errorf("building the OAuth configuration for token refresh: %v", err) - } - - callback := func(t adal.Token) error { - return nil - } - spt, err := adal.NewServicePrincipalTokenFromManualToken( - *oauthConfig, - token.clientID, - token.apiserverID, - token.token, - callback) - if err != nil { - return nil, fmt.Errorf("creating new service principal for token refresh: %v", err) - } - - if err := spt.Refresh(); err != nil { - return nil, fmt.Errorf("refreshing 
token: %v", err) - } - - return &azureToken{ - token: spt.Token(), - clientID: token.clientID, - tenantID: token.tenantID, - apiserverID: token.apiserverID, - }, nil -} - -type azureTokenSourceDeviceCode struct { - environment azure.Environment - clientID string - tenantID string - apiserverID string -} - -func newAzureTokenSourceDeviceCode(environment azure.Environment, clientID string, tenantID string, apiserverID string) (tokenSource, error) { - if clientID == "" { - return nil, errors.New("client-id is empty") - } - if tenantID == "" { - return nil, errors.New("tenant-id is empty") - } - if apiserverID == "" { - return nil, errors.New("apiserver-id is empty") - } - return &azureTokenSourceDeviceCode{ - environment: environment, - clientID: clientID, - tenantID: tenantID, - apiserverID: apiserverID, - }, nil -} - -func (ts *azureTokenSourceDeviceCode) Token() (*azureToken, error) { - oauthConfig, err := adal.NewOAuthConfig(ts.environment.ActiveDirectoryEndpoint, ts.tenantID) - if err != nil { - return nil, fmt.Errorf("building the OAuth configuration for device code authentication: %v", err) - } - client := &autorest.Client{} - deviceCode, err := adal.InitiateDeviceAuth(client, *oauthConfig, ts.clientID, ts.apiserverID) - if err != nil { - return nil, fmt.Errorf("initialing the device code authentication: %v", err) - } - - _, err = fmt.Fprintln(os.Stderr, *deviceCode.Message) - if err != nil { - return nil, fmt.Errorf("prompting the device code message: %v", err) - } - - token, err := adal.WaitForUserCompletion(client, deviceCode) - if err != nil { - return nil, fmt.Errorf("waiting for device code authentication to complete: %v", err) - } - - return &azureToken{ - token: *token, - clientID: ts.clientID, - tenantID: ts.tenantID, - apiserverID: ts.apiserverID, - }, nil -} diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/openstack/openstack.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/openstack/openstack.go deleted file mode 100644 index e6d7f0493..000000000 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/openstack/openstack.go +++ /dev/null @@ -1,193 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package openstack - -import ( - "fmt" - "net/http" - "sync" - "time" - - "github.com/golang/glog" - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack" - - "k8s.io/apimachinery/pkg/util/net" - restclient "k8s.io/client-go/rest" -) - -func init() { - if err := restclient.RegisterAuthProviderPlugin("openstack", newOpenstackAuthProvider); err != nil { - glog.Fatalf("Failed to register openstack auth plugin: %s", err) - } -} - -// DefaultTTLDuration is the time before a token gets expired. -const DefaultTTLDuration = 10 * time.Minute - -// openstackAuthProvider is an authprovider for openstack. this provider reads -// the environment variables to determine the client identity, and generates a -// token which will be inserted into the request header later. 
-type openstackAuthProvider struct { - ttl time.Duration - tokenGetter TokenGetter -} - -// TokenGetter returns a bearer token that can be inserted into request. -type TokenGetter interface { - Token() (string, error) -} - -type tokenGetter struct { - authOpt *gophercloud.AuthOptions -} - -// Token creates a token by authenticate with keystone. -func (t *tokenGetter) Token() (string, error) { - var options gophercloud.AuthOptions - var err error - if t.authOpt == nil { - // reads the config from the environment - glog.V(4).Info("reading openstack config from the environment variables") - options, err = openstack.AuthOptionsFromEnv() - if err != nil { - return "", fmt.Errorf("failed to read openstack env vars: %s", err) - } - } else { - options = *t.authOpt - } - client, err := openstack.AuthenticatedClient(options) - if err != nil { - return "", fmt.Errorf("authentication failed: %s", err) - } - return client.TokenID, nil -} - -// cachedGetter caches a token until it gets expired, after the expiration, it will -// generate another token and cache it. -type cachedGetter struct { - mutex sync.Mutex - tokenGetter TokenGetter - - token string - born time.Time - ttl time.Duration -} - -// Token returns the current available token, create a new one if expired. -func (c *cachedGetter) Token() (string, error) { - c.mutex.Lock() - defer c.mutex.Unlock() - - var err error - // no token or exceeds the TTL - if c.token == "" || time.Since(c.born) > c.ttl { - c.token, err = c.tokenGetter.Token() - if err != nil { - return "", fmt.Errorf("failed to get token: %s", err) - } - c.born = time.Now() - } - return c.token, nil -} - -// tokenRoundTripper implements the RoundTripper interface: adding the bearer token -// into the request header. -type tokenRoundTripper struct { - http.RoundTripper - - tokenGetter TokenGetter -} - -var _ net.RoundTripperWrapper = &tokenRoundTripper{} - -// RoundTrip adds the bearer token into the request. -func (t *tokenRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - // if the authorization header already present, use it. - if req.Header.Get("Authorization") != "" { - return t.RoundTripper.RoundTrip(req) - } - - token, err := t.tokenGetter.Token() - if err == nil { - req.Header.Set("Authorization", "Bearer "+token) - } else { - glog.V(4).Infof("failed to get token: %s", err) - } - - return t.RoundTripper.RoundTrip(req) -} - -func (t *tokenRoundTripper) WrappedRoundTripper() http.RoundTripper { return t.RoundTripper } - -// newOpenstackAuthProvider creates an auth provider which works with openstack -// environment. -func newOpenstackAuthProvider(_ string, config map[string]string, persister restclient.AuthProviderConfigPersister) (restclient.AuthProvider, error) { - var ttlDuration time.Duration - var err error - - glog.Warningf("WARNING: in-tree openstack auth plugin is now deprecated. 
please use the \"client-keystone-auth\" kubectl/client-go credential plugin instead") - ttl, found := config["ttl"] - if !found { - ttlDuration = DefaultTTLDuration - // persist to config - config["ttl"] = ttlDuration.String() - if err = persister.Persist(config); err != nil { - return nil, fmt.Errorf("failed to persist config: %s", err) - } - } else { - ttlDuration, err = time.ParseDuration(ttl) - if err != nil { - return nil, fmt.Errorf("failed to parse ttl config: %s", err) - } - } - - authOpt := gophercloud.AuthOptions{ - IdentityEndpoint: config["identityEndpoint"], - Username: config["username"], - Password: config["password"], - DomainName: config["name"], - TenantID: config["tenantId"], - TenantName: config["tenantName"], - } - - getter := tokenGetter{} - // not empty - if (authOpt != gophercloud.AuthOptions{}) { - if len(authOpt.IdentityEndpoint) == 0 { - return nil, fmt.Errorf("empty %q in the config for openstack auth provider", "identityEndpoint") - } - getter.authOpt = &authOpt - } - - return &openstackAuthProvider{ - ttl: ttlDuration, - tokenGetter: &getter, - }, nil -} - -func (oap *openstackAuthProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper { - return &tokenRoundTripper{ - RoundTripper: rt, - tokenGetter: &cachedGetter{ - tokenGetter: oap.tokenGetter, - ttl: oap.ttl, - }, - } -} - -func (oap *openstackAuthProvider) Login() error { return nil } diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/plugins.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/plugins.go deleted file mode 100644 index 42085d7ae..000000000 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/plugins.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package auth - -import ( - // Initialize all known client auth plugins. - _ "k8s.io/client-go/plugin/pkg/client/auth/azure" - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - _ "k8s.io/client-go/plugin/pkg/client/auth/oidc" - _ "k8s.io/client-go/plugin/pkg/client/auth/openstack" -) diff --git a/vendor/k8s.io/client-go/scale/client.go b/vendor/k8s.io/client-go/scale/client.go deleted file mode 100644 index 3a3e8fc9e..000000000 --- a/vendor/k8s.io/client-go/scale/client.go +++ /dev/null @@ -1,221 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package scale - -import ( - "fmt" - - autoscaling "k8s.io/api/autoscaling/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/dynamic" - restclient "k8s.io/client-go/rest" -) - -var scaleConverter = NewScaleConverter() -var codecs = serializer.NewCodecFactory(scaleConverter.Scheme()) - -// restInterfaceProvider turns a restclient.Config into a restclient.Interface. -// It's overridable for the purposes of testing. -type restInterfaceProvider func(*restclient.Config) (restclient.Interface, error) - -// scaleClient is an implementation of ScalesGetter -// which makes use of a RESTMapper and a generic REST -// client to support an discoverable resource. -// It behaves somewhat similarly to the dynamic ClientPool, -// but is more specifically scoped to Scale. -type scaleClient struct { - mapper PreferredResourceMapper - - apiPathResolverFunc dynamic.APIPathResolverFunc - scaleKindResolver ScaleKindResolver - clientBase restclient.Interface -} - -// NewForConfig creates a new ScalesGetter which resolves kinds -// to resources using the given RESTMapper, and API paths using -// the given dynamic.APIPathResolverFunc. -func NewForConfig(cfg *restclient.Config, mapper PreferredResourceMapper, resolver dynamic.APIPathResolverFunc, scaleKindResolver ScaleKindResolver) (ScalesGetter, error) { - // so that the RESTClientFor doesn't complain - cfg.GroupVersion = &schema.GroupVersion{} - - cfg.NegotiatedSerializer = serializer.DirectCodecFactory{ - CodecFactory: codecs, - } - if len(cfg.UserAgent) == 0 { - cfg.UserAgent = restclient.DefaultKubernetesUserAgent() - } - - client, err := restclient.RESTClientFor(cfg) - if err != nil { - return nil, err - } - - return New(client, mapper, resolver, scaleKindResolver), nil -} - -// New creates a new ScalesGetter using the given client to make requests. -// The GroupVersion on the client is ignored. -func New(baseClient restclient.Interface, mapper PreferredResourceMapper, resolver dynamic.APIPathResolverFunc, scaleKindResolver ScaleKindResolver) ScalesGetter { - return &scaleClient{ - mapper: mapper, - - apiPathResolverFunc: resolver, - scaleKindResolver: scaleKindResolver, - clientBase: baseClient, - } -} - -// pathAndVersionFor returns the appropriate base path and the associated full GroupVersionResource -// for the given GroupResource -func (c *scaleClient) pathAndVersionFor(resource schema.GroupResource) (string, schema.GroupVersionResource, error) { - gvr, err := c.mapper.ResourceFor(resource.WithVersion("")) - if err != nil { - return "", gvr, fmt.Errorf("unable to get full preferred group-version-resource for %s: %v", resource.String(), err) - } - - groupVer := gvr.GroupVersion() - - // we need to set the API path based on GroupVersion (defaulting to the legacy path if none is set) - // TODO: we "cheat" here since the API path really only depends on group ATM, but this should - // *probably* take GroupVersionResource and not GroupVersionKind. - apiPath := c.apiPathResolverFunc(groupVer.WithKind("")) - if apiPath == "" { - apiPath = "/api" - } - - path := restclient.DefaultVersionedAPIPath(apiPath, groupVer) - - return path, gvr, nil -} - -// namespacedScaleClient is an ScaleInterface for fetching -// Scales in a given namespace. 
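From a caller's point of view, the client below is driven through the ScalesGetter interface. A sketch, assuming the no-context Get/Update signatures of this vendored snapshot (the "deployments" group-resource is just an example):

package sketch

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/scale"
)

// scaleTo fetches the scale subresource, edits the desired replica
// count, and writes it back through the polymorphic client.
func scaleTo(scales scale.ScalesGetter, namespace, name string, replicas int32) error {
	gr := schema.GroupResource{Group: "apps", Resource: "deployments"}
	s, err := scales.Scales(namespace).Get(gr, name)
	if err != nil {
		return err
	}
	s.Spec.Replicas = replicas
	_, err = scales.Scales(namespace).Update(gr, s)
	return err
}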
-type namespacedScaleClient struct { - client *scaleClient - namespace string -} - -func (c *scaleClient) Scales(namespace string) ScaleInterface { - return &namespacedScaleClient{ - client: c, - namespace: namespace, - } -} - -func (c *namespacedScaleClient) Get(resource schema.GroupResource, name string) (*autoscaling.Scale, error) { - // Currently, a /scale endpoint can return different scale types. - // Until we have support for the alternative API representations proposal, - // we need to deal with accepting different API versions. - // In practice, this is autoscaling/v1.Scale and extensions/v1beta1.Scale - - path, gvr, err := c.client.pathAndVersionFor(resource) - if err != nil { - return nil, fmt.Errorf("unable to get client for %s: %v", resource.String(), err) - } - - result := c.client.clientBase.Get(). - AbsPath(path). - Namespace(c.namespace). - Resource(gvr.Resource). - Name(name). - SubResource("scale"). - Do() - if err := result.Error(); err != nil { - return nil, err - } - - scaleBytes, err := result.Raw() - if err != nil { - return nil, err - } - decoder := scaleConverter.codecs.UniversalDecoder(scaleConverter.ScaleVersions()...) - rawScaleObj, err := runtime.Decode(decoder, scaleBytes) - if err != nil { - return nil, err - } - - // convert whatever this is to autoscaling/v1.Scale - scaleObj, err := scaleConverter.ConvertToVersion(rawScaleObj, autoscaling.SchemeGroupVersion) - if err != nil { - return nil, fmt.Errorf("received an object from a /scale endpoint which was not convertible to autoscaling Scale: %v", err) - } - - return scaleObj.(*autoscaling.Scale), nil -} - -func (c *namespacedScaleClient) Update(resource schema.GroupResource, scale *autoscaling.Scale) (*autoscaling.Scale, error) { - path, gvr, err := c.client.pathAndVersionFor(resource) - if err != nil { - return nil, fmt.Errorf("unable to get client for %s: %v", resource.String(), err) - } - - // Currently, a /scale endpoint can receive and return different scale types. - // Until we have support for the alternative API representations proposal, - // we need to deal with sending and accepting different API versions. - - // figure out what scale we actually need here - desiredGVK, err := c.client.scaleKindResolver.ScaleForResource(gvr) - if err != nil { - return nil, fmt.Errorf("could not find proper group-version for scale subresource of %s: %v", gvr.String(), err) - } - - // convert this to whatever this endpoint wants - scaleUpdate, err := scaleConverter.ConvertToVersion(scale, desiredGVK.GroupVersion()) - if err != nil { - return nil, fmt.Errorf("could not convert scale update to external Scale: %v", err) - } - encoder := scaleConverter.codecs.LegacyCodec(desiredGVK.GroupVersion()) - scaleUpdateBytes, err := runtime.Encode(encoder, scaleUpdate) - if err != nil { - return nil, fmt.Errorf("could not encode scale update to external Scale: %v", err) - } - - result := c.client.clientBase.Put(). - AbsPath(path). - Namespace(c.namespace). - Resource(gvr.Resource). - Name(scale.Name). - SubResource("scale"). - Body(scaleUpdateBytes). - Do() - if err := result.Error(); err != nil { - // propagate "raw" error from the API - // this allows callers to interpret underlying Reason field - // for example: errors.IsConflict(err) - return nil, err - } - - scaleBytes, err := result.Raw() - if err != nil { - return nil, err - } - decoder := scaleConverter.codecs.UniversalDecoder(scaleConverter.ScaleVersions()...) 
- rawScaleObj, err := runtime.Decode(decoder, scaleBytes) - if err != nil { - return nil, err - } - - // convert whatever this is back to autoscaling/v1.Scale - scaleObj, err := scaleConverter.ConvertToVersion(rawScaleObj, autoscaling.SchemeGroupVersion) - if err != nil { - return nil, fmt.Errorf("received an object from a /scale endpoint which was not convertible to autoscaling Scale: %v", err) - } - - return scaleObj.(*autoscaling.Scale), err -} diff --git a/vendor/k8s.io/client-go/scale/doc.go b/vendor/k8s.io/client-go/scale/doc.go deleted file mode 100644 index 59fd39146..000000000 --- a/vendor/k8s.io/client-go/scale/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package scale provides a polymorphic scale client capable of fetching -// and updating Scale for any resource which implements the `scale` subresource, -// as long as that subresource operates on a version of scale convertable to -// autoscaling.Scale. -package scale diff --git a/vendor/k8s.io/client-go/scale/fake/client.go b/vendor/k8s.io/client-go/scale/fake/client.go deleted file mode 100644 index 1736680f1..000000000 --- a/vendor/k8s.io/client-go/scale/fake/client.go +++ /dev/null @@ -1,67 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package fake provides a fake client interface to arbitrary Kubernetes -// APIs that exposes common high level operations and exposes common -// metadata. -package fake - -import ( - autoscalingapi "k8s.io/api/autoscaling/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/scale" - "k8s.io/client-go/testing" -) - -// FakeScaleClient provides a fake implementation of scale.ScalesGetter. -type FakeScaleClient struct { - testing.Fake -} - -func (f *FakeScaleClient) Scales(namespace string) scale.ScaleInterface { - return &fakeNamespacedScaleClient{ - namespace: namespace, - fake: &f.Fake, - } -} - -type fakeNamespacedScaleClient struct { - namespace string - fake *testing.Fake -} - -func (f *fakeNamespacedScaleClient) Get(resource schema.GroupResource, name string) (*autoscalingapi.Scale, error) { - obj, err := f.fake. 
- Invokes(testing.NewGetSubresourceAction(resource.WithVersion(""), f.namespace, "scale", name), &autoscalingapi.Scale{}) - - if err != nil { - return nil, err - } - - return obj.(*autoscalingapi.Scale), err -} - -func (f *fakeNamespacedScaleClient) Update(resource schema.GroupResource, scale *autoscalingapi.Scale) (*autoscalingapi.Scale, error) { - obj, err := f.fake. - Invokes(testing.NewUpdateSubresourceAction(resource.WithVersion(""), f.namespace, "scale", scale), &autoscalingapi.Scale{}) - - if err != nil { - return nil, err - } - - return obj.(*autoscalingapi.Scale), err - -} diff --git a/vendor/k8s.io/client-go/scale/interfaces.go b/vendor/k8s.io/client-go/scale/interfaces.go deleted file mode 100644 index 4668c7417..000000000 --- a/vendor/k8s.io/client-go/scale/interfaces.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package scale - -import ( - autoscalingapi "k8s.io/api/autoscaling/v1" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// ScalesGetter can produce a ScaleInterface -// for a particular namespace. -type ScalesGetter interface { - Scales(namespace string) ScaleInterface -} - -// ScaleInterface can fetch and update scales for -// resources in a particular namespace which implement -// the scale subresource. -type ScaleInterface interface { - // Get fetches the scale of the given scalable resource. - Get(resource schema.GroupResource, name string) (*autoscalingapi.Scale, error) - - // Update updates the scale of the the given scalable resource. - Update(resource schema.GroupResource, scale *autoscalingapi.Scale) (*autoscalingapi.Scale, error) -} diff --git a/vendor/k8s.io/client-go/scale/scheme/appsint/doc.go b/vendor/k8s.io/client-go/scale/scheme/appsint/doc.go deleted file mode 100644 index 16f29e2af..000000000 --- a/vendor/k8s.io/client-go/scale/scheme/appsint/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package appsint contains the necessary scaffolding of the -// internal version of extensions as required by conversion logic. -// It doesn't have any of its own types -- it's just necessary to -// get the expected behavior out of runtime.Scheme.ConvertToVersion -// and associated methods. 
-package appsint diff --git a/vendor/k8s.io/client-go/scale/scheme/appsint/register.go b/vendor/k8s.io/client-go/scale/scheme/appsint/register.go deleted file mode 100644 index bbeaedac5..000000000 --- a/vendor/k8s.io/client-go/scale/scheme/appsint/register.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package appsint - -import ( - appsv1beta2 "k8s.io/api/apps/v1beta2" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - scalescheme "k8s.io/client-go/scale/scheme" -) - -// GroupName is the group name use in this package -const GroupName = appsv1beta2.GroupName - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &scalescheme.Scale{}, - ) - return nil -} diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/conversion.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/conversion.go deleted file mode 100644 index af062b3c6..000000000 --- a/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/conversion.go +++ /dev/null @@ -1,87 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package appsv1beta1 - -import ( - "fmt" - - v1beta1 "k8s.io/api/apps/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" - scheme "k8s.io/client-go/scale/scheme" -) - -// addConversions registers conversions between the internal version -// of Scale and supported external versions of Scale. 
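The conversions below flatten a structured metav1.LabelSelector into the string form carried by ScaleStatus.TargetSelector, and parse it back on the way in. A sketch of that round trip using the same apimachinery helpers:

package sketch

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func selectorRoundTrip() error {
	structured := &metav1.LabelSelector{MatchLabels: map[string]string{"app": "demo"}}

	// Structured -> string, as in Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus.
	sel, err := metav1.LabelSelectorAsSelector(structured)
	if err != nil {
		return err
	}
	fmt.Println(sel.String()) // "app=demo"

	// String -> structured, as in the reverse conversion.
	parsed, err := metav1.ParseToLabelSelector(sel.String())
	if err != nil {
		return err
	}
	fmt.Println(parsed.MatchLabels["app"]) // "demo"
	return nil
}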
-func addConversionFuncs(scheme *runtime.Scheme) error { - err := scheme.AddConversionFuncs( - Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus, - Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus, - ) - if err != nil { - return err - } - - return nil -} -func Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(in *scheme.ScaleStatus, out *v1beta1.ScaleStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - out.Selector = nil - out.TargetSelector = "" - if in.Selector != nil { - if in.Selector.MatchExpressions == nil || len(in.Selector.MatchExpressions) == 0 { - out.Selector = in.Selector.MatchLabels - } - - selector, err := metav1.LabelSelectorAsSelector(in.Selector) - if err != nil { - return fmt.Errorf("invalid label selector: %v", err) - } - out.TargetSelector = selector.String() - } - - return nil -} - -func Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(in *v1beta1.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - - // Normally when 2 fields map to the same internal value we favor the old field, since - // old clients can't be expected to know about new fields but clients that know about the - // new field can be expected to know about the old field (though that's not quite true, due - // to kubectl apply). However, these fields are readonly, so any non-nil value should work. - if in.TargetSelector != "" { - labelSelector, err := metav1.ParseToLabelSelector(in.TargetSelector) - if err != nil { - out.Selector = nil - return fmt.Errorf("failed to parse target selector: %v", err) - } - out.Selector = labelSelector - } else if in.Selector != nil { - out.Selector = new(metav1.LabelSelector) - selector := make(map[string]string) - for key, val := range in.Selector { - selector[key] = val - } - out.Selector.MatchLabels = selector - } else { - out.Selector = nil - } - - return nil -} diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/doc.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/doc.go deleted file mode 100644 index 830619b44..000000000 --- a/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:conversion-gen=k8s.io/kubernetes/vendor/k8s.io/client-go/scale/scheme -// +k8s:conversion-gen-external-types=k8s.io/api/apps/v1beta1 - -package appsv1beta1 // import "k8s.io/client-go/scale/scheme/appsv1beta1" diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/register.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/register.go deleted file mode 100644 index a684f2d53..000000000 --- a/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/register.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package appsv1beta1 - -import ( - appsapiv1beta1 "k8s.io/api/apps/v1beta1" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = appsapiv1beta1.GroupName - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - localSchemeBuilder = &appsapiv1beta1.SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addConversionFuncs) -} diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/zz_generated.conversion.go deleted file mode 100644 index 16d53c697..000000000 --- a/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/zz_generated.conversion.go +++ /dev/null @@ -1,143 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by conversion-gen. DO NOT EDIT. - -package appsv1beta1 - -import ( - v1beta1 "k8s.io/api/apps/v1beta1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - scheme "k8s.io/client-go/scale/scheme" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. 
-func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*v1beta1.Scale)(nil), (*scheme.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_Scale_To_scheme_Scale(a.(*v1beta1.Scale), b.(*scheme.Scale), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*scheme.Scale)(nil), (*v1beta1.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_scheme_Scale_To_v1beta1_Scale(a.(*scheme.Scale), b.(*v1beta1.Scale), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta1.ScaleSpec)(nil), (*scheme.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(a.(*v1beta1.ScaleSpec), b.(*scheme.ScaleSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*scheme.ScaleSpec)(nil), (*v1beta1.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(a.(*scheme.ScaleSpec), b.(*v1beta1.ScaleSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta1.ScaleStatus)(nil), (*scheme.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(a.(*v1beta1.ScaleStatus), b.(*scheme.ScaleStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*scheme.ScaleStatus)(nil), (*v1beta1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(a.(*scheme.ScaleStatus), b.(*v1beta1.ScaleStatus), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*scheme.ScaleStatus)(nil), (*v1beta1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(a.(*scheme.ScaleStatus), b.(*v1beta1.ScaleStatus), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta1.ScaleStatus)(nil), (*scheme.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(a.(*v1beta1.ScaleStatus), b.(*scheme.ScaleStatus), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_v1beta1_Scale_To_scheme_Scale(in *v1beta1.Scale, out *scheme.Scale, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta1_Scale_To_scheme_Scale is an autogenerated conversion function. -func Convert_v1beta1_Scale_To_scheme_Scale(in *v1beta1.Scale, out *scheme.Scale, s conversion.Scope) error { - return autoConvert_v1beta1_Scale_To_scheme_Scale(in, out, s) -} - -func autoConvert_scheme_Scale_To_v1beta1_Scale(in *scheme.Scale, out *v1beta1.Scale, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_scheme_Scale_To_v1beta1_Scale is an autogenerated conversion function. 
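The generated functions above only take effect once RegisterConversions has wired them into a runtime.Scheme; the scale client does that wiring internally, but they can also be exercised directly. A sketch, assuming the register.go files of these packages are used as-is:

package example

import (
	appsv1beta1 "k8s.io/api/apps/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
	scalescheme "k8s.io/client-go/scale/scheme"
	scaleappsv1beta1 "k8s.io/client-go/scale/scheme/appsv1beta1"
)

// toInternal converts an apps/v1beta1 Scale into the package-internal
// hub type using the conversions registered by AddToScheme.
func toInternal(in *appsv1beta1.Scale) (*scalescheme.Scale, error) {
	s := runtime.NewScheme()
	if err := scalescheme.AddToScheme(s); err != nil { // internal Scale type
		return nil, err
	}
	if err := scaleappsv1beta1.AddToScheme(s); err != nil { // v1beta1 types + conversions
		return nil, err
	}
	out := &scalescheme.Scale{}
	if err := s.Convert(in, out, nil); err != nil {
		return nil, err
	}
	return out, nil
}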
-func Convert_scheme_Scale_To_v1beta1_Scale(in *scheme.Scale, out *v1beta1.Scale, s conversion.Scope) error { - return autoConvert_scheme_Scale_To_v1beta1_Scale(in, out, s) -} - -func autoConvert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(in *v1beta1.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error { - out.Replicas = in.Replicas - return nil -} - -// Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec is an autogenerated conversion function. -func Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(in *v1beta1.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error { - return autoConvert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(in, out, s) -} - -func autoConvert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(in *scheme.ScaleSpec, out *v1beta1.ScaleSpec, s conversion.Scope) error { - out.Replicas = in.Replicas - return nil -} - -// Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec is an autogenerated conversion function. -func Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(in *scheme.ScaleSpec, out *v1beta1.ScaleSpec, s conversion.Scope) error { - return autoConvert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(in, out, s) -} - -func autoConvert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(in *v1beta1.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - // WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs *k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector) - // WARNING: in.TargetSelector requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(in *scheme.ScaleStatus, out *v1beta1.ScaleStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - // WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector vs map[string]string) - return nil -} diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/conversion.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/conversion.go deleted file mode 100644 index f07de6bda..000000000 --- a/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/conversion.go +++ /dev/null @@ -1,87 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package appsv1beta2 - -import ( - "fmt" - - v1beta2 "k8s.io/api/apps/v1beta2" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" - scheme "k8s.io/client-go/scale/scheme" -) - -// addConversions registers conversions between the internal version -// of Scale and supported external versions of Scale. 
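The appsv1beta2 status conversion below has the same shape as the v1beta1 one above; the subtle branch is that a set-based selector has no map representation, so only TargetSelector can carry it. A quick illustration with the metav1 helpers (hypothetical labels):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Equality-based: representable as both a map and a string.
	eq := &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}}
	// Set-based: representable only as a string selector.
	set := &metav1.LabelSelector{MatchExpressions: []metav1.LabelSelectorRequirement{
		{Key: "app", Operator: metav1.LabelSelectorOpIn, Values: []string{"web", "api"}},
	}}
	s1, _ := metav1.LabelSelectorAsSelector(eq)
	s2, _ := metav1.LabelSelectorAsSelector(set)
	fmt.Println(s1.String()) // app=web            -> fits Selector map and TargetSelector
	fmt.Println(s2.String()) // e.g. app in (api,web) -> TargetSelector only; Selector stays nil
}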
-func addConversionFuncs(scheme *runtime.Scheme) error { - err := scheme.AddConversionFuncs( - Convert_scheme_ScaleStatus_To_v1beta2_ScaleStatus, - Convert_v1beta2_ScaleStatus_To_scheme_ScaleStatus, - ) - if err != nil { - return err - } - - return nil -} -func Convert_scheme_ScaleStatus_To_v1beta2_ScaleStatus(in *scheme.ScaleStatus, out *v1beta2.ScaleStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - out.Selector = nil - out.TargetSelector = "" - if in.Selector != nil { - if in.Selector.MatchExpressions == nil || len(in.Selector.MatchExpressions) == 0 { - out.Selector = in.Selector.MatchLabels - } - - selector, err := metav1.LabelSelectorAsSelector(in.Selector) - if err != nil { - return fmt.Errorf("invalid label selector: %v", err) - } - out.TargetSelector = selector.String() - } - - return nil -} - -func Convert_v1beta2_ScaleStatus_To_scheme_ScaleStatus(in *v1beta2.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - - // Normally when 2 fields map to the same internal value we favor the old field, since - // old clients can't be expected to know about new fields but clients that know about the - // new field can be expected to know about the old field (though that's not quite true, due - // to kubectl apply). However, these fields are readonly, so any non-nil value should work. - if in.TargetSelector != "" { - labelSelector, err := metav1.ParseToLabelSelector(in.TargetSelector) - if err != nil { - out.Selector = nil - return fmt.Errorf("failed to parse target selector: %v", err) - } - out.Selector = labelSelector - } else if in.Selector != nil { - out.Selector = new(metav1.LabelSelector) - selector := make(map[string]string) - for key, val := range in.Selector { - selector[key] = val - } - out.Selector.MatchLabels = selector - } else { - out.Selector = nil - } - - return nil -} diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/doc.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/doc.go deleted file mode 100644 index c21a56d56..000000000 --- a/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:conversion-gen=k8s.io/kubernetes/vendor/k8s.io/client-go/scale/scheme -// +k8s:conversion-gen-external-types=k8s.io/api/apps/v1beta2 - -package appsv1beta2 // import "k8s.io/client-go/scale/scheme/appsv1beta2" diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/register.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/register.go deleted file mode 100644 index 88de08932..000000000 --- a/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/register.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package appsv1beta2 - -import ( - appsapiv1beta2 "k8s.io/api/apps/v1beta2" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = appsapiv1beta2.GroupName - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta2"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - localSchemeBuilder = &appsapiv1beta2.SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addConversionFuncs) -} diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/zz_generated.conversion.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/zz_generated.conversion.go deleted file mode 100644 index 1901be8cb..000000000 --- a/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/zz_generated.conversion.go +++ /dev/null @@ -1,143 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by conversion-gen. DO NOT EDIT. - -package appsv1beta2 - -import ( - v1beta2 "k8s.io/api/apps/v1beta2" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - scheme "k8s.io/client-go/scale/scheme" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. 
-func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*v1beta2.Scale)(nil), (*scheme.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_Scale_To_scheme_Scale(a.(*v1beta2.Scale), b.(*scheme.Scale), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*scheme.Scale)(nil), (*v1beta2.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_scheme_Scale_To_v1beta2_Scale(a.(*scheme.Scale), b.(*v1beta2.Scale), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.ScaleSpec)(nil), (*scheme.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ScaleSpec_To_scheme_ScaleSpec(a.(*v1beta2.ScaleSpec), b.(*scheme.ScaleSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*scheme.ScaleSpec)(nil), (*v1beta2.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_scheme_ScaleSpec_To_v1beta2_ScaleSpec(a.(*scheme.ScaleSpec), b.(*v1beta2.ScaleSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.ScaleStatus)(nil), (*scheme.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ScaleStatus_To_scheme_ScaleStatus(a.(*v1beta2.ScaleStatus), b.(*scheme.ScaleStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*scheme.ScaleStatus)(nil), (*v1beta2.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_scheme_ScaleStatus_To_v1beta2_ScaleStatus(a.(*scheme.ScaleStatus), b.(*v1beta2.ScaleStatus), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*scheme.ScaleStatus)(nil), (*v1beta2.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_scheme_ScaleStatus_To_v1beta2_ScaleStatus(a.(*scheme.ScaleStatus), b.(*v1beta2.ScaleStatus), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.ScaleStatus)(nil), (*scheme.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ScaleStatus_To_scheme_ScaleStatus(a.(*v1beta2.ScaleStatus), b.(*scheme.ScaleStatus), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_v1beta2_Scale_To_scheme_Scale(in *v1beta2.Scale, out *scheme.Scale, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta2_ScaleSpec_To_scheme_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta2_ScaleStatus_To_scheme_ScaleStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta2_Scale_To_scheme_Scale is an autogenerated conversion function. -func Convert_v1beta2_Scale_To_scheme_Scale(in *v1beta2.Scale, out *scheme.Scale, s conversion.Scope) error { - return autoConvert_v1beta2_Scale_To_scheme_Scale(in, out, s) -} - -func autoConvert_scheme_Scale_To_v1beta2_Scale(in *scheme.Scale, out *v1beta2.Scale, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_scheme_ScaleSpec_To_v1beta2_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_scheme_ScaleStatus_To_v1beta2_ScaleStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_scheme_Scale_To_v1beta2_Scale is an autogenerated conversion function. 
-func Convert_scheme_Scale_To_v1beta2_Scale(in *scheme.Scale, out *v1beta2.Scale, s conversion.Scope) error { - return autoConvert_scheme_Scale_To_v1beta2_Scale(in, out, s) -} - -func autoConvert_v1beta2_ScaleSpec_To_scheme_ScaleSpec(in *v1beta2.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error { - out.Replicas = in.Replicas - return nil -} - -// Convert_v1beta2_ScaleSpec_To_scheme_ScaleSpec is an autogenerated conversion function. -func Convert_v1beta2_ScaleSpec_To_scheme_ScaleSpec(in *v1beta2.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error { - return autoConvert_v1beta2_ScaleSpec_To_scheme_ScaleSpec(in, out, s) -} - -func autoConvert_scheme_ScaleSpec_To_v1beta2_ScaleSpec(in *scheme.ScaleSpec, out *v1beta2.ScaleSpec, s conversion.Scope) error { - out.Replicas = in.Replicas - return nil -} - -// Convert_scheme_ScaleSpec_To_v1beta2_ScaleSpec is an autogenerated conversion function. -func Convert_scheme_ScaleSpec_To_v1beta2_ScaleSpec(in *scheme.ScaleSpec, out *v1beta2.ScaleSpec, s conversion.Scope) error { - return autoConvert_scheme_ScaleSpec_To_v1beta2_ScaleSpec(in, out, s) -} - -func autoConvert_v1beta2_ScaleStatus_To_scheme_ScaleStatus(in *v1beta2.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - // WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs *k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector) - // WARNING: in.TargetSelector requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_scheme_ScaleStatus_To_v1beta2_ScaleStatus(in *scheme.ScaleStatus, out *v1beta2.ScaleStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - // WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector vs map[string]string) - return nil -} diff --git a/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/conversion.go b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/conversion.go deleted file mode 100644 index a775bcc22..000000000 --- a/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/conversion.go +++ /dev/null @@ -1,69 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package autoscalingv1 - -import ( - "fmt" - - v1 "k8s.io/api/autoscaling/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" - scheme "k8s.io/client-go/scale/scheme" -) - -// addConversions registers conversions between the internal version -// of Scale and supported external versions of Scale. 
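autoscaling/v1 stores the selector as a plain string rather than the map/string pair used by the apps groups, so the conversions that follow simply round-trip through the metav1 print/parse helpers. A sketch of that round trip:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	in := &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}}
	sel, err := metav1.LabelSelectorAsSelector(in) // structured -> labels.Selector
	if err != nil {
		panic(err)
	}
	str := sel.String() // "app=web", the form held in autoscaling/v1 ScaleStatus
	back, err := metav1.ParseToLabelSelector(str) // string -> structured again
	if err != nil {
		panic(err)
	}
	fmt.Println(back.MatchLabels["app"]) // "web"
}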
-func addConversionFuncs(scheme *runtime.Scheme) error { - err := scheme.AddConversionFuncs( - Convert_scheme_ScaleStatus_To_v1_ScaleStatus, - Convert_v1_ScaleStatus_To_scheme_ScaleStatus, - ) - if err != nil { - return err - } - - return nil -} - -func Convert_scheme_ScaleStatus_To_v1_ScaleStatus(in *scheme.ScaleStatus, out *v1.ScaleStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - out.Selector = "" - if in.Selector != nil { - selector, err := metav1.LabelSelectorAsSelector(in.Selector) - if err != nil { - return fmt.Errorf("invalid label selector: %v", err) - } - out.Selector = selector.String() - } - - return nil -} - -func Convert_v1_ScaleStatus_To_scheme_ScaleStatus(in *v1.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - if in.Selector != "" { - labelSelector, err := metav1.ParseToLabelSelector(in.Selector) - if err != nil { - out.Selector = nil - return fmt.Errorf("failed to parse target selector: %v", err) - } - out.Selector = labelSelector - } - - return nil -} diff --git a/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/doc.go b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/doc.go deleted file mode 100644 index 03684dd90..000000000 --- a/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:conversion-gen=k8s.io/kubernetes/vendor/k8s.io/client-go/scale/scheme -// +k8s:conversion-gen-external-types=k8s.io/api/autoscaling/v1 - -package autoscalingv1 // import "k8s.io/client-go/scale/scheme/autoscalingv1" diff --git a/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/register.go b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/register.go deleted file mode 100644 index b15701c4f..000000000 --- a/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/register.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package autoscalingv1 - -import ( - autoscalingapiv1 "k8s.io/api/autoscaling/v1" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = autoscalingapiv1.GroupName - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - localSchemeBuilder = &autoscalingapiv1.SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addConversionFuncs) -} diff --git a/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/zz_generated.conversion.go b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/zz_generated.conversion.go deleted file mode 100644 index 203f8b323..000000000 --- a/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/zz_generated.conversion.go +++ /dev/null @@ -1,142 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by conversion-gen. DO NOT EDIT. - -package autoscalingv1 - -import ( - v1 "k8s.io/api/autoscaling/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - scheme "k8s.io/client-go/scale/scheme" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. 
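Every group repeats the register.go pattern above precisely so that callers can compose an arbitrary scheme from just the groups they need, and the *int packages exist so ConvertToVersion has an internal version to land on. A sketch of that composition (hypothetical helper):

package example

import (
	autoscalingv1 "k8s.io/api/autoscaling/v1"
	"k8s.io/apimachinery/pkg/runtime"
	scalescheme "k8s.io/client-go/scale/scheme"
	scaleautoscalingv1 "k8s.io/client-go/scale/scheme/autoscalingv1"
)

// internalFrom converts an external autoscaling/v1 Scale to the internal
// hub version registered by the scheme package.
func internalFrom(ext *autoscalingv1.Scale) (runtime.Object, error) {
	s := runtime.NewScheme()
	if err := scalescheme.AddToScheme(s); err != nil {
		return nil, err
	}
	if err := scaleautoscalingv1.AddToScheme(s); err != nil {
		return nil, err
	}
	// SchemeGroupVersion here is {autoscaling, __internal}; ConvertToVersion
	// resolves it to the internal Scale kind added by addKnownTypes.
	return s.ConvertToVersion(ext, scalescheme.SchemeGroupVersion)
}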
-func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*v1.Scale)(nil), (*scheme.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Scale_To_scheme_Scale(a.(*v1.Scale), b.(*scheme.Scale), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*scheme.Scale)(nil), (*v1.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_scheme_Scale_To_v1_Scale(a.(*scheme.Scale), b.(*v1.Scale), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.ScaleSpec)(nil), (*scheme.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ScaleSpec_To_scheme_ScaleSpec(a.(*v1.ScaleSpec), b.(*scheme.ScaleSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*scheme.ScaleSpec)(nil), (*v1.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_scheme_ScaleSpec_To_v1_ScaleSpec(a.(*scheme.ScaleSpec), b.(*v1.ScaleSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.ScaleStatus)(nil), (*scheme.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ScaleStatus_To_scheme_ScaleStatus(a.(*v1.ScaleStatus), b.(*scheme.ScaleStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*scheme.ScaleStatus)(nil), (*v1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_scheme_ScaleStatus_To_v1_ScaleStatus(a.(*scheme.ScaleStatus), b.(*v1.ScaleStatus), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*scheme.ScaleStatus)(nil), (*v1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_scheme_ScaleStatus_To_v1_ScaleStatus(a.(*scheme.ScaleStatus), b.(*v1.ScaleStatus), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1.ScaleStatus)(nil), (*scheme.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ScaleStatus_To_scheme_ScaleStatus(a.(*v1.ScaleStatus), b.(*scheme.ScaleStatus), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_v1_Scale_To_scheme_Scale(in *v1.Scale, out *scheme.Scale, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_ScaleSpec_To_scheme_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_ScaleStatus_To_scheme_ScaleStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_Scale_To_scheme_Scale is an autogenerated conversion function. -func Convert_v1_Scale_To_scheme_Scale(in *v1.Scale, out *scheme.Scale, s conversion.Scope) error { - return autoConvert_v1_Scale_To_scheme_Scale(in, out, s) -} - -func autoConvert_scheme_Scale_To_v1_Scale(in *scheme.Scale, out *v1.Scale, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_scheme_ScaleSpec_To_v1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_scheme_ScaleStatus_To_v1_ScaleStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_scheme_Scale_To_v1_Scale is an autogenerated conversion function. 
-func Convert_scheme_Scale_To_v1_Scale(in *scheme.Scale, out *v1.Scale, s conversion.Scope) error { - return autoConvert_scheme_Scale_To_v1_Scale(in, out, s) -} - -func autoConvert_v1_ScaleSpec_To_scheme_ScaleSpec(in *v1.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error { - out.Replicas = in.Replicas - return nil -} - -// Convert_v1_ScaleSpec_To_scheme_ScaleSpec is an autogenerated conversion function. -func Convert_v1_ScaleSpec_To_scheme_ScaleSpec(in *v1.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error { - return autoConvert_v1_ScaleSpec_To_scheme_ScaleSpec(in, out, s) -} - -func autoConvert_scheme_ScaleSpec_To_v1_ScaleSpec(in *scheme.ScaleSpec, out *v1.ScaleSpec, s conversion.Scope) error { - out.Replicas = in.Replicas - return nil -} - -// Convert_scheme_ScaleSpec_To_v1_ScaleSpec is an autogenerated conversion function. -func Convert_scheme_ScaleSpec_To_v1_ScaleSpec(in *scheme.ScaleSpec, out *v1.ScaleSpec, s conversion.Scope) error { - return autoConvert_scheme_ScaleSpec_To_v1_ScaleSpec(in, out, s) -} - -func autoConvert_v1_ScaleStatus_To_scheme_ScaleStatus(in *v1.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - // WARNING: in.Selector requires manual conversion: inconvertible types (string vs *k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector) - return nil -} - -func autoConvert_scheme_ScaleStatus_To_v1_ScaleStatus(in *scheme.ScaleStatus, out *v1.ScaleStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - // WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector vs string) - return nil -} diff --git a/vendor/k8s.io/client-go/scale/scheme/doc.go b/vendor/k8s.io/client-go/scale/scheme/doc.go deleted file mode 100644 index 0203d6d5a..000000000 --- a/vendor/k8s.io/client-go/scale/scheme/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package - -// Package scheme contains a runtime.Scheme to be used for serializing -// and deserializing different versions of Scale, and for converting -// in between them. -package scheme diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsint/doc.go b/vendor/k8s.io/client-go/scale/scheme/extensionsint/doc.go deleted file mode 100644 index 9aaac6086..000000000 --- a/vendor/k8s.io/client-go/scale/scheme/extensionsint/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Package extensionsint contains the necessary scaffolding of the -// internal version of extensions as required by conversion logic. -// It doesn't have any of its own types -- it's just necessary to -// get the expected behavior out of runtime.Scheme.ConvertToVersion -// and associated methods. -package extensionsint diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsint/register.go b/vendor/k8s.io/client-go/scale/scheme/extensionsint/register.go deleted file mode 100644 index 5a96ac561..000000000 --- a/vendor/k8s.io/client-go/scale/scheme/extensionsint/register.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package extensionsint - -import ( - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - scalescheme "k8s.io/client-go/scale/scheme" -) - -// GroupName is the group name use in this package -const GroupName = extensionsv1beta1.GroupName - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &scalescheme.Scale{}, - ) - return nil -} diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/conversion.go b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/conversion.go deleted file mode 100644 index 1b6b9e610..000000000 --- a/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/conversion.go +++ /dev/null @@ -1,87 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package extensionsv1beta1 - -import ( - "fmt" - - v1beta1 "k8s.io/api/extensions/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" - scheme "k8s.io/client-go/scale/scheme" -) - -// addConversions registers conversions between the internal version -// of Scale and supported external versions of Scale. -func addConversionFuncs(scheme *runtime.Scheme) error { - err := scheme.AddConversionFuncs( - Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus, - Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus, - ) - if err != nil { - return err - } - - return nil -} -func Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(in *scheme.ScaleStatus, out *v1beta1.ScaleStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - out.Selector = nil - out.TargetSelector = "" - if in.Selector != nil { - if in.Selector.MatchExpressions == nil || len(in.Selector.MatchExpressions) == 0 { - out.Selector = in.Selector.MatchLabels - } - - selector, err := metav1.LabelSelectorAsSelector(in.Selector) - if err != nil { - return fmt.Errorf("invalid label selector: %v", err) - } - out.TargetSelector = selector.String() - } - - return nil -} - -func Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(in *v1beta1.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - - // Normally when 2 fields map to the same internal value we favor the old field, since - // old clients can't be expected to know about new fields but clients that know about the - // new field can be expected to know about the old field (though that's not quite true, due - // to kubectl apply). However, these fields are readonly, so any non-nil value should work. - if in.TargetSelector != "" { - labelSelector, err := metav1.ParseToLabelSelector(in.TargetSelector) - if err != nil { - out.Selector = nil - return fmt.Errorf("failed to parse target selector: %v", err) - } - out.Selector = labelSelector - } else if in.Selector != nil { - out.Selector = new(metav1.LabelSelector) - selector := make(map[string]string) - for key, val := range in.Selector { - selector[key] = val - } - out.Selector.MatchLabels = selector - } else { - out.Selector = nil - } - - return nil -} diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/doc.go b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/doc.go deleted file mode 100644 index 1e719884f..000000000 --- a/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// +k8s:conversion-gen=k8s.io/kubernetes/vendor/k8s.io/client-go/scale/scheme -// +k8s:conversion-gen-external-types=k8s.io/api/extensions/v1beta1 - -package extensionsv1beta1 // import "k8s.io/client-go/scale/scheme/extensionsv1beta1" diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/register.go b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/register.go deleted file mode 100644 index aed1174e0..000000000 --- a/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/register.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package extensionsv1beta1 - -import ( - extensionsapiv1beta1 "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = extensionsapiv1beta1.GroupName - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - localSchemeBuilder = &extensionsapiv1beta1.SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addConversionFuncs) -} diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/zz_generated.conversion.go deleted file mode 100644 index 99aef8653..000000000 --- a/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/zz_generated.conversion.go +++ /dev/null @@ -1,143 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by conversion-gen. DO NOT EDIT. - -package extensionsv1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - scheme "k8s.io/client-go/scale/scheme" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. 
-// Public to allow building arbitrary schemes. -func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*v1beta1.Scale)(nil), (*scheme.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_Scale_To_scheme_Scale(a.(*v1beta1.Scale), b.(*scheme.Scale), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*scheme.Scale)(nil), (*v1beta1.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_scheme_Scale_To_v1beta1_Scale(a.(*scheme.Scale), b.(*v1beta1.Scale), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta1.ScaleSpec)(nil), (*scheme.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(a.(*v1beta1.ScaleSpec), b.(*scheme.ScaleSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*scheme.ScaleSpec)(nil), (*v1beta1.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(a.(*scheme.ScaleSpec), b.(*v1beta1.ScaleSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta1.ScaleStatus)(nil), (*scheme.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(a.(*v1beta1.ScaleStatus), b.(*scheme.ScaleStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*scheme.ScaleStatus)(nil), (*v1beta1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(a.(*scheme.ScaleStatus), b.(*v1beta1.ScaleStatus), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*scheme.ScaleStatus)(nil), (*v1beta1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(a.(*scheme.ScaleStatus), b.(*v1beta1.ScaleStatus), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta1.ScaleStatus)(nil), (*scheme.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(a.(*v1beta1.ScaleStatus), b.(*scheme.ScaleStatus), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_v1beta1_Scale_To_scheme_Scale(in *v1beta1.Scale, out *scheme.Scale, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta1_Scale_To_scheme_Scale is an autogenerated conversion function. 
-func Convert_v1beta1_Scale_To_scheme_Scale(in *v1beta1.Scale, out *scheme.Scale, s conversion.Scope) error { - return autoConvert_v1beta1_Scale_To_scheme_Scale(in, out, s) -} - -func autoConvert_scheme_Scale_To_v1beta1_Scale(in *scheme.Scale, out *v1beta1.Scale, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_scheme_Scale_To_v1beta1_Scale is an autogenerated conversion function. -func Convert_scheme_Scale_To_v1beta1_Scale(in *scheme.Scale, out *v1beta1.Scale, s conversion.Scope) error { - return autoConvert_scheme_Scale_To_v1beta1_Scale(in, out, s) -} - -func autoConvert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(in *v1beta1.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error { - out.Replicas = in.Replicas - return nil -} - -// Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec is an autogenerated conversion function. -func Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(in *v1beta1.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error { - return autoConvert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(in, out, s) -} - -func autoConvert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(in *scheme.ScaleSpec, out *v1beta1.ScaleSpec, s conversion.Scope) error { - out.Replicas = in.Replicas - return nil -} - -// Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec is an autogenerated conversion function. -func Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(in *scheme.ScaleSpec, out *v1beta1.ScaleSpec, s conversion.Scope) error { - return autoConvert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(in, out, s) -} - -func autoConvert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(in *v1beta1.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - // WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs *k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector) - // WARNING: in.TargetSelector requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(in *scheme.ScaleStatus, out *v1beta1.ScaleStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - // WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector vs map[string]string) - return nil -} diff --git a/vendor/k8s.io/client-go/scale/scheme/register.go b/vendor/k8s.io/client-go/scale/scheme/register.go deleted file mode 100644 index 7e6decfff..000000000 --- a/vendor/k8s.io/client-go/scale/scheme/register.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package scheme - -import ( - autoscalingv1 "k8s.io/api/autoscaling/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = autoscalingv1.GroupName - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &Scale{}, - ) - return nil -} diff --git a/vendor/k8s.io/client-go/scale/scheme/types.go b/vendor/k8s.io/client-go/scale/scheme/types.go deleted file mode 100644 index 13aec2b3c..000000000 --- a/vendor/k8s.io/client-go/scale/scheme/types.go +++ /dev/null @@ -1,60 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package scheme - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// This file contains our own "internal" version of scale that we use for conversions, -// since we can't use the main Kubernetes internal versions. - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Scale represents a scaling request for a resource. -type Scale struct { - metav1.TypeMeta - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - metav1.ObjectMeta - - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. - // +optional - Spec ScaleSpec - - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. - // +optional - Status ScaleStatus -} - -// ScaleSpec describes the attributes of a scale subresource. -type ScaleSpec struct { - // desired number of instances for the scaled object. - // +optional - Replicas int32 -} - -// ScaleStatus represents the current status of a scale subresource. -type ScaleStatus struct { - // actual number of observed instances of the scaled object. - Replicas int32 - - // label query over pods that should match the replicas count. 
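The internal Scale above is a plain struct; the generated deepcopy helpers in the next file are what give it safe value semantics, since ScaleStatus holds a pointer. A small sketch of why that matters (hypothetical values):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	scalescheme "k8s.io/client-go/scale/scheme"
)

func main() {
	orig := &scalescheme.Scale{
		Spec: scalescheme.ScaleSpec{Replicas: 3},
		Status: scalescheme.ScaleStatus{
			Replicas: 3,
			Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
		},
	}
	cp := orig.DeepCopy()
	cp.Status.Selector.MatchLabels["app"] = "api" // mutate only the copy...
	fmt.Println(orig.Status.Selector.MatchLabels["app"]) // ...original is still "web"
}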
-    // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
-    // +optional
-    Selector *metav1.LabelSelector
-}
diff --git a/vendor/k8s.io/client-go/scale/scheme/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/scale/scheme/zz_generated.deepcopy.go
deleted file mode 100644
index 3db708154..000000000
--- a/vendor/k8s.io/client-go/scale/scheme/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by deepcopy-gen. DO NOT EDIT.
-
-package scheme
-
-import (
-    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    runtime "k8s.io/apimachinery/pkg/runtime"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Scale) DeepCopyInto(out *Scale) {
-    *out = *in
-    out.TypeMeta = in.TypeMeta
-    in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-    out.Spec = in.Spec
-    in.Status.DeepCopyInto(&out.Status)
-    return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale.
-func (in *Scale) DeepCopy() *Scale {
-    if in == nil {
-        return nil
-    }
-    out := new(Scale)
-    in.DeepCopyInto(out)
-    return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Scale) DeepCopyObject() runtime.Object {
-    if c := in.DeepCopy(); c != nil {
-        return c
-    }
-    return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) {
-    *out = *in
-    return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec.
-func (in *ScaleSpec) DeepCopy() *ScaleSpec {
-    if in == nil {
-        return nil
-    }
-    out := new(ScaleSpec)
-    in.DeepCopyInto(out)
-    return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) {
-    *out = *in
-    if in.Selector != nil {
-        in, out := &in.Selector, &out.Selector
-        *out = new(v1.LabelSelector)
-        (*in).DeepCopyInto(*out)
-    }
-    return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus.
-func (in *ScaleStatus) DeepCopy() *ScaleStatus {
-    if in == nil {
-        return nil
-    }
-    out := new(ScaleStatus)
-    in.DeepCopyInto(out)
-    return out
-}
diff --git a/vendor/k8s.io/client-go/scale/util.go b/vendor/k8s.io/client-go/scale/util.go
deleted file mode 100644
index 2f43a7a79..000000000
--- a/vendor/k8s.io/client-go/scale/util.go
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package scale
-
-import (
-    "fmt"
-    "strings"
-    "sync"
-
-    "k8s.io/apimachinery/pkg/api/meta"
-    "k8s.io/apimachinery/pkg/runtime"
-    "k8s.io/apimachinery/pkg/runtime/schema"
-    serializer "k8s.io/apimachinery/pkg/runtime/serializer"
-    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-    "k8s.io/client-go/discovery"
-    scalescheme "k8s.io/client-go/scale/scheme"
-    scaleappsint "k8s.io/client-go/scale/scheme/appsint"
-    scaleappsv1beta1 "k8s.io/client-go/scale/scheme/appsv1beta1"
-    scaleappsv1beta2 "k8s.io/client-go/scale/scheme/appsv1beta2"
-    scaleautoscaling "k8s.io/client-go/scale/scheme/autoscalingv1"
-    scaleextint "k8s.io/client-go/scale/scheme/extensionsint"
-    scaleext "k8s.io/client-go/scale/scheme/extensionsv1beta1"
-)
-
-// PreferredResourceMapper determines the preferred version of a resource to scale
-type PreferredResourceMapper interface {
-    // ResourceFor takes a partial resource and returns the preferred resource.
-    ResourceFor(resource schema.GroupVersionResource) (preferredResource schema.GroupVersionResource, err error)
-}
-
-// Ensure a RESTMapper satisfies the PreferredResourceMapper interface
-var _ PreferredResourceMapper = meta.RESTMapper(nil)
-
-// ScaleKindResolver knows about the relationship between
-// resources and the GroupVersionKind of their scale subresources.
-type ScaleKindResolver interface {
-    // ScaleForResource returns the GroupVersionKind of the
-    // scale subresource for the given GroupVersionResource.
-    ScaleForResource(resource schema.GroupVersionResource) (scaleVersion schema.GroupVersionKind, err error)
-}
-
-// discoveryScaleResolver is a ScaleKindResolver that uses
-// a DiscoveryInterface to associate resources with their
-// scale-kinds
-type discoveryScaleResolver struct {
-    discoveryClient discovery.ServerResourcesInterface
-}
-
-func (r *discoveryScaleResolver) ScaleForResource(inputRes schema.GroupVersionResource) (scaleVersion schema.GroupVersionKind, err error) {
-    groupVerResources, err := r.discoveryClient.ServerResourcesForGroupVersion(inputRes.GroupVersion().String())
-    if err != nil {
-        return schema.GroupVersionKind{}, fmt.Errorf("unable to fetch discovery information for %s: %v", inputRes.String(), err)
-    }
-
-    for _, resource := range groupVerResources.APIResources {
-        resourceParts := strings.SplitN(resource.Name, "/", 2)
-        if len(resourceParts) != 2 || resourceParts[0] != inputRes.Resource || resourceParts[1] != "scale" {
-            // skip non-scale resources, or scales for resources that we're not looking for
-            continue
-        }
-
-        scaleGV := inputRes.GroupVersion()
-        if resource.Group != "" && resource.Version != "" {
-            scaleGV = schema.GroupVersion{
-                Group:   resource.Group,
-                Version: resource.Version,
-            }
-        }
-
-        return scaleGV.WithKind(resource.Kind), nil
-    }
-
-    return schema.GroupVersionKind{}, fmt.Errorf("could not find scale subresource for %s in discovery information", inputRes.String())
-}
-
-// cachedScaleKindResolver is a ScaleKindResolver that caches results
-// from another ScaleKindResolver, re-fetching on cache misses.
-type cachedScaleKindResolver struct {
-    base ScaleKindResolver
-
-    cache map[schema.GroupVersionResource]schema.GroupVersionKind
-    mu    sync.RWMutex
-}
-
-func (r *cachedScaleKindResolver) ScaleForResource(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
-    r.mu.RLock()
-    gvk, isCached := r.cache[resource]
-    r.mu.RUnlock()
-    if isCached {
-        return gvk, nil
-    }
-
-    // we could have multiple fetches of the same resources, but that's probably
-    // better than limiting to only one reader at once (mu.Mutex),
-    // or blocking checks for other resources while we fetch
-    // (mu.Lock before fetch).
-    gvk, err := r.base.ScaleForResource(resource)
-    if err != nil {
-        return schema.GroupVersionKind{}, err
-    }
-
-    r.mu.Lock()
-    defer r.mu.Unlock()
-    r.cache[resource] = gvk
-
-    return gvk, nil
-}
-
-// NewDiscoveryScaleKindResolver creates a new ScaleKindResolver which uses information from the given
-// discovery client to resolve the correct Scale GroupVersionKind for different resources.
-func NewDiscoveryScaleKindResolver(client discovery.ServerResourcesInterface) ScaleKindResolver {
-    base := &discoveryScaleResolver{
-        discoveryClient: client,
-    }
-
-    return &cachedScaleKindResolver{
-        base:  base,
-        cache: make(map[schema.GroupVersionResource]schema.GroupVersionKind),
-    }
-}
-
-// ScaleConverter knows how to convert between external scale versions.
-type ScaleConverter struct {
-    scheme            *runtime.Scheme
-    codecs            serializer.CodecFactory
-    internalVersioner runtime.GroupVersioner
-}
-
-// NewScaleConverter creates a new ScaleConverter for converting between
-// Scales in autoscaling/v1 and extensions/v1beta1.
-func NewScaleConverter() *ScaleConverter {
-    scheme := runtime.NewScheme()
-    utilruntime.Must(scaleautoscaling.AddToScheme(scheme))
-    utilruntime.Must(scalescheme.AddToScheme(scheme))
-    utilruntime.Must(scaleext.AddToScheme(scheme))
-    utilruntime.Must(scaleextint.AddToScheme(scheme))
-    utilruntime.Must(scaleappsint.AddToScheme(scheme))
-    utilruntime.Must(scaleappsv1beta1.AddToScheme(scheme))
-    utilruntime.Must(scaleappsv1beta2.AddToScheme(scheme))
-
-    return &ScaleConverter{
-        scheme: scheme,
-        codecs: serializer.NewCodecFactory(scheme),
-        internalVersioner: runtime.NewMultiGroupVersioner(
-            scalescheme.SchemeGroupVersion,
-            schema.GroupKind{Group: scaleext.GroupName, Kind: "Scale"},
-            schema.GroupKind{Group: scaleautoscaling.GroupName, Kind: "Scale"},
-            schema.GroupKind{Group: scaleappsv1beta1.GroupName, Kind: "Scale"},
-            schema.GroupKind{Group: scaleappsv1beta2.GroupName, Kind: "Scale"},
-        ),
-    }
-}
-
-// Scheme returns the scheme used by this scale converter.
-func (c *ScaleConverter) Scheme() *runtime.Scheme {
-    return c.scheme
-}
-
-func (c *ScaleConverter) Codecs() serializer.CodecFactory {
-    return c.codecs
-}
-
-func (c *ScaleConverter) ScaleVersions() []schema.GroupVersion {
-    return []schema.GroupVersion{
-        scaleautoscaling.SchemeGroupVersion,
-        scalescheme.SchemeGroupVersion,
-        scaleext.SchemeGroupVersion,
-        scaleextint.SchemeGroupVersion,
-        scaleappsint.SchemeGroupVersion,
-        scaleappsv1beta1.SchemeGroupVersion,
-        scaleappsv1beta2.SchemeGroupVersion,
-    }
-}
-
-// ConvertToVersion converts the given *external* input object to the given output *external* output group-version.
-func (c *ScaleConverter) ConvertToVersion(in runtime.Object, outVersion schema.GroupVersion) (runtime.Object, error) {
-    scaleInt, err := c.scheme.ConvertToVersion(in, c.internalVersioner)
-    if err != nil {
-        return nil, err
-    }
-
-    return c.scheme.ConvertToVersion(scaleInt, outVersion)
-}

From ea4fd72278d4ecad0d3ddfee8ee0f55513b56b4b Mon Sep 17 00:00:00 2001
From: Diana Arroyo
Date: Sun, 27 Jun 2021 00:28:24 -0500
Subject: [PATCH 2/2] Partial 3 cleanup of vendor folder 2nd set (mostly .go
 files).

Signed-off-by: Diana Arroyo
---
 .../go-bindata-assetfs/main.go                |  97 --
 .../evanphx/json-patch/cmd/json-patch/main.go |  56 --
 .../googleapis/gnostic/compiler/main.go       |  16 -
 .../stretchr/testify/_codegen/main.go         | 316 ------
 .../grpc/grpclb/grpc_lb_v1/doc.go             |  21 -
 .../grpclb/grpc_lb_v1/service/service.pb.go   | 154 ---
 .../grpclb/grpc_lb_v1/service/service.proto   |  26 -
 .../grpc/health/grpc_health_v1/health.proto   |  34 -
 .../grpc/interop/grpc_testing/test.pb.go      | 905 ------------------
 .../grpc/interop/grpc_testing/test.proto      | 174 ----
 .../grpc_reflection_v1alpha/reflection.proto  | 136 ---
 .../grpc/reflection/grpc_testing/proto2.pb.go |  77 --
 .../grpc/reflection/grpc_testing/proto2.proto |  22 -
 .../reflection/grpc_testing/proto2_ext.pb.go  |  82 --
 .../reflection/grpc_testing/proto2_ext.proto  |  30 -
 .../reflection/grpc_testing/proto2_ext2.pb.go |  71 --
 .../reflection/grpc_testing/proto2_ext2.proto |  28 -
 .../grpc/reflection/grpc_testing/test.pb.go   | 247 -----
 .../grpc/reflection/grpc_testing/test.proto   |  35 -
 .../reflection/grpc_testingv3/testv3.proto    |  21 -
 .../grpc/stats/grpc_testing/test.pb.go        | 369 -------
 .../grpc/stats/grpc_testing/test.proto        |  43 -
 .../grpc/test/codec_perf/perf.pb.go           |  62 --
 .../grpc/test/codec_perf/perf.proto           |  25 -
 vendor/google.golang.org/grpc/testdata/ca.pem |  15 -
 .../grpc/testdata/server1.key                 |  16 -
 .../grpc/testdata/server1.pem                 |  16 -
 .../grpc/testdata/testdata.go                 |  63 --
 vendor/k8s.io/apiextensions-apiserver/main.go |  40 -
 .../test.0/integration/fixtures/resources.go  | 403 --------
 .../test.0/integration/fixtures/server.go     | 108 ---
 .../test.0/integration/helpers.go             |  94 --
 .../k8s.io/apiserver/plugin/pkg/audit/doc.go  |  18 -
 .../apiserver/plugin/pkg/audit/fake/doc.go    |  18 -
 .../apiserver/plugin/pkg/audit/fake/fake.go   |  50 -
 .../tools/bootstrap/token/api/doc.go          |  20 -
 .../tools/bootstrap/token/api/types.go        | 112 ---
 .../tools/bootstrap/token/util/helpers.go     | 133 ---
 .../tools/leaderelection/healthzadaptor.go    |  69 --
 .../tools/leaderelection/leaderelection.go    | 336 -------
 .../resourcelock/configmaplock.go             | 109 ---
 .../resourcelock/endpointslock.go             | 104 --
 .../leaderelection/resourcelock/interface.go  | 102 --
 .../k8s.io/client-go/tools/portforward/doc.go |  19 -
 .../tools/portforward/portforward.go          | 342 -------
 vendor/k8s.io/client-go/tools/record/doc.go   |  18 -
 vendor/k8s.io/client-go/tools/record/event.go | 322 -------
 .../client-go/tools/record/events_cache.go    | 462 ---------
 vendor/k8s.io/client-go/tools/record/fake.go  |  58 --
 .../client-go/tools/remotecommand/doc.go      |  20 -
 .../tools/remotecommand/errorstream.go        |  55 --
 .../tools/remotecommand/remotecommand.go      | 142 ---
 .../client-go/tools/remotecommand/resize.go   |  33 -
 .../client-go/tools/remotecommand/v1.go       | 160 ----
 .../client-go/tools/remotecommand/v2.go       | 195 ----
 .../client-go/tools/remotecommand/v3.go       | 111 ---
 .../client-go/tools/remotecommand/v4.go       | 119 ---
 .../client-go/tools/watch/informerwatcher.go  | 114 ---
 vendor/k8s.io/client-go/tools/watch/until.go  | 225 -----
 59 files changed, 7268 deletions(-)
 delete mode 100644 vendor/github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs/main.go
 delete mode 100644 vendor/github.com/evanphx/json-patch/cmd/json-patch/main.go
 delete mode 100644 vendor/github.com/googleapis/gnostic/compiler/main.go
 delete mode 100644 vendor/github.com/stretchr/testify/_codegen/main.go
 delete mode 100644 vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/doc.go
 delete mode 100644 vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/service/service.pb.go
 delete mode 100644 vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/service/service.proto
 delete mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto
 delete mode 100644 vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go
 delete mode 100644 vendor/google.golang.org/grpc/interop/grpc_testing/test.proto
 delete mode 100644 vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto
 delete mode 100644 vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.pb.go
 delete mode 100644 vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.proto
 delete mode 100644 vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.pb.go
 delete mode 100644 vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.proto
 delete mode 100644 vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext2.pb.go
 delete mode 100644 vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext2.proto
 delete mode 100644 vendor/google.golang.org/grpc/reflection/grpc_testing/test.pb.go
 delete mode 100644 vendor/google.golang.org/grpc/reflection/grpc_testing/test.proto
 delete mode 100644 vendor/google.golang.org/grpc/reflection/grpc_testingv3/testv3.proto
 delete mode 100644 vendor/google.golang.org/grpc/stats/grpc_testing/test.pb.go
 delete mode 100644 vendor/google.golang.org/grpc/stats/grpc_testing/test.proto
 delete mode 100644 vendor/google.golang.org/grpc/test/codec_perf/perf.pb.go
 delete mode 100644 vendor/google.golang.org/grpc/test/codec_perf/perf.proto
 delete mode 100644 vendor/google.golang.org/grpc/testdata/ca.pem
 delete mode 100644 vendor/google.golang.org/grpc/testdata/server1.key
 delete mode 100644 vendor/google.golang.org/grpc/testdata/server1.pem
 delete mode 100644 vendor/google.golang.org/grpc/testdata/testdata.go
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/main.go
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/test.0/integration/fixtures/resources.go
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/test.0/integration/fixtures/server.go
 delete mode 100644 vendor/k8s.io/apiextensions-apiserver/test.0/integration/helpers.go
 delete mode 100644 vendor/k8s.io/apiserver/plugin/pkg/audit/doc.go
 delete mode 100644 vendor/k8s.io/apiserver/plugin/pkg/audit/fake/doc.go
 delete mode 100644 vendor/k8s.io/apiserver/plugin/pkg/audit/fake/fake.go
 delete mode 100644 vendor/k8s.io/client-go/tools/bootstrap/token/api/doc.go
 delete mode 100644 vendor/k8s.io/client-go/tools/bootstrap/token/api/types.go
 delete mode 100644 vendor/k8s.io/client-go/tools/bootstrap/token/util/helpers.go
 delete mode 100644 vendor/k8s.io/client-go/tools/leaderelection/healthzadaptor.go
 delete mode 100644 vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go
 delete mode 100644 vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go
 delete mode 100644 vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go
 delete mode 100644 vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go
 delete mode 100644 vendor/k8s.io/client-go/tools/portforward/doc.go
 delete mode 100644 vendor/k8s.io/client-go/tools/portforward/portforward.go
 delete mode 100644 vendor/k8s.io/client-go/tools/record/doc.go
 delete mode 100644 vendor/k8s.io/client-go/tools/record/event.go
 delete mode 100644 vendor/k8s.io/client-go/tools/record/events_cache.go
 delete mode 100644 vendor/k8s.io/client-go/tools/record/fake.go
 delete mode 100644 vendor/k8s.io/client-go/tools/remotecommand/doc.go
 delete mode 100644 vendor/k8s.io/client-go/tools/remotecommand/errorstream.go
 delete mode 100644 vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go
 delete mode 100644 vendor/k8s.io/client-go/tools/remotecommand/resize.go
 delete mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v1.go
 delete mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v2.go
 delete mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v3.go
 delete mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v4.go
 delete mode 100644 vendor/k8s.io/client-go/tools/watch/informerwatcher.go
 delete mode 100644 vendor/k8s.io/client-go/tools/watch/until.go

diff --git a/vendor/github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs/main.go b/vendor/github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs/main.go
deleted file mode 100644
index a5b2b5eef..000000000
--- a/vendor/github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs/main.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package main
-
-import (
-    "bufio"
-    "bytes"
-    "flag"
-    "fmt"
-    "os"
-    "os/exec"
-    "strings"
-)
-
-const bindatafile = "bindata.go"
-
-func isDebug(args []string) bool {
-    flagset := flag.NewFlagSet("", flag.ContinueOnError)
-    debug := flagset.Bool("debug", false, "")
-    debugArgs := make([]string, 0)
-    for _, arg := range args {
-        if strings.HasPrefix(arg, "-debug") {
-            debugArgs = append(debugArgs, arg)
-        }
-    }
-    flagset.Parse(debugArgs)
-    if debug == nil {
-        return false
-    }
-    return *debug
-}
-
-func main() {
-    if _, err := exec.LookPath("go-bindata"); err != nil {
-        fmt.Println("Cannot find go-bindata executable in path")
-        fmt.Println("Maybe you need: go get github.com/elazarl/go-bindata-assetfs/...")
-        os.Exit(1)
-    }
-    cmd := exec.Command("go-bindata", os.Args[1:]...)
-    cmd.Stdin = os.Stdin
-    cmd.Stdout = os.Stdout
-    cmd.Stderr = os.Stderr
-    if err := cmd.Run(); err != nil {
-        os.Exit(1)
-    }
-    in, err := os.Open(bindatafile)
-    if err != nil {
-        fmt.Fprintln(os.Stderr, "Cannot read", bindatafile, err)
-        return
-    }
-    out, err := os.Create("bindata_assetfs.go")
-    if err != nil {
-        fmt.Fprintln(os.Stderr, "Cannot write 'bindata_assetfs.go'", err)
-        return
-    }
-    debug := isDebug(os.Args[1:])
-    r := bufio.NewReader(in)
-    done := false
-    for line, isPrefix, err := r.ReadLine(); err == nil; line, isPrefix, err = r.ReadLine() {
-        if !isPrefix {
-            line = append(line, '\n')
-        }
-        if _, err := out.Write(line); err != nil {
-            fmt.Fprintln(os.Stderr, "Cannot write to 'bindata_assetfs.go'", err)
-            return
-        }
-        if !done && !isPrefix && bytes.HasPrefix(line, []byte("import (")) {
-            if debug {
-                fmt.Fprintln(out, "\t\"net/http\"")
-            } else {
-                fmt.Fprintln(out, "\t\"github.com/elazarl/go-bindata-assetfs\"")
-            }
-            done = true
-        }
-    }
-    if debug {
-        fmt.Fprintln(out, `
-func assetFS() http.FileSystem {
-    for k := range _bintree.Children {
-        return http.Dir(k)
-    }
-    panic("unreachable")
-}`)
-    } else {
-        fmt.Fprintln(out, `
-func assetFS() *assetfs.AssetFS {
-    for k := range _bintree.Children {
-        return &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, Prefix: k}
-    }
-    panic("unreachable")
-}`)
-    }
-    // Close files BEFORE remove calls (don't use defer).
-    in.Close()
-    out.Close()
-    if err := os.Remove(bindatafile); err != nil {
-        fmt.Fprintln(os.Stderr, "Cannot remove", bindatafile, err)
-    }
-}
diff --git a/vendor/github.com/evanphx/json-patch/cmd/json-patch/main.go b/vendor/github.com/evanphx/json-patch/cmd/json-patch/main.go
deleted file mode 100644
index 2be182635..000000000
--- a/vendor/github.com/evanphx/json-patch/cmd/json-patch/main.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package main
-
-import (
-    "fmt"
-    "io/ioutil"
-    "log"
-    "os"
-
-    jsonpatch "github.com/evanphx/json-patch"
-    flags "github.com/jessevdk/go-flags"
-)
-
-type opts struct {
-    PatchFilePaths []FileFlag `long:"patch-file" short:"p" value-name:"PATH" description:"Path to file with one or more operations"`
-}
-
-func main() {
-    var o opts
-    _, err := flags.Parse(&o)
-    if err != nil {
-        log.Fatalf("error: %s\n", err)
-    }
-
-    patches := make([]jsonpatch.Patch, len(o.PatchFilePaths))
-
-    for i, patchFilePath := range o.PatchFilePaths {
-        var bs []byte
-        bs, err = ioutil.ReadFile(patchFilePath.Path())
-        if err != nil {
-            log.Fatalf("error reading patch file: %s", err)
-        }
-
-        var patch jsonpatch.Patch
-        patch, err = jsonpatch.DecodePatch(bs)
-        if err != nil {
-            log.Fatalf("error decoding patch file: %s", err)
-        }
-
-        patches[i] = patch
-    }
-
-    doc, err := ioutil.ReadAll(os.Stdin)
-    if err != nil {
-        log.Fatalf("error reading from stdin: %s", err)
-    }
-
-    mdoc := doc
-    for _, patch := range patches {
-        mdoc, err = patch.Apply(mdoc)
-        if err != nil {
-            log.Fatalf("error applying patch: %s", err)
-        }
-    }
-
-    fmt.Printf("%s", mdoc)
-}
diff --git a/vendor/github.com/googleapis/gnostic/compiler/main.go b/vendor/github.com/googleapis/gnostic/compiler/main.go
deleted file mode 100644
index 9713a21cc..000000000
--- a/vendor/github.com/googleapis/gnostic/compiler/main.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2017 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//    http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package compiler provides support functions to generated compiler code.
-package compiler
diff --git a/vendor/github.com/stretchr/testify/_codegen/main.go b/vendor/github.com/stretchr/testify/_codegen/main.go
deleted file mode 100644
index 2e5e8124f..000000000
--- a/vendor/github.com/stretchr/testify/_codegen/main.go
+++ /dev/null
@@ -1,316 +0,0 @@
-// This program reads all assertion functions from the assert package and
-// automatically generates the corresponding requires and forwarded assertions
-
-package main
-
-import (
-    "bytes"
-    "flag"
-    "fmt"
-    "go/ast"
-    "go/build"
-    "go/doc"
-    "go/format"
-    "go/importer"
-    "go/parser"
-    "go/token"
-    "go/types"
-    "io"
-    "io/ioutil"
-    "log"
-    "os"
-    "path"
-    "regexp"
-    "strings"
-    "text/template"
-
-    "github.com/ernesto-jimenez/gogen/imports"
-)
-
-var (
-    pkg       = flag.String("assert-path", "github.com/stretchr/testify/assert", "Path to the assert package")
-    includeF  = flag.Bool("include-format-funcs", false, "include format functions such as Errorf and Equalf")
-    outputPkg = flag.String("output-package", "", "package for the resulting code")
-    tmplFile  = flag.String("template", "", "What file to load the function template from")
-    out       = flag.String("out", "", "What file to write the source code to")
-)
-
-func main() {
-    flag.Parse()
-
-    scope, docs, err := parsePackageSource(*pkg)
-    if err != nil {
-        log.Fatal(err)
-    }
-
-    importer, funcs, err := analyzeCode(scope, docs)
-    if err != nil {
-        log.Fatal(err)
-    }
-
-    if err := generateCode(importer, funcs); err != nil {
-        log.Fatal(err)
-    }
-}
-
-func generateCode(importer imports.Importer, funcs []testFunc) error {
-    buff := bytes.NewBuffer(nil)
-
-    tmplHead, tmplFunc, err := parseTemplates()
-    if err != nil {
-        return err
-    }
-
-    // Generate header
-    if err := tmplHead.Execute(buff, struct {
-        Name    string
-        Imports map[string]string
-    }{
-        *outputPkg,
-        importer.Imports(),
-    }); err != nil {
-        return err
-    }
-
-    // Generate funcs
-    for _, fn := range funcs {
-        buff.Write([]byte("\n\n"))
-        if err := tmplFunc.Execute(buff, &fn); err != nil {
-            return err
-        }
-    }
-
-    code, err := format.Source(buff.Bytes())
-    if err != nil {
-        return err
-    }
-
-    // Write file
-    output, err := outputFile()
-    if err != nil {
-        return err
-    }
-    defer output.Close()
-    _, err = io.Copy(output, bytes.NewReader(code))
-    return err
-}
-
-func parseTemplates() (*template.Template, *template.Template, error) {
-    tmplHead, err := template.New("header").Parse(headerTemplate)
-    if err != nil {
-        return nil, nil, err
-    }
-    if *tmplFile != "" {
-        f, err := ioutil.ReadFile(*tmplFile)
-        if err != nil {
-            return nil, nil, err
-        }
-        funcTemplate = string(f)
-    }
-    tmpl, err := template.New("function").Parse(funcTemplate)
-    if err != nil {
-        return nil, nil, err
-    }
-    return tmplHead, tmpl, nil
-}
-
-func outputFile() (*os.File, error) {
-    filename := *out
-    if filename == "-" || (filename == "" && *tmplFile == "") {
-        return os.Stdout, nil
-    }
-    if filename == "" {
-        filename = strings.TrimSuffix(strings.TrimSuffix(*tmplFile, ".tmpl"), ".go") + ".go"
-    }
-    return os.Create(filename)
-}
-
-// analyzeCode takes the types scope and the docs and returns the import
-// information and information about all the assertion functions.
-func analyzeCode(scope *types.Scope, docs *doc.Package) (imports.Importer, []testFunc, error) {
-    testingT := scope.Lookup("TestingT").Type().Underlying().(*types.Interface)
-
-    importer := imports.New(*outputPkg)
-    var funcs []testFunc
-    // Go through all the top level functions
-    for _, fdocs := range docs.Funcs {
-        // Find the function
-        obj := scope.Lookup(fdocs.Name)
-
-        fn, ok := obj.(*types.Func)
-        if !ok {
-            continue
-        }
-        // Check function signature has at least two arguments
-        sig := fn.Type().(*types.Signature)
-        if sig.Params().Len() < 2 {
-            continue
-        }
-        // Check first argument is of type testingT
-        first, ok := sig.Params().At(0).Type().(*types.Named)
-        if !ok {
-            continue
-        }
-        firstType, ok := first.Underlying().(*types.Interface)
-        if !ok {
-            continue
-        }
-        if !types.Implements(firstType, testingT) {
-            continue
-        }
-
-        // Skip functions ending with f
-        if strings.HasSuffix(fdocs.Name, "f") && !*includeF {
-            continue
-        }
-
-        funcs = append(funcs, testFunc{*outputPkg, fdocs, fn})
-        importer.AddImportsFrom(sig.Params())
-    }
-    return importer, funcs, nil
-}
-
-// parsePackageSource returns the types scope and the package documentation from the package
-func parsePackageSource(pkg string) (*types.Scope, *doc.Package, error) {
-    pd, err := build.Import(pkg, ".", 0)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    fset := token.NewFileSet()
-    files := make(map[string]*ast.File)
-    fileList := make([]*ast.File, len(pd.GoFiles))
-    for i, fname := range pd.GoFiles {
-        src, err := ioutil.ReadFile(path.Join(pd.SrcRoot, pd.ImportPath, fname))
-        if err != nil {
-            return nil, nil, err
-        }
-        f, err := parser.ParseFile(fset, fname, src, parser.ParseComments|parser.AllErrors)
-        if err != nil {
-            return nil, nil, err
-        }
-        files[fname] = f
-        fileList[i] = f
-    }
-
-    cfg := types.Config{
-        Importer: importer.Default(),
-    }
-    info := types.Info{
-        Defs: make(map[*ast.Ident]types.Object),
-    }
-    tp, err := cfg.Check(pkg, fset, fileList, &info)
-    if err != nil {
-        return nil, nil, err
-    }
-
-    scope := tp.Scope()
-
-    ap, _ := ast.NewPackage(fset, files, nil, nil)
-    docs := doc.New(ap, pkg, 0)
-
-    return scope, docs, nil
-}
-
-type testFunc struct {
-    CurrentPkg string
-    DocInfo    *doc.Func
-    TypeInfo   *types.Func
-}
-
-func (f *testFunc) Qualifier(p *types.Package) string {
-    if p == nil || p.Name() == f.CurrentPkg {
-        return ""
-    }
-    return p.Name()
-}
-
-func (f *testFunc) Params() string {
-    sig := f.TypeInfo.Type().(*types.Signature)
-    params := sig.Params()
-    p := ""
-    comma := ""
-    to := params.Len()
-    var i int
-
-    if sig.Variadic() {
-        to--
-    }
-    for i = 1; i < to; i++ {
-        param := params.At(i)
-        p += fmt.Sprintf("%s%s %s", comma, param.Name(), types.TypeString(param.Type(), f.Qualifier))
-        comma = ", "
-    }
-    if sig.Variadic() {
-        param := params.At(params.Len() - 1)
-        p += fmt.Sprintf("%s%s ...%s", comma, param.Name(), types.TypeString(param.Type().(*types.Slice).Elem(), f.Qualifier))
-    }
-    return p
-}
-
-func (f *testFunc) ForwardedParams() string {
-    sig := f.TypeInfo.Type().(*types.Signature)
-    params := sig.Params()
-    p := ""
-    comma := ""
-    to := params.Len()
-    var i int
-
-    if sig.Variadic() {
-        to--
-    }
-    for i = 1; i < to; i++ {
-        param := params.At(i)
-        p += fmt.Sprintf("%s%s", comma, param.Name())
-        comma = ", "
-    }
-    if sig.Variadic() {
-        param := params.At(params.Len() - 1)
-        p += fmt.Sprintf("%s%s...", comma, param.Name())
-    }
-    return p
-}
-
-func (f *testFunc) ParamsFormat() string {
-    return strings.Replace(f.Params(), "msgAndArgs", "msg string, args", 1)
-}
-
-func (f *testFunc) ForwardedParamsFormat() string {
-    return strings.Replace(f.ForwardedParams(), "msgAndArgs", "append([]interface{}{msg}, args...)", 1)
-}
-
-func (f *testFunc) Comment() string {
-    return "// " + strings.Replace(strings.TrimSpace(f.DocInfo.Doc), "\n", "\n// ", -1)
-}
-
-func (f *testFunc) CommentFormat() string {
-    search := fmt.Sprintf("%s", f.DocInfo.Name)
-    replace := fmt.Sprintf("%sf", f.DocInfo.Name)
-    comment := strings.Replace(f.Comment(), search, replace, -1)
-    exp := regexp.MustCompile(replace + `\(((\(\)|[^)])+)\)`)
-    return exp.ReplaceAllString(comment, replace+`($1, "error message %s", "formatted")`)
-}
-
-func (f *testFunc) CommentWithoutT(receiver string) string {
-    search := fmt.Sprintf("assert.%s(t, ", f.DocInfo.Name)
-    replace := fmt.Sprintf("%s.%s(", receiver, f.DocInfo.Name)
-    return strings.Replace(f.Comment(), search, replace, -1)
-}
-
-var headerTemplate = `/*
-* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
-* THIS FILE MUST NOT BE EDITED BY HAND
-*/
-
-package {{.Name}}
-
-import (
-{{range $path, $name := .Imports}}
-    {{$name}} "{{$path}}"{{end}}
-)
-`
-
-var funcTemplate = `{{.Comment}}
-func (fwd *AssertionsForwarder) {{.DocInfo.Name}}({{.Params}}) bool {
-    return assert.{{.DocInfo.Name}}({{.ForwardedParams}})
-}`
diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/doc.go b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/doc.go
deleted file mode 100644
index aba962840..000000000
--- a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/doc.go
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package grpc_lb_v1 is the parent package of all gRPC loadbalancer
-// message and service protobuf definitions.
-package grpc_lb_v1
diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/service/service.pb.go b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/service/service.pb.go
deleted file mode 100644
index ebcbe56d8..000000000
--- a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/service/service.pb.go
+++ /dev/null
@@ -1,154 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: grpc_lb_v1/service/service.proto
-
-/*
-Package service is a generated protocol buffer package.
-
-It is generated from these files:
-    grpc_lb_v1/service/service.proto
-
-It has these top-level messages:
-*/
-package service
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import grpc_lb_v1 "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
-
-import (
-    context "golang.org/x/net/context"
-    grpc "google.golang.org/grpc"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// Client API for LoadBalancer service
-
-type LoadBalancerClient interface {
-    // Bidirectional rpc to get a list of servers.
-    BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error)
-}
-
-type loadBalancerClient struct {
-    cc *grpc.ClientConn
-}
-
-func NewLoadBalancerClient(cc *grpc.ClientConn) LoadBalancerClient {
-    return &loadBalancerClient{cc}
-}
-
-func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...grpc.CallOption) (LoadBalancer_BalanceLoadClient, error) {
-    stream, err := grpc.NewClientStream(ctx, &_LoadBalancer_serviceDesc.Streams[0], c.cc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...)
-    if err != nil {
-        return nil, err
-    }
-    x := &loadBalancerBalanceLoadClient{stream}
-    return x, nil
-}
-
-type LoadBalancer_BalanceLoadClient interface {
-    Send(*grpc_lb_v1.LoadBalanceRequest) error
-    Recv() (*grpc_lb_v1.LoadBalanceResponse, error)
-    grpc.ClientStream
-}
-
-type loadBalancerBalanceLoadClient struct {
-    grpc.ClientStream
-}
-
-func (x *loadBalancerBalanceLoadClient) Send(m *grpc_lb_v1.LoadBalanceRequest) error {
-    return x.ClientStream.SendMsg(m)
-}
-
-func (x *loadBalancerBalanceLoadClient) Recv() (*grpc_lb_v1.LoadBalanceResponse, error) {
-    m := new(grpc_lb_v1.LoadBalanceResponse)
-    if err := x.ClientStream.RecvMsg(m); err != nil {
-        return nil, err
-    }
-    return m, nil
-}
-
-// Server API for LoadBalancer service
-
-type LoadBalancerServer interface {
-    // Bidirectional rpc to get a list of servers.
-    BalanceLoad(LoadBalancer_BalanceLoadServer) error
-}
-
-func RegisterLoadBalancerServer(s *grpc.Server, srv LoadBalancerServer) {
-    s.RegisterService(&_LoadBalancer_serviceDesc, srv)
-}
-
-func _LoadBalancer_BalanceLoad_Handler(srv interface{}, stream grpc.ServerStream) error {
-    return srv.(LoadBalancerServer).BalanceLoad(&loadBalancerBalanceLoadServer{stream})
-}
-
-type LoadBalancer_BalanceLoadServer interface {
-    Send(*grpc_lb_v1.LoadBalanceResponse) error
-    Recv() (*grpc_lb_v1.LoadBalanceRequest, error)
-    grpc.ServerStream
-}
-
-type loadBalancerBalanceLoadServer struct {
-    grpc.ServerStream
-}
-
-func (x *loadBalancerBalanceLoadServer) Send(m *grpc_lb_v1.LoadBalanceResponse) error {
-    return x.ServerStream.SendMsg(m)
-}
-
-func (x *loadBalancerBalanceLoadServer) Recv() (*grpc_lb_v1.LoadBalanceRequest, error) {
-    m := new(grpc_lb_v1.LoadBalanceRequest)
-    if err := x.ServerStream.RecvMsg(m); err != nil {
-        return nil, err
-    }
-    return m, nil
-}
-
-var _LoadBalancer_serviceDesc = grpc.ServiceDesc{
-    ServiceName: "grpc.lb.v1.LoadBalancer",
-    HandlerType: (*LoadBalancerServer)(nil),
-    Methods:     []grpc.MethodDesc{},
-    Streams: []grpc.StreamDesc{
-        {
-            StreamName:    "BalanceLoad",
-            Handler:       _LoadBalancer_BalanceLoad_Handler,
-            ServerStreams: true,
-            ClientStreams: true,
-        },
-    },
-    Metadata: "grpc_lb_v1/service/service.proto",
-}
-
-func init() { proto.RegisterFile("grpc_lb_v1/service/service.proto", fileDescriptor0) }
-
-var fileDescriptor0 = []byte{
-    // 142 bytes of a gzipped FileDescriptorProto
-    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0x2f, 0x2a, 0x48,
-    0x8e, 0xcf, 0x49, 0x8a, 0x2f, 0x33, 0xd4, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x85, 0xd1,
-    0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0x5c, 0x20, 0x15, 0x7a, 0x39, 0x49, 0x7a, 0x65, 0x86,
-    0x52, 0x4a, 0x48, 0xaa, 0x73, 0x53, 0x8b, 0x8b, 0x13, 0xd3, 0x53, 0x8b, 0xe1, 0x0c, 0x88, 0x7a,
-    0xa3, 0x24, 0x2e, 0x1e, 0x9f, 0xfc, 0xc4, 0x14, 0xa7, 0xc4, 0x9c, 0xc4, 0xbc, 0xe4, 0xd4, 0x22,
-    0xa1, 0x20, 0x2e, 0x6e, 0x28, 0x1b, 0x24, 0x2c, 0x24, 0xa7, 0x87, 0x30, 0x4f, 0x0f, 0x49, 0x61,
-    0x50, 0x6a, 0x61, 0x69, 0x6a, 0x71, 0x89, 0x94, 0x3c, 0x4e, 0xf9, 0xe2, 0x82, 0xfc, 0xbc, 0xe2,
-    0x54, 0x0d, 0x46, 0x03, 0x46, 0x27, 0xce, 0x28, 0x76, 0xa8, 0x23, 0x93, 0xd8, 0xc0, 0xb6, 0x1a,
-    0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x39, 0x4e, 0xb0, 0xf8, 0xc9, 0x00, 0x00, 0x00,
-}
diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/service/service.proto b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/service/service.proto
deleted file mode 100644
index 02c1a8b77..000000000
--- a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/service/service.proto
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2016 gRPC authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package grpc.lb.v1;
-option go_package = "service";
-
-import "grpc_lb_v1/messages/messages.proto";
-
-service LoadBalancer {
-  // Bidirectional rpc to get a list of servers.
-  rpc BalanceLoad(stream LoadBalanceRequest)
-      returns (stream LoadBalanceResponse);
-}
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto
deleted file mode 100644
index 6072fdc3b..000000000
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2017 gRPC authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package grpc.health.v1;
-
-message HealthCheckRequest {
-  string service = 1;
-}
-
-message HealthCheckResponse {
-  enum ServingStatus {
-    UNKNOWN = 0;
-    SERVING = 1;
-    NOT_SERVING = 2;
-  }
-  ServingStatus status = 1;
-}
-
-service Health{
-  rpc Check(HealthCheckRequest) returns (HealthCheckResponse);
-}
diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go b/vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go
deleted file mode 100644
index b03c728c3..000000000
--- a/vendor/google.golang.org/grpc/interop/grpc_testing/test.pb.go
+++ /dev/null
@@ -1,905 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: grpc_testing/test.proto
-
-/*
-Package grpc_testing is a generated protocol buffer package.
-
-It is generated from these files:
-    grpc_testing/test.proto
-
-It has these top-level messages:
-    Empty
-    Payload
-    EchoStatus
-    SimpleRequest
-    SimpleResponse
-    StreamingInputCallRequest
-    StreamingInputCallResponse
-    ResponseParameters
-    StreamingOutputCallRequest
-    StreamingOutputCallResponse
-*/
-package grpc_testing
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-import (
-    context "golang.org/x/net/context"
-    grpc "google.golang.org/grpc"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-// The type of payload that should be returned.
-type PayloadType int32
-
-const (
-    // Compressable text format.
-    PayloadType_COMPRESSABLE PayloadType = 0
-    // Uncompressable binary format.
-    PayloadType_UNCOMPRESSABLE PayloadType = 1
-    // Randomly chosen from all other formats defined in this enum.
-    PayloadType_RANDOM PayloadType = 2
-)
-
-var PayloadType_name = map[int32]string{
-    0: "COMPRESSABLE",
-    1: "UNCOMPRESSABLE",
-    2: "RANDOM",
-}
-var PayloadType_value = map[string]int32{
-    "COMPRESSABLE":   0,
-    "UNCOMPRESSABLE": 1,
-    "RANDOM":         2,
-}
-
-func (x PayloadType) Enum() *PayloadType {
-    p := new(PayloadType)
-    *p = x
-    return p
-}
-func (x PayloadType) String() string {
-    return proto.EnumName(PayloadType_name, int32(x))
-}
-func (x *PayloadType) UnmarshalJSON(data []byte) error {
-    value, err := proto.UnmarshalJSONEnum(PayloadType_value, data, "PayloadType")
-    if err != nil {
-        return err
-    }
-    *x = PayloadType(value)
-    return nil
-}
-func (PayloadType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
-
-type Empty struct {
-    XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Empty) Reset()                    { *m = Empty{} }
-func (m *Empty) String() string            { return proto.CompactTextString(m) }
-func (*Empty) ProtoMessage()               {}
-func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
-
-// A block of data, to simply increase gRPC message size.
-type Payload struct {
-    // The type of data in body.
-    Type *PayloadType `protobuf:"varint,1,opt,name=type,enum=grpc.testing.PayloadType" json:"type,omitempty"`
-    // Primary contents of payload.
-    Body             []byte `protobuf:"bytes,2,opt,name=body" json:"body,omitempty"`
-    XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *Payload) Reset()                    { *m = Payload{} }
-func (m *Payload) String() string            { return proto.CompactTextString(m) }
-func (*Payload) ProtoMessage()               {}
-func (*Payload) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
-
-func (m *Payload) GetType() PayloadType {
-    if m != nil && m.Type != nil {
-        return *m.Type
-    }
-    return PayloadType_COMPRESSABLE
-}
-
-func (m *Payload) GetBody() []byte {
-    if m != nil {
-        return m.Body
-    }
-    return nil
-}
-
-// A protobuf representation for grpc status. This is used by test
-// clients to specify a status that the server should attempt to return.
-type EchoStatus struct {
-    Code             *int32  `protobuf:"varint,1,opt,name=code" json:"code,omitempty"`
-    Message          *string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"`
-    XXX_unrecognized []byte  `json:"-"`
-}
-
-func (m *EchoStatus) Reset()                    { *m = EchoStatus{} }
-func (m *EchoStatus) String() string            { return proto.CompactTextString(m) }
-func (*EchoStatus) ProtoMessage()               {}
-func (*EchoStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
-
-func (m *EchoStatus) GetCode() int32 {
-    if m != nil && m.Code != nil {
-        return *m.Code
-    }
-    return 0
-}
-
-func (m *EchoStatus) GetMessage() string {
-    if m != nil && m.Message != nil {
-        return *m.Message
-    }
-    return ""
-}
-
-// Unary request.
-type SimpleRequest struct {
-    // Desired payload type in the response from the server.
-    // If response_type is RANDOM, server randomly chooses one from other formats.
-    ResponseType *PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,enum=grpc.testing.PayloadType" json:"response_type,omitempty"`
-    // Desired payload size in the response from the server.
-    // If response_type is COMPRESSABLE, this denotes the size before compression.
-    ResponseSize *int32 `protobuf:"varint,2,opt,name=response_size,json=responseSize" json:"response_size,omitempty"`
-    // Optional input payload sent along with the request.
-    Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"`
-    // Whether SimpleResponse should include username.
-    FillUsername *bool `protobuf:"varint,4,opt,name=fill_username,json=fillUsername" json:"fill_username,omitempty"`
-    // Whether SimpleResponse should include OAuth scope.
-    FillOauthScope *bool `protobuf:"varint,5,opt,name=fill_oauth_scope,json=fillOauthScope" json:"fill_oauth_scope,omitempty"`
-    // Whether server should return a given status
-    ResponseStatus   *EchoStatus `protobuf:"bytes,7,opt,name=response_status,json=responseStatus" json:"response_status,omitempty"`
-    XXX_unrecognized []byte      `json:"-"`
-}
-
-func (m *SimpleRequest) Reset()                    { *m = SimpleRequest{} }
-func (m *SimpleRequest) String() string            { return proto.CompactTextString(m) }
-func (*SimpleRequest) ProtoMessage()               {}
-func (*SimpleRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
-
-func (m *SimpleRequest) GetResponseType() PayloadType {
-    if m != nil && m.ResponseType != nil {
-        return *m.ResponseType
-    }
-    return PayloadType_COMPRESSABLE
-}
-
-func (m *SimpleRequest) GetResponseSize() int32 {
-    if m != nil && m.ResponseSize != nil {
-        return *m.ResponseSize
-    }
-    return 0
-}
-
-func (m *SimpleRequest) GetPayload() *Payload {
-    if m != nil {
-        return m.Payload
-    }
-    return nil
-}
-
-func (m *SimpleRequest) GetFillUsername() bool {
-    if m != nil && m.FillUsername != nil {
-        return *m.FillUsername
-    }
-    return false
-}
-
-func (m *SimpleRequest) GetFillOauthScope() bool {
-    if m != nil && m.FillOauthScope != nil {
-        return *m.FillOauthScope
-    }
-    return false
-}
-
-func (m *SimpleRequest) GetResponseStatus() *EchoStatus {
-    if m != nil {
-        return m.ResponseStatus
-    }
-    return nil
-}
-
-// Unary response, as configured by the request.
-type SimpleResponse struct {
-    // Payload to increase message size.
-    Payload *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"`
-    // The user the request came from, for verifying authentication was
-    // successful when the client expected it.
-    Username *string `protobuf:"bytes,2,opt,name=username" json:"username,omitempty"`
-    // OAuth scope.
-    OauthScope       *string `protobuf:"bytes,3,opt,name=oauth_scope,json=oauthScope" json:"oauth_scope,omitempty"`
-    XXX_unrecognized []byte  `json:"-"`
-}
-
-func (m *SimpleResponse) Reset()                    { *m = SimpleResponse{} }
-func (m *SimpleResponse) String() string            { return proto.CompactTextString(m) }
-func (*SimpleResponse) ProtoMessage()               {}
-func (*SimpleResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
-
-func (m *SimpleResponse) GetPayload() *Payload {
-    if m != nil {
-        return m.Payload
-    }
-    return nil
-}
-
-func (m *SimpleResponse) GetUsername() string {
-    if m != nil && m.Username != nil {
-        return *m.Username
-    }
-    return ""
-}
-
-func (m *SimpleResponse) GetOauthScope() string {
-    if m != nil && m.OauthScope != nil {
-        return *m.OauthScope
-    }
-    return ""
-}
-
-// Client-streaming request.
-type StreamingInputCallRequest struct {
-    // Optional input payload sent along with the request.
-    Payload          *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"`
-    XXX_unrecognized []byte   `json:"-"`
-}
-
-func (m *StreamingInputCallRequest) Reset()                    { *m = StreamingInputCallRequest{} }
-func (m *StreamingInputCallRequest) String() string            { return proto.CompactTextString(m) }
-func (*StreamingInputCallRequest) ProtoMessage()               {}
-func (*StreamingInputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
-
-func (m *StreamingInputCallRequest) GetPayload() *Payload {
-    if m != nil {
-        return m.Payload
-    }
-    return nil
-}
-
-// Client-streaming response.
-type StreamingInputCallResponse struct {
-    // Aggregated size of payloads received from the client.
-    AggregatedPayloadSize *int32 `protobuf:"varint,1,opt,name=aggregated_payload_size,json=aggregatedPayloadSize" json:"aggregated_payload_size,omitempty"`
-    XXX_unrecognized      []byte `json:"-"`
-}
-
-func (m *StreamingInputCallResponse) Reset()                    { *m = StreamingInputCallResponse{} }
-func (m *StreamingInputCallResponse) String() string            { return proto.CompactTextString(m) }
-func (*StreamingInputCallResponse) ProtoMessage()               {}
-func (*StreamingInputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
-
-func (m *StreamingInputCallResponse) GetAggregatedPayloadSize() int32 {
-    if m != nil && m.AggregatedPayloadSize != nil {
-        return *m.AggregatedPayloadSize
-    }
-    return 0
-}
-
-// Configuration for a particular response.
-type ResponseParameters struct {
-    // Desired payload sizes in responses from the server.
-    // If response_type is COMPRESSABLE, this denotes the size before compression.
-    Size *int32 `protobuf:"varint,1,opt,name=size" json:"size,omitempty"`
-    // Desired interval between consecutive responses in the response stream in
-    // microseconds.
-    IntervalUs       *int32 `protobuf:"varint,2,opt,name=interval_us,json=intervalUs" json:"interval_us,omitempty"`
-    XXX_unrecognized []byte `json:"-"`
-}
-
-func (m *ResponseParameters) Reset()                    { *m = ResponseParameters{} }
-func (m *ResponseParameters) String() string            { return proto.CompactTextString(m) }
-func (*ResponseParameters) ProtoMessage()               {}
-func (*ResponseParameters) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
-
-func (m *ResponseParameters) GetSize() int32 {
-    if m != nil && m.Size != nil {
-        return *m.Size
-    }
-    return 0
-}
-
-func (m *ResponseParameters) GetIntervalUs() int32 {
-    if m != nil && m.IntervalUs != nil {
-        return *m.IntervalUs
-    }
-    return 0
-}
-
-// Server-streaming request.
-type StreamingOutputCallRequest struct {
-    // Desired payload type in the response from the server.
-    // If response_type is RANDOM, the payload from each response in the stream
-    // might be of different types. This is to simulate a mixed type of payload
-    // stream.
-    ResponseType *PayloadType `protobuf:"varint,1,opt,name=response_type,json=responseType,enum=grpc.testing.PayloadType" json:"response_type,omitempty"`
-    // Configuration for each expected response message.
-    ResponseParameters []*ResponseParameters `protobuf:"bytes,2,rep,name=response_parameters,json=responseParameters" json:"response_parameters,omitempty"`
-    // Optional input payload sent along with the request.
-    Payload *Payload `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"`
-    // Whether server should return a given status
-    ResponseStatus   *EchoStatus `protobuf:"bytes,7,opt,name=response_status,json=responseStatus" json:"response_status,omitempty"`
-    XXX_unrecognized []byte      `json:"-"`
-}
-
-func (m *StreamingOutputCallRequest) Reset()                    { *m = StreamingOutputCallRequest{} }
-func (m *StreamingOutputCallRequest) String() string            { return proto.CompactTextString(m) }
-func (*StreamingOutputCallRequest) ProtoMessage()               {}
-func (*StreamingOutputCallRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
-
-func (m *StreamingOutputCallRequest) GetResponseType() PayloadType {
-    if m != nil && m.ResponseType != nil {
-        return *m.ResponseType
-    }
-    return PayloadType_COMPRESSABLE
-}
-
-func (m *StreamingOutputCallRequest) GetResponseParameters() []*ResponseParameters {
-    if m != nil {
-        return m.ResponseParameters
-    }
-    return nil
-}
-
-func (m *StreamingOutputCallRequest) GetPayload() *Payload {
-    if m != nil {
-        return m.Payload
-    }
-    return nil
-}
-
-func (m *StreamingOutputCallRequest) GetResponseStatus() *EchoStatus {
-    if m != nil {
-        return m.ResponseStatus
-    }
-    return nil
-}
-
-// Server-streaming response, as configured by the request and parameters.
-type StreamingOutputCallResponse struct {
-    // Payload to increase response size.
-    Payload          *Payload `protobuf:"bytes,1,opt,name=payload" json:"payload,omitempty"`
-    XXX_unrecognized []byte   `json:"-"`
-}
-
-func (m *StreamingOutputCallResponse) Reset()                    { *m = StreamingOutputCallResponse{} }
-func (m *StreamingOutputCallResponse) String() string            { return proto.CompactTextString(m) }
-func (*StreamingOutputCallResponse) ProtoMessage()               {}
-func (*StreamingOutputCallResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
-
-func (m *StreamingOutputCallResponse) GetPayload() *Payload {
-    if m != nil {
-        return m.Payload
-    }
-    return nil
-}
-
-func init() {
-    proto.RegisterType((*Empty)(nil), "grpc.testing.Empty")
-    proto.RegisterType((*Payload)(nil), "grpc.testing.Payload")
-    proto.RegisterType((*EchoStatus)(nil), "grpc.testing.EchoStatus")
-    proto.RegisterType((*SimpleRequest)(nil), "grpc.testing.SimpleRequest")
-    proto.RegisterType((*SimpleResponse)(nil), "grpc.testing.SimpleResponse")
-    proto.RegisterType((*StreamingInputCallRequest)(nil), "grpc.testing.StreamingInputCallRequest")
-    proto.RegisterType((*StreamingInputCallResponse)(nil), "grpc.testing.StreamingInputCallResponse")
-    proto.RegisterType((*ResponseParameters)(nil), "grpc.testing.ResponseParameters")
-    proto.RegisterType((*StreamingOutputCallRequest)(nil), "grpc.testing.StreamingOutputCallRequest")
-    proto.RegisterType((*StreamingOutputCallResponse)(nil), "grpc.testing.StreamingOutputCallResponse")
-    proto.RegisterEnum("grpc.testing.PayloadType", PayloadType_name, PayloadType_value)
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// Client API for TestService service
-
-type TestServiceClient interface {
-    // One empty request followed by one empty response.
-    EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error)
-    // One request followed by one response.
-    // The server returns the client payload as-is.
-    UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error)
-    // One request followed by a sequence of responses (streamed download).
-    // The server returns the payload with client desired type and sizes.
-    StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error)
-    // A sequence of requests followed by one response (streamed upload).
-    // The server returns the aggregated size of client payload as the result.
-    StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error)
-    // A sequence of requests with each request served by the server immediately.
-    // As one request could lead to multiple responses, this interface
-    // demonstrates the idea of full duplexing.
-    FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error)
-    // A sequence of requests followed by a sequence of responses.
-    // The server buffers all the client requests and then serves them in order. A
-    // stream of responses are returned to the client when the server starts with
-    // first request.
-    HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error)
-}
-
-type testServiceClient struct {
-    cc *grpc.ClientConn
-}
-
-func NewTestServiceClient(cc *grpc.ClientConn) TestServiceClient {
-    return &testServiceClient{cc}
-}
-
-func (c *testServiceClient) EmptyCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) {
-    out := new(Empty)
-    err := grpc.Invoke(ctx, "/grpc.testing.TestService/EmptyCall", in, out, c.cc, opts...)
-    if err != nil {
-        return nil, err
-    }
-    return out, nil
-}
-
-func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) {
-    out := new(SimpleResponse)
-    err := grpc.Invoke(ctx, "/grpc.testing.TestService/UnaryCall", in, out, c.cc, opts...)
-    if err != nil {
-        return nil, err
-    }
-    return out, nil
-}
-
-func (c *testServiceClient) StreamingOutputCall(ctx context.Context, in *StreamingOutputCallRequest, opts ...grpc.CallOption) (TestService_StreamingOutputCallClient, error) {
-    stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[0], c.cc, "/grpc.testing.TestService/StreamingOutputCall", opts...)
-    if err != nil {
-        return nil, err
-    }
-    x := &testServiceStreamingOutputCallClient{stream}
-    if err := x.ClientStream.SendMsg(in); err != nil {
-        return nil, err
-    }
-    if err := x.ClientStream.CloseSend(); err != nil {
-        return nil, err
-    }
-    return x, nil
-}
-
-type TestService_StreamingOutputCallClient interface {
-    Recv() (*StreamingOutputCallResponse, error)
-    grpc.ClientStream
-}
-
-type testServiceStreamingOutputCallClient struct {
-    grpc.ClientStream
-}
-
-func (x *testServiceStreamingOutputCallClient) Recv() (*StreamingOutputCallResponse, error) {
-    m := new(StreamingOutputCallResponse)
-    if err := x.ClientStream.RecvMsg(m); err != nil {
-        return nil, err
-    }
-    return m, nil
-}
-
-func (c *testServiceClient) StreamingInputCall(ctx context.Context, opts ...grpc.CallOption) (TestService_StreamingInputCallClient, error) {
-    stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[1], c.cc, "/grpc.testing.TestService/StreamingInputCall", opts...)
- if err != nil { - return nil, err - } - x := &testServiceStreamingInputCallClient{stream} - return x, nil -} - -type TestService_StreamingInputCallClient interface { - Send(*StreamingInputCallRequest) error - CloseAndRecv() (*StreamingInputCallResponse, error) - grpc.ClientStream -} - -type testServiceStreamingInputCallClient struct { - grpc.ClientStream -} - -func (x *testServiceStreamingInputCallClient) Send(m *StreamingInputCallRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *testServiceStreamingInputCallClient) CloseAndRecv() (*StreamingInputCallResponse, error) { - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - m := new(StreamingInputCallResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *testServiceClient) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) { - stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[2], c.cc, "/grpc.testing.TestService/FullDuplexCall", opts...) - if err != nil { - return nil, err - } - x := &testServiceFullDuplexCallClient{stream} - return x, nil -} - -type TestService_FullDuplexCallClient interface { - Send(*StreamingOutputCallRequest) error - Recv() (*StreamingOutputCallResponse, error) - grpc.ClientStream -} - -type testServiceFullDuplexCallClient struct { - grpc.ClientStream -} - -func (x *testServiceFullDuplexCallClient) Send(m *StreamingOutputCallRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *testServiceFullDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { - m := new(StreamingOutputCallResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *testServiceClient) HalfDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_HalfDuplexCallClient, error) { - stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[3], c.cc, "/grpc.testing.TestService/HalfDuplexCall", opts...) - if err != nil { - return nil, err - } - x := &testServiceHalfDuplexCallClient{stream} - return x, nil -} - -type TestService_HalfDuplexCallClient interface { - Send(*StreamingOutputCallRequest) error - Recv() (*StreamingOutputCallResponse, error) - grpc.ClientStream -} - -type testServiceHalfDuplexCallClient struct { - grpc.ClientStream -} - -func (x *testServiceHalfDuplexCallClient) Send(m *StreamingOutputCallRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *testServiceHalfDuplexCallClient) Recv() (*StreamingOutputCallResponse, error) { - m := new(StreamingOutputCallResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// Server API for TestService service - -type TestServiceServer interface { - // One empty request followed by one empty response. - EmptyCall(context.Context, *Empty) (*Empty, error) - // One request followed by one response. - // The server returns the client payload as-is. - UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) - // One request followed by a sequence of responses (streamed download). - // The server returns the payload with client desired type and sizes. - StreamingOutputCall(*StreamingOutputCallRequest, TestService_StreamingOutputCallServer) error - // A sequence of requests followed by one response (streamed upload). - // The server returns the aggregated size of client payload as the result. 
- StreamingInputCall(TestService_StreamingInputCallServer) error - // A sequence of requests with each request served by the server immediately. - // As one request could lead to multiple responses, this interface - // demonstrates the idea of full duplexing. - FullDuplexCall(TestService_FullDuplexCallServer) error - // A sequence of requests followed by a sequence of responses. - // The server buffers all the client requests and then serves them in order. A - // stream of responses is returned to the client when the server starts with - // the first request. - HalfDuplexCall(TestService_HalfDuplexCallServer) error -} - -func RegisterTestServiceServer(s *grpc.Server, srv TestServiceServer) { - s.RegisterService(&_TestService_serviceDesc, srv) -} - -func _TestService_EmptyCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TestServiceServer).EmptyCall(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/grpc.testing.TestService/EmptyCall", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TestServiceServer).EmptyCall(ctx, req.(*Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SimpleRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TestServiceServer).UnaryCall(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/grpc.testing.TestService/UnaryCall", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TestServiceServer).UnaryCall(ctx, req.(*SimpleRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _TestService_StreamingOutputCall_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(StreamingOutputCallRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(TestServiceServer).StreamingOutputCall(m, &testServiceStreamingOutputCallServer{stream}) -} - -type TestService_StreamingOutputCallServer interface { - Send(*StreamingOutputCallResponse) error - grpc.ServerStream -} - -type testServiceStreamingOutputCallServer struct { - grpc.ServerStream -} - -func (x *testServiceStreamingOutputCallServer) Send(m *StreamingOutputCallResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _TestService_StreamingInputCall_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TestServiceServer).StreamingInputCall(&testServiceStreamingInputCallServer{stream}) -} - -type TestService_StreamingInputCallServer interface { - SendAndClose(*StreamingInputCallResponse) error - Recv() (*StreamingInputCallRequest, error) - grpc.ServerStream -} - -type testServiceStreamingInputCallServer struct { - grpc.ServerStream -} - -func (x *testServiceStreamingInputCallServer) SendAndClose(m *StreamingInputCallResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *testServiceStreamingInputCallServer) Recv() (*StreamingInputCallRequest, error) { - m := new(StreamingInputCallRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _TestService_FullDuplexCall_Handler(srv interface{}, stream
grpc.ServerStream) error { - return srv.(TestServiceServer).FullDuplexCall(&testServiceFullDuplexCallServer{stream}) -} - -type TestService_FullDuplexCallServer interface { - Send(*StreamingOutputCallResponse) error - Recv() (*StreamingOutputCallRequest, error) - grpc.ServerStream -} - -type testServiceFullDuplexCallServer struct { - grpc.ServerStream -} - -func (x *testServiceFullDuplexCallServer) Send(m *StreamingOutputCallResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *testServiceFullDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { - m := new(StreamingOutputCallRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _TestService_HalfDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TestServiceServer).HalfDuplexCall(&testServiceHalfDuplexCallServer{stream}) -} - -type TestService_HalfDuplexCallServer interface { - Send(*StreamingOutputCallResponse) error - Recv() (*StreamingOutputCallRequest, error) - grpc.ServerStream -} - -type testServiceHalfDuplexCallServer struct { - grpc.ServerStream -} - -func (x *testServiceHalfDuplexCallServer) Send(m *StreamingOutputCallResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *testServiceHalfDuplexCallServer) Recv() (*StreamingOutputCallRequest, error) { - m := new(StreamingOutputCallRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _TestService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "grpc.testing.TestService", - HandlerType: (*TestServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "EmptyCall", - Handler: _TestService_EmptyCall_Handler, - }, - { - MethodName: "UnaryCall", - Handler: _TestService_UnaryCall_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "StreamingOutputCall", - Handler: _TestService_StreamingOutputCall_Handler, - ServerStreams: true, - }, - { - StreamName: "StreamingInputCall", - Handler: _TestService_StreamingInputCall_Handler, - ClientStreams: true, - }, - { - StreamName: "FullDuplexCall", - Handler: _TestService_FullDuplexCall_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "HalfDuplexCall", - Handler: _TestService_HalfDuplexCall_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "grpc_testing/test.proto", -} - -// Client API for UnimplementedService service - -type UnimplementedServiceClient interface { - // A call that no server should implement - UnimplementedCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) -} - -type unimplementedServiceClient struct { - cc *grpc.ClientConn -} - -func NewUnimplementedServiceClient(cc *grpc.ClientConn) UnimplementedServiceClient { - return &unimplementedServiceClient{cc} -} - -func (c *unimplementedServiceClient) UnimplementedCall(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { - out := new(Empty) - err := grpc.Invoke(ctx, "/grpc.testing.UnimplementedService/UnimplementedCall", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -// Server API for UnimplementedService service - -type UnimplementedServiceServer interface { - // A call that no server should implement - UnimplementedCall(context.Context, *Empty) (*Empty, error) -} - -func RegisterUnimplementedServiceServer(s *grpc.Server, srv UnimplementedServiceServer) { - s.RegisterService(&_UnimplementedService_serviceDesc, srv) -} - -func _UnimplementedService_UnimplementedCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(UnimplementedServiceServer).UnimplementedCall(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/grpc.testing.UnimplementedService/UnimplementedCall", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(UnimplementedServiceServer).UnimplementedCall(ctx, req.(*Empty)) - } - return interceptor(ctx, in, info, handler) -} - -var _UnimplementedService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "grpc.testing.UnimplementedService", - HandlerType: (*UnimplementedServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "UnimplementedCall", - Handler: _UnimplementedService_UnimplementedCall_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "grpc_testing/test.proto", -} - -func init() { proto.RegisterFile("grpc_testing/test.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 656 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x54, 0x4d, 0x6f, 0xd3, 0x40, - 0x10, 0xc5, 0x69, 0x42, 0xda, 0x49, 0x6a, 0xc2, 0x94, 0xaa, 0x6e, 0x8a, 0x44, 0x64, 0x0e, 0x18, - 0x24, 0x52, 0x14, 0x09, 0x0e, 0x48, 0x80, 0x4a, 0x9b, 0x8a, 0x4a, 0x6d, 0x53, 0xec, 0xe6, 0x1c, - 0x2d, 0xc9, 0xd4, 0xb5, 0xe4, 0x2f, 0xec, 0x75, 0x45, 0x7a, 0xe0, 0xcf, 0xf0, 0x23, 0x38, 0xf0, - 0xe7, 0xd0, 0xae, 0xed, 0xc4, 0x49, 0x53, 0xd1, 0xf2, 0x75, 0xca, 0xee, 0x9b, 0x37, 0xb3, 0xf3, - 0x66, 0x5e, 0x0c, 0x1b, 0x76, 0x14, 0x0e, 0x07, 0x9c, 0x62, 0xee, 0xf8, 0xf6, 0xb6, 0xf8, 0x6d, - 0x87, 0x51, 0xc0, 0x03, 0xac, 0x8b, 0x40, 0x3b, 0x0b, 0xe8, 0x55, 0xa8, 0x74, 0xbd, 0x90, 0x8f, - 0xf5, 0x43, 0xa8, 0x9e, 0xb0, 0xb1, 0x1b, 0xb0, 0x11, 0x3e, 0x87, 0x32, 0x1f, 0x87, 0xa4, 0x29, - 0x2d, 0xc5, 0x50, 0x3b, 0x9b, 0xed, 0x62, 0x42, 0x3b, 0x23, 0x9d, 0x8e, 0x43, 0x32, 0x25, 0x0d, - 0x11, 0xca, 0x9f, 0x82, 0xd1, 0x58, 0x2b, 0xb5, 0x14, 0xa3, 0x6e, 0xca, 0xb3, 0xfe, 0x1a, 0xa0, - 0x3b, 0x3c, 0x0f, 0x2c, 0xce, 0x78, 0x12, 0x0b, 0xc6, 0x30, 0x18, 0xa5, 0x05, 0x2b, 0xa6, 0x3c, - 0xa3, 0x06, 0x55, 0x8f, 0xe2, 0x98, 0xd9, 0x24, 0x13, 0x57, 0xcc, 0xfc, 0xaa, 0x7f, 0x2f, 0xc1, - 0xaa, 0xe5, 0x78, 0xa1, 0x4b, 0x26, 0x7d, 0x4e, 0x28, 0xe6, 0xf8, 0x16, 0x56, 0x23, 0x8a, 0xc3, - 0xc0, 0x8f, 0x69, 0x70, 0xb3, 0xce, 0xea, 0x39, 0x5f, 0xdc, 0xf0, 0x71, 0x21, 0x3f, 0x76, 0x2e, - 0xd3, 0x17, 0x2b, 0x53, 0x92, 0xe5, 0x5c, 0x12, 0x6e, 0x43, 0x35, 0x4c, 0x2b, 0x68, 0x4b, 0x2d, - 0xc5, 0xa8, 0x75, 0xd6, 0x17, 0x96, 0x37, 0x73, 0x96, 0xa8, 0x7a, 0xe6, 0xb8, 0xee, 0x20, 0x89, - 0x29, 0xf2, 0x99, 0x47, 0x5a, 0xb9, 0xa5, 0x18, 0xcb, 0x66, 0x5d, 0x80, 0xfd, 0x0c, 0x43, 0x03, - 0x1a, 0x92, 0x14, 0xb0, 0x84, 0x9f, 0x0f, 0xe2, 0x61, 0x10, 0x92, 0x56, 0x91, 0x3c, 0x55, 0xe0, - 0x3d, 0x01, 0x5b, 0x02, 0xc5, 0x1d, 0xb8, 0x37, 0x6d, 0x52, 0xce, 0x4d, 0xab, 0xca, 0x3e, 0xb4, - 0xd9, 0x3e, 0xa6, 0x73, 0x35, 
0xd5, 0x89, 0x00, 0x79, 0xd7, 0xbf, 0x82, 0x9a, 0x0f, 0x2e, 0xc5, - 0x8b, 0xa2, 0x94, 0x1b, 0x89, 0x6a, 0xc2, 0xf2, 0x44, 0x4f, 0xba, 0x97, 0xc9, 0x1d, 0x1f, 0x41, - 0xad, 0x28, 0x63, 0x49, 0x86, 0x21, 0x98, 0x48, 0xd0, 0x0f, 0x61, 0xd3, 0xe2, 0x11, 0x31, 0xcf, - 0xf1, 0xed, 0x03, 0x3f, 0x4c, 0xf8, 0x2e, 0x73, 0xdd, 0x7c, 0x89, 0xb7, 0x6d, 0x45, 0x3f, 0x85, - 0xe6, 0xa2, 0x6a, 0x99, 0xb2, 0x57, 0xb0, 0xc1, 0x6c, 0x3b, 0x22, 0x9b, 0x71, 0x1a, 0x0d, 0xb2, - 0x9c, 0x74, 0xbb, 0xa9, 0xcd, 0xd6, 0xa7, 0xe1, 0xac, 0xb4, 0x58, 0xb3, 0x7e, 0x00, 0x98, 0xd7, - 0x38, 0x61, 0x11, 0xf3, 0x88, 0x53, 0x24, 0x1d, 0x5a, 0x48, 0x95, 0x67, 0x21, 0xd7, 0xf1, 0x39, - 0x45, 0x17, 0x4c, 0xec, 0x38, 0xf3, 0x0c, 0xe4, 0x50, 0x3f, 0xd6, 0xbf, 0x95, 0x0a, 0x1d, 0xf6, - 0x12, 0x3e, 0x27, 0xf8, 0x4f, 0x5d, 0xfb, 0x11, 0xd6, 0x26, 0xf9, 0xe1, 0xa4, 0x55, 0xad, 0xd4, - 0x5a, 0x32, 0x6a, 0x9d, 0xd6, 0x6c, 0x95, 0xab, 0x92, 0x4c, 0x8c, 0xae, 0xca, 0xbc, 0xb5, 0xc7, - 0xff, 0x82, 0x29, 0x8f, 0x61, 0x6b, 0xe1, 0x90, 0x7e, 0xd3, 0xa1, 0xcf, 0xde, 0x41, 0xad, 0x30, - 0x33, 0x6c, 0x40, 0x7d, 0xb7, 0x77, 0x74, 0x62, 0x76, 0x2d, 0x6b, 0xe7, 0xfd, 0x61, 0xb7, 0x71, - 0x07, 0x11, 0xd4, 0xfe, 0xf1, 0x0c, 0xa6, 0x20, 0xc0, 0x5d, 0x73, 0xe7, 0x78, 0xaf, 0x77, 0xd4, - 0x28, 0x75, 0x7e, 0x94, 0xa1, 0x76, 0x4a, 0x31, 0xb7, 0x28, 0xba, 0x70, 0x86, 0x84, 0x2f, 0x61, - 0x45, 0x7e, 0x02, 0x45, 0x5b, 0xb8, 0x36, 0xa7, 0x4b, 0x04, 0x9a, 0x8b, 0x40, 0xdc, 0x87, 0x95, - 0xbe, 0xcf, 0xa2, 0x34, 0x6d, 0x6b, 0x96, 0x31, 0xf3, 0xf9, 0x6a, 0x3e, 0x5c, 0x1c, 0xcc, 0x06, - 0xe0, 0xc2, 0xda, 0x82, 0xf9, 0xa0, 0x31, 0x97, 0x74, 0xad, 0xcf, 0x9a, 0x4f, 0x6f, 0xc0, 0x4c, - 0xdf, 0x7a, 0xa1, 0xa0, 0x03, 0x78, 0xf5, 0x4f, 0x85, 0x4f, 0xae, 0x29, 0x31, 0xff, 0x27, 0x6e, - 0x1a, 0xbf, 0x26, 0xa6, 0x4f, 0x19, 0xe2, 0x29, 0x75, 0x3f, 0x71, 0xdd, 0xbd, 0x24, 0x74, 0xe9, - 0xcb, 0x3f, 0xd3, 0x64, 0x28, 0x52, 0x95, 0xfa, 0x81, 0xb9, 0x67, 0xff, 0xe1, 0xa9, 0x4e, 0x1f, - 0x1e, 0xf4, 0x7d, 0xb9, 0x41, 0x8f, 0x7c, 0x4e, 0xa3, 0xdc, 0x45, 0x6f, 0xe0, 0xfe, 0x0c, 0x7e, - 0x3b, 0x37, 0xfd, 0x0c, 0x00, 0x00, 0xff, 0xff, 0x15, 0x62, 0x93, 0xba, 0xaf, 0x07, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/grpc/interop/grpc_testing/test.proto b/vendor/google.golang.org/grpc/interop/grpc_testing/test.proto deleted file mode 100644 index 20d4366b0..000000000 --- a/vendor/google.golang.org/grpc/interop/grpc_testing/test.proto +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2017 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// An integration test service that covers all the method signature permutations -// of unary/streaming requests/responses. -syntax = "proto2"; - -package grpc.testing; - -message Empty {} - -// The type of payload that should be returned. -enum PayloadType { - // Compressable text format. - COMPRESSABLE = 0; - - // Uncompressable binary format. - UNCOMPRESSABLE = 1; - - // Randomly chosen from all other formats defined in this enum. - RANDOM = 2; -} - -// A block of data, to simply increase gRPC message size. 
-message Payload { - // The type of data in body. - optional PayloadType type = 1; - // Primary contents of payload. - optional bytes body = 2; -} - -// A protobuf representation for grpc status. This is used by test -// clients to specify a status that the server should attempt to return. -message EchoStatus { - optional int32 code = 1; - optional string message = 2; -} - -// Unary request. -message SimpleRequest { - // Desired payload type in the response from the server. - // If response_type is RANDOM, server randomly chooses one from other formats. - optional PayloadType response_type = 1; - - // Desired payload size in the response from the server. - // If response_type is COMPRESSABLE, this denotes the size before compression. - optional int32 response_size = 2; - - // Optional input payload sent along with the request. - optional Payload payload = 3; - - // Whether SimpleResponse should include username. - optional bool fill_username = 4; - - // Whether SimpleResponse should include OAuth scope. - optional bool fill_oauth_scope = 5; - - // Whether server should return a given status - optional EchoStatus response_status = 7; -} - -// Unary response, as configured by the request. -message SimpleResponse { - // Payload to increase message size. - optional Payload payload = 1; - - // The user the request came from, for verifying authentication was - // successful when the client expected it. - optional string username = 2; - - // OAuth scope. - optional string oauth_scope = 3; -} - -// Client-streaming request. -message StreamingInputCallRequest { - // Optional input payload sent along with the request. - optional Payload payload = 1; - - // Not expecting any payload from the response. -} - -// Client-streaming response. -message StreamingInputCallResponse { - // Aggregated size of payloads received from the client. - optional int32 aggregated_payload_size = 1; -} - -// Configuration for a particular response. -message ResponseParameters { - // Desired payload sizes in responses from the server. - // If response_type is COMPRESSABLE, this denotes the size before compression. - optional int32 size = 1; - - // Desired interval between consecutive responses in the response stream in - // microseconds. - optional int32 interval_us = 2; -} - -// Server-streaming request. -message StreamingOutputCallRequest { - // Desired payload type in the response from the server. - // If response_type is RANDOM, the payload from each response in the stream - // might be of different types. This is to simulate a mixed type of payload - // stream. - optional PayloadType response_type = 1; - - // Configuration for each expected response message. - repeated ResponseParameters response_parameters = 2; - - // Optional input payload sent along with the request. - optional Payload payload = 3; - - // Whether server should return a given status - optional EchoStatus response_status = 7; -} - -// Server-streaming response, as configured by the request and parameters. -message StreamingOutputCallResponse { - // Payload to increase response size. - optional Payload payload = 1; -} - -// A simple service to test the various types of RPCs and experiment with -// performance with various types of payload. -service TestService { - // One empty request followed by one empty response. - rpc EmptyCall(Empty) returns (Empty); - - // One request followed by one response. - // The server returns the client payload as-is. 
- rpc UnaryCall(SimpleRequest) returns (SimpleResponse); - - // One request followed by a sequence of responses (streamed download). - // The server returns the payload with client desired type and sizes. - rpc StreamingOutputCall(StreamingOutputCallRequest) - returns (stream StreamingOutputCallResponse); - - // A sequence of requests followed by one response (streamed upload). - // The server returns the aggregated size of client payload as the result. - rpc StreamingInputCall(stream StreamingInputCallRequest) - returns (StreamingInputCallResponse); - - // A sequence of requests with each request served by the server immediately. - // As one request could lead to multiple responses, this interface - // demonstrates the idea of full duplexing. - rpc FullDuplexCall(stream StreamingOutputCallRequest) - returns (stream StreamingOutputCallResponse); - - // A sequence of requests followed by a sequence of responses. - // The server buffers all the client requests and then serves them in order. A - // stream of responses is returned to the client when the server starts with - // the first request. - rpc HalfDuplexCall(stream StreamingOutputCallRequest) - returns (stream StreamingOutputCallResponse); -} - -// A simple service NOT implemented at servers so clients can test for -// that case. -service UnimplementedService { - // A call that no server should implement - rpc UnimplementedCall(grpc.testing.Empty) returns (grpc.testing.Empty); -} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto deleted file mode 100644 index c52ccc6ab..000000000 --- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2016 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Service exported by server reflection - -syntax = "proto3"; - -package grpc.reflection.v1alpha; - -service ServerReflection { - // The reflection service is structured as a bidirectional stream, ensuring - // all related requests go to a single server. - rpc ServerReflectionInfo(stream ServerReflectionRequest) - returns (stream ServerReflectionResponse); -} - -// The message sent by the client when calling ServerReflectionInfo method. -message ServerReflectionRequest { - string host = 1; - // To use reflection service, the client should set one of the following - // fields in message_request. The server distinguishes requests by their - // defined field and then handles them using corresponding methods. - oneof message_request { - // Find a proto file by the file name. - string file_by_filename = 3; - - // Find the proto file that declares the given fully-qualified symbol name. - // This field should be a fully-qualified symbol name - // (e.g. <package>.<service>[.<method>] or <package>.<type>). - string file_containing_symbol = 4; - - // Find the proto file which defines an extension extending the given - // message type with the given field number.
- ExtensionRequest file_containing_extension = 5; - - // Finds the tag numbers used by all known extensions of extendee_type, and - // appends them to ExtensionNumberResponse in an undefined order. - // Its corresponding method is best-effort: it's not guaranteed that the - // reflection service will implement this method, and it's not guaranteed - // that this method will provide all extensions. Returns - // StatusCode::UNIMPLEMENTED if it's not implemented. - // This field should be a fully-qualified type name. The format is - // <package>.<type> - string all_extension_numbers_of_type = 6; - - // List the full names of registered services. The content will not be - // checked. - string list_services = 7; - } -} - -// The type name and extension number sent by the client when requesting -// file_containing_extension. -message ExtensionRequest { - // Fully-qualified type name. The format should be <package>.<type> - string containing_type = 1; - int32 extension_number = 2; -} - -// The message sent by the server to answer ServerReflectionInfo method. -message ServerReflectionResponse { - string valid_host = 1; - ServerReflectionRequest original_request = 2; - // The server sets one of the following fields according to the message_request - // in the request. - oneof message_response { - // This message is used to answer file_by_filename, file_containing_symbol, - // file_containing_extension requests with transitive dependencies. As - // the repeated label is not allowed in oneof fields, we use a - // FileDescriptorResponse message to encapsulate the repeated fields. - // The reflection service is allowed to avoid sending FileDescriptorProtos - // that were previously sent in response to earlier requests in the stream. - FileDescriptorResponse file_descriptor_response = 4; - - // This message is used to answer all_extension_numbers_of_type request. - ExtensionNumberResponse all_extension_numbers_response = 5; - - // This message is used to answer list_services request. - ListServiceResponse list_services_response = 6; - - // This message is used when an error occurs. - ErrorResponse error_response = 7; - } -} - -// Serialized FileDescriptorProto messages sent by the server answering -// a file_by_filename, file_containing_symbol, or file_containing_extension -// request. -message FileDescriptorResponse { - // Serialized FileDescriptorProto messages. We avoid taking a dependency on - // descriptor.proto, which uses proto2 only features, by making them opaque - // bytes instead. - repeated bytes file_descriptor_proto = 1; -} - -// A list of extension numbers sent by the server answering -// all_extension_numbers_of_type request. -message ExtensionNumberResponse { - // Full name of the base type, including the package name. The format - // is <package>.<type> - string base_type_name = 1; - repeated int32 extension_number = 2; -} - -// A list of ServiceResponse sent by the server answering list_services request. -message ListServiceResponse { - // The information of each service may be expanded in the future, so we use - // ServiceResponse message to encapsulate it. - repeated ServiceResponse service = 1; -} - -// The information of a single service used by ListServiceResponse to answer -// list_services request. -message ServiceResponse { - // Full name of a registered service, including its package name. The format - // is <package>.<service> - string name = 1; -} - -// The error code and error message sent by the server when an error occurs. -message ErrorResponse { - // This field uses the error codes defined in grpc::StatusCode.
- int32 error_code = 1; - string error_message = 2; -} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.pb.go deleted file mode 100644 index 5b0161885..000000000 --- a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.pb.go +++ /dev/null @@ -1,77 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: proto2.proto - -/* -Package grpc_testing is a generated protocol buffer package. - -It is generated from these files: - proto2.proto - proto2_ext.proto - proto2_ext2.proto - test.proto - -It has these top-level messages: - ToBeExtended - Extension - AnotherExtension - SearchResponse - SearchRequest -*/ -package grpc_testing - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type ToBeExtended struct { - Foo *int32 `protobuf:"varint,1,req,name=foo" json:"foo,omitempty"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ToBeExtended) Reset() { *m = ToBeExtended{} } -func (m *ToBeExtended) String() string { return proto.CompactTextString(m) } -func (*ToBeExtended) ProtoMessage() {} -func (*ToBeExtended) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -var extRange_ToBeExtended = []proto.ExtensionRange{ - {10, 30}, -} - -func (*ToBeExtended) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_ToBeExtended -} - -func (m *ToBeExtended) GetFoo() int32 { - if m != nil && m.Foo != nil { - return *m.Foo - } - return 0 -} - -func init() { - proto.RegisterType((*ToBeExtended)(nil), "grpc.testing.ToBeExtended") -} - -func init() { proto.RegisterFile("proto2.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 86 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x28, 0xca, 0x2f, - 0xc9, 0x37, 0xd2, 0x03, 0x53, 0x42, 0x3c, 0xe9, 0x45, 0x05, 0xc9, 0x7a, 0x25, 0xa9, 0xc5, 0x25, - 0x99, 0x79, 0xe9, 0x4a, 0x6a, 0x5c, 0x3c, 0x21, 0xf9, 0x4e, 0xa9, 0xae, 0x15, 0x25, 0xa9, 0x79, - 0x29, 0xa9, 0x29, 0x42, 0x02, 0x5c, 0xcc, 0x69, 0xf9, 0xf9, 0x12, 0x8c, 0x0a, 0x4c, 0x1a, 0xac, - 0x41, 0x20, 0xa6, 0x16, 0x0b, 0x07, 0x97, 0x80, 0x3c, 0x20, 0x00, 0x00, 0xff, 0xff, 0x74, 0x86, - 0x9c, 0x08, 0x44, 0x00, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.proto b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.proto deleted file mode 100644 index a675d143d..000000000 --- a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2.proto +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2017 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto2"; - -package grpc.testing; - -message ToBeExtended { - required int32 foo = 1; - extensions 10 to 30; -} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.pb.go deleted file mode 100644 index 9ae4f07af..000000000 --- a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.pb.go +++ /dev/null @@ -1,82 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: proto2_ext.proto - -package grpc_testing - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type Extension struct { - Whatzit *int32 `protobuf:"varint,1,opt,name=whatzit" json:"whatzit,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Extension) Reset() { *m = Extension{} } -func (m *Extension) String() string { return proto.CompactTextString(m) } -func (*Extension) ProtoMessage() {} -func (*Extension) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } - -func (m *Extension) GetWhatzit() int32 { - if m != nil && m.Whatzit != nil { - return *m.Whatzit - } - return 0 -} - -var E_Foo = &proto.ExtensionDesc{ - ExtendedType: (*ToBeExtended)(nil), - ExtensionType: (*int32)(nil), - Field: 13, - Name: "grpc.testing.foo", - Tag: "varint,13,opt,name=foo", - Filename: "proto2_ext.proto", -} - -var E_Bar = &proto.ExtensionDesc{ - ExtendedType: (*ToBeExtended)(nil), - ExtensionType: (*Extension)(nil), - Field: 17, - Name: "grpc.testing.bar", - Tag: "bytes,17,opt,name=bar", - Filename: "proto2_ext.proto", -} - -var E_Baz = &proto.ExtensionDesc{ - ExtendedType: (*ToBeExtended)(nil), - ExtensionType: (*SearchRequest)(nil), - Field: 19, - Name: "grpc.testing.baz", - Tag: "bytes,19,opt,name=baz", - Filename: "proto2_ext.proto", -} - -func init() { - proto.RegisterType((*Extension)(nil), "grpc.testing.Extension") - proto.RegisterExtension(E_Foo) - proto.RegisterExtension(E_Bar) - proto.RegisterExtension(E_Baz) -} - -func init() { proto.RegisterFile("proto2_ext.proto", fileDescriptor1) } - -var fileDescriptor1 = []byte{ - // 179 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x28, 0x28, 0xca, 0x2f, - 0xc9, 0x37, 0x8a, 0x4f, 0xad, 0x28, 0xd1, 0x03, 0x33, 0x85, 0x78, 0xd2, 0x8b, 0x0a, 0x92, 0xf5, - 0x4a, 0x52, 0x8b, 0x4b, 0x32, 0xf3, 0xd2, 0xa5, 0x78, 0x20, 0xf2, 0x10, 0x39, 0x29, 0x2e, 0x90, - 0x30, 0x84, 0xad, 0xa4, 0xca, 0xc5, 0xe9, 0x5a, 0x51, 0x92, 0x9a, 0x57, 0x9c, 0x99, 0x9f, 0x27, - 0x24, 0xc1, 0xc5, 0x5e, 0x9e, 0x91, 0x58, 0x52, 0x95, 0x59, 0x22, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, - 0x1a, 0x04, 0xe3, 0x5a, 0xe9, 0x70, 0x31, 0xa7, 0xe5, 0xe7, 0x0b, 0x49, 0xe9, 0x21, 0x1b, 0xab, - 0x17, 0x92, 0xef, 0x94, 0x0a, 0xd6, 0x9d, 0x92, 0x9a, 0x22, 0xc1, 0x0b, 0xd6, 0x01, 0x52, 0x66, - 0xe5, 0xca, 0xc5, 0x9c, 0x94, 0x58, 0x84, 0x57, 0xb5, 0xa0, 0x02, 0xa3, 0x06, 0xb7, 0x91, 0x38, - 0xaa, 0x0a, 0xb8, 0x4b, 0x82, 0x40, 0xfa, 0xad, 0x3c, 0x41, 
0xc6, 0x54, 0xe1, 0x35, 0x46, 0x18, - 0x6c, 0x8c, 0x34, 0xaa, 0x8a, 0xe0, 0xd4, 0xc4, 0xa2, 0xe4, 0x8c, 0xa0, 0xd4, 0xc2, 0xd2, 0xd4, - 0xe2, 0x12, 0x90, 0x51, 0x55, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x71, 0x6b, 0x94, 0x9f, 0x21, - 0x01, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.proto b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.proto deleted file mode 100644 index a4942e481..000000000 --- a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext.proto +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2017 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto2"; - -package grpc.testing; - -import "proto2.proto"; -import "test.proto"; - -extend ToBeExtended { - optional int32 foo = 13; - optional Extension bar = 17; - optional SearchRequest baz = 19; -} - -message Extension { - optional int32 whatzit = 1; -} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext2.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext2.pb.go deleted file mode 100644 index 26ec982aa..000000000 --- a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext2.pb.go +++ /dev/null @@ -1,71 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: proto2_ext2.proto - -package grpc_testing - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type AnotherExtension struct { - Whatchamacallit *int32 `protobuf:"varint,1,opt,name=whatchamacallit" json:"whatchamacallit,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *AnotherExtension) Reset() { *m = AnotherExtension{} } -func (m *AnotherExtension) String() string { return proto.CompactTextString(m) } -func (*AnotherExtension) ProtoMessage() {} -func (*AnotherExtension) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } - -func (m *AnotherExtension) GetWhatchamacallit() int32 { - if m != nil && m.Whatchamacallit != nil { - return *m.Whatchamacallit - } - return 0 -} - -var E_Frob = &proto.ExtensionDesc{ - ExtendedType: (*ToBeExtended)(nil), - ExtensionType: (*string)(nil), - Field: 23, - Name: "grpc.testing.frob", - Tag: "bytes,23,opt,name=frob", - Filename: "proto2_ext2.proto", -} - -var E_Nitz = &proto.ExtensionDesc{ - ExtendedType: (*ToBeExtended)(nil), - ExtensionType: (*AnotherExtension)(nil), - Field: 29, - Name: "grpc.testing.nitz", - Tag: "bytes,29,opt,name=nitz", - Filename: "proto2_ext2.proto", -} - -func init() { - proto.RegisterType((*AnotherExtension)(nil), "grpc.testing.AnotherExtension") - proto.RegisterExtension(E_Frob) - proto.RegisterExtension(E_Nitz) -} - -func init() { proto.RegisterFile("proto2_ext2.proto", fileDescriptor2) } - -var fileDescriptor2 = []byte{ - // 165 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2c, 0x28, 0xca, 0x2f, - 0xc9, 0x37, 0x8a, 0x4f, 0xad, 0x28, 0x31, 0xd2, 0x03, 0xb3, 0x85, 0x78, 0xd2, 0x8b, 0x0a, 0x92, - 0xf5, 0x4a, 0x52, 0x8b, 0x4b, 0x32, 0xf3, 0xd2, 0xa5, 0x78, 0x20, 0x0a, 0x20, 0x72, 0x4a, 0x36, - 0x5c, 0x02, 0x8e, 0x79, 0xf9, 0x25, 0x19, 0xa9, 0x45, 0xae, 0x15, 0x25, 0xa9, 0x79, 0xc5, 0x99, - 0xf9, 0x79, 0x42, 0x1a, 0x5c, 0xfc, 0xe5, 0x19, 0x89, 0x25, 0xc9, 0x19, 0x89, 0xb9, 0x89, 0xc9, - 0x89, 0x39, 0x39, 0x99, 0x25, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0xe8, 0xc2, 0x56, 0x7a, - 0x5c, 0x2c, 0x69, 0x45, 0xf9, 0x49, 0x42, 0x52, 0x7a, 0xc8, 0x56, 0xe8, 0x85, 0xe4, 0x3b, 0xa5, - 0x82, 0x8d, 0x4b, 0x49, 0x4d, 0x91, 0x10, 0x57, 0x60, 0xd4, 0xe0, 0x0c, 0x02, 0xab, 0xb3, 0xf2, - 0xe3, 0x62, 0xc9, 0xcb, 0x2c, 0xa9, 0xc2, 0xab, 0x5e, 0x56, 0x81, 0x51, 0x83, 0xdb, 0x48, 0x0e, - 0x55, 0x05, 0xba, 0x1b, 0x83, 0xc0, 0xe6, 0x00, 0x02, 0x00, 0x00, 0xff, 0xff, 0xf0, 0x7e, 0x0d, - 0x26, 0xed, 0x00, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext2.proto b/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext2.proto deleted file mode 100644 index d91ba0061..000000000 --- a/vendor/google.golang.org/grpc/reflection/grpc_testing/proto2_ext2.proto +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2017 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto2"; - -package grpc.testing; - -import "proto2.proto"; - -extend ToBeExtended { - optional string frob = 23; - optional AnotherExtension nitz = 29; -} - -message AnotherExtension { - optional int32 whatchamacallit = 1; -} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/test.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_testing/test.pb.go deleted file mode 100644 index 62f71ff15..000000000 --- a/vendor/google.golang.org/grpc/reflection/grpc_testing/test.pb.go +++ /dev/null @@ -1,247 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: test.proto - -package grpc_testing - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type SearchResponse struct { - Results []*SearchResponse_Result `protobuf:"bytes,1,rep,name=results" json:"results,omitempty"` -} - -func (m *SearchResponse) Reset() { *m = SearchResponse{} } -func (m *SearchResponse) String() string { return proto.CompactTextString(m) } -func (*SearchResponse) ProtoMessage() {} -func (*SearchResponse) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } - -func (m *SearchResponse) GetResults() []*SearchResponse_Result { - if m != nil { - return m.Results - } - return nil -} - -type SearchResponse_Result struct { - Url string `protobuf:"bytes,1,opt,name=url" json:"url,omitempty"` - Title string `protobuf:"bytes,2,opt,name=title" json:"title,omitempty"` - Snippets []string `protobuf:"bytes,3,rep,name=snippets" json:"snippets,omitempty"` -} - -func (m *SearchResponse_Result) Reset() { *m = SearchResponse_Result{} } -func (m *SearchResponse_Result) String() string { return proto.CompactTextString(m) } -func (*SearchResponse_Result) ProtoMessage() {} -func (*SearchResponse_Result) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0, 0} } - -func (m *SearchResponse_Result) GetUrl() string { - if m != nil { - return m.Url - } - return "" -} - -func (m *SearchResponse_Result) GetTitle() string { - if m != nil { - return m.Title - } - return "" -} - -func (m *SearchResponse_Result) GetSnippets() []string { - if m != nil { - return m.Snippets - } - return nil -} - -type SearchRequest struct { - Query string `protobuf:"bytes,1,opt,name=query" json:"query,omitempty"` -} - -func (m *SearchRequest) Reset() { *m = SearchRequest{} } -func (m *SearchRequest) String() string { return proto.CompactTextString(m) } -func (*SearchRequest) ProtoMessage() {} -func (*SearchRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} } - -func (m *SearchRequest) GetQuery() string { - if m != nil { - return m.Query - } - return "" -} - -func init() { - proto.RegisterType((*SearchResponse)(nil), "grpc.testing.SearchResponse") - proto.RegisterType((*SearchResponse_Result)(nil), "grpc.testing.SearchResponse.Result") - proto.RegisterType((*SearchRequest)(nil), "grpc.testing.SearchRequest") -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. 
-const _ = grpc.SupportPackageIsVersion4 - -// Client API for SearchService service - -type SearchServiceClient interface { - Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) - StreamingSearch(ctx context.Context, opts ...grpc.CallOption) (SearchService_StreamingSearchClient, error) -} - -type searchServiceClient struct { - cc *grpc.ClientConn -} - -func NewSearchServiceClient(cc *grpc.ClientConn) SearchServiceClient { - return &searchServiceClient{cc} -} - -func (c *searchServiceClient) Search(ctx context.Context, in *SearchRequest, opts ...grpc.CallOption) (*SearchResponse, error) { - out := new(SearchResponse) - err := grpc.Invoke(ctx, "/grpc.testing.SearchService/Search", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *searchServiceClient) StreamingSearch(ctx context.Context, opts ...grpc.CallOption) (SearchService_StreamingSearchClient, error) { - stream, err := grpc.NewClientStream(ctx, &_SearchService_serviceDesc.Streams[0], c.cc, "/grpc.testing.SearchService/StreamingSearch", opts...) - if err != nil { - return nil, err - } - x := &searchServiceStreamingSearchClient{stream} - return x, nil -} - -type SearchService_StreamingSearchClient interface { - Send(*SearchRequest) error - Recv() (*SearchResponse, error) - grpc.ClientStream -} - -type searchServiceStreamingSearchClient struct { - grpc.ClientStream -} - -func (x *searchServiceStreamingSearchClient) Send(m *SearchRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *searchServiceStreamingSearchClient) Recv() (*SearchResponse, error) { - m := new(SearchResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// Server API for SearchService service - -type SearchServiceServer interface { - Search(context.Context, *SearchRequest) (*SearchResponse, error) - StreamingSearch(SearchService_StreamingSearchServer) error -} - -func RegisterSearchServiceServer(s *grpc.Server, srv SearchServiceServer) { - s.RegisterService(&_SearchService_serviceDesc, srv) -} - -func _SearchService_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SearchRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SearchServiceServer).Search(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/grpc.testing.SearchService/Search", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SearchServiceServer).Search(ctx, req.(*SearchRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SearchService_StreamingSearch_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(SearchServiceServer).StreamingSearch(&searchServiceStreamingSearchServer{stream}) -} - -type SearchService_StreamingSearchServer interface { - Send(*SearchResponse) error - Recv() (*SearchRequest, error) - grpc.ServerStream -} - -type searchServiceStreamingSearchServer struct { - grpc.ServerStream -} - -func (x *searchServiceStreamingSearchServer) Send(m *SearchResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *searchServiceStreamingSearchServer) Recv() (*SearchRequest, error) { - m := new(SearchRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _SearchService_serviceDesc = grpc.ServiceDesc{ - ServiceName: 
"grpc.testing.SearchService", - HandlerType: (*SearchServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Search", - Handler: _SearchService_Search_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "StreamingSearch", - Handler: _SearchService_StreamingSearch_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "test.proto", -} - -func init() { proto.RegisterFile("test.proto", fileDescriptor3) } - -var fileDescriptor3 = []byte{ - // 231 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x91, 0xbd, 0x4a, 0xc5, 0x40, - 0x10, 0x85, 0x59, 0x83, 0xd1, 0x3b, 0xfe, 0x32, 0x58, 0x84, 0x68, 0x11, 0xae, 0x08, 0xa9, 0x16, - 0xb9, 0xd6, 0x56, 0xb6, 0x16, 0xb2, 0x79, 0x82, 0x6b, 0x18, 0xe2, 0x42, 0x4c, 0x36, 0x33, 0x13, - 0xc1, 0x87, 0xb1, 0xf5, 0x39, 0x25, 0x59, 0x23, 0x0a, 0x62, 0x63, 0xb7, 0xe7, 0xe3, 0xcc, 0xb7, - 0xbb, 0x0c, 0x80, 0x92, 0xa8, 0x0d, 0xdc, 0x6b, 0x8f, 0x87, 0x0d, 0x87, 0xda, 0x4e, 0xc0, 0x77, - 0xcd, 0xfa, 0xcd, 0xc0, 0x71, 0x45, 0x5b, 0xae, 0x9f, 0x1c, 0x49, 0xe8, 0x3b, 0x21, 0xbc, 0x85, - 0x3d, 0x26, 0x19, 0x5b, 0x95, 0xcc, 0x14, 0x49, 0x79, 0xb0, 0xb9, 0xb4, 0xdf, 0x47, 0xec, 0xcf, - 0xba, 0x75, 0x73, 0xd7, 0x2d, 0x33, 0xf9, 0x3d, 0xa4, 0x11, 0xe1, 0x29, 0x24, 0x23, 0xb7, 0x99, - 0x29, 0x4c, 0xb9, 0x72, 0xd3, 0x11, 0xcf, 0x60, 0x57, 0xbd, 0xb6, 0x94, 0xed, 0xcc, 0x2c, 0x06, - 0xcc, 0x61, 0x5f, 0x3a, 0x1f, 0x02, 0xa9, 0x64, 0x49, 0x91, 0x94, 0x2b, 0xf7, 0x95, 0xd7, 0x57, - 0x70, 0xb4, 0xdc, 0x37, 0x8c, 0x24, 0x3a, 0x29, 0x86, 0x91, 0xf8, 0xf5, 0x53, 0x1b, 0xc3, 0xe6, - 0xdd, 0x2c, 0xbd, 0x8a, 0xf8, 0xc5, 0xd7, 0x84, 0x77, 0x90, 0x46, 0x80, 0xe7, 0xbf, 0x3f, 0x7f, - 0xd6, 0xe5, 0x17, 0x7f, 0xfd, 0x0d, 0x1f, 0xe0, 0xa4, 0x52, 0xa6, 0xed, 0xb3, 0xef, 0x9a, 0x7f, - 0xdb, 0x4a, 0x73, 0x6d, 0x1e, 0xd3, 0x79, 0x09, 0x37, 0x1f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x20, - 0xd6, 0x09, 0xb8, 0x92, 0x01, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testing/test.proto b/vendor/google.golang.org/grpc/reflection/grpc_testing/test.proto deleted file mode 100644 index cae3f01a0..000000000 --- a/vendor/google.golang.org/grpc/reflection/grpc_testing/test.proto +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2017 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -package grpc.testing; - -message SearchResponse { - message Result { - string url = 1; - string title = 2; - repeated string snippets = 3; - } - repeated Result results = 1; -} - -message SearchRequest { - string query = 1; -} - -service SearchService { - rpc Search(SearchRequest) returns (SearchResponse); - rpc StreamingSearch(stream SearchRequest) returns (stream SearchResponse); -} diff --git a/vendor/google.golang.org/grpc/reflection/grpc_testingv3/testv3.proto b/vendor/google.golang.org/grpc/reflection/grpc_testingv3/testv3.proto deleted file mode 100644 index 1e175193e..000000000 --- a/vendor/google.golang.org/grpc/reflection/grpc_testingv3/testv3.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package grpc.testingv3; - -message SearchResponseV3 { - message Result { - string url = 1; - string title = 2; - repeated string snippets = 3; - } - repeated Result results = 1; -} - -message SearchRequestV3 { - string query = 1; -} - -service SearchServiceV3 { - rpc Search(SearchRequestV3) returns (SearchResponseV3); - rpc StreamingSearch(stream SearchRequestV3) returns (stream SearchResponseV3); -} diff --git a/vendor/google.golang.org/grpc/stats/grpc_testing/test.pb.go b/vendor/google.golang.org/grpc/stats/grpc_testing/test.pb.go deleted file mode 100644 index c0c14a24b..000000000 --- a/vendor/google.golang.org/grpc/stats/grpc_testing/test.pb.go +++ /dev/null @@ -1,369 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: grpc_testing/test.proto - -/* -Package grpc_testing is a generated protocol buffer package. - -It is generated from these files: - grpc_testing/test.proto - -It has these top-level messages: - SimpleRequest - SimpleResponse -*/ -package grpc_testing - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type SimpleRequest struct { - Id int32 `protobuf:"varint,2,opt,name=id" json:"id,omitempty"` -} - -func (m *SimpleRequest) Reset() { *m = SimpleRequest{} } -func (m *SimpleRequest) String() string { return proto.CompactTextString(m) } -func (*SimpleRequest) ProtoMessage() {} -func (*SimpleRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -func (m *SimpleRequest) GetId() int32 { - if m != nil { - return m.Id - } - return 0 -} - -type SimpleResponse struct { - Id int32 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"` -} - -func (m *SimpleResponse) Reset() { *m = SimpleResponse{} } -func (m *SimpleResponse) String() string { return proto.CompactTextString(m) } -func (*SimpleResponse) ProtoMessage() {} -func (*SimpleResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -func (m *SimpleResponse) GetId() int32 { - if m != nil { - return m.Id - } - return 0 -} - -func init() { - proto.RegisterType((*SimpleRequest)(nil), "grpc.testing.SimpleRequest") - proto.RegisterType((*SimpleResponse)(nil), "grpc.testing.SimpleResponse") -} - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for TestService service - -type TestServiceClient interface { - // One request followed by one response. - // The server returns the client id as-is. - UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) - // A sequence of requests with each request served by the server immediately. - // As one request could lead to multiple responses, this interface - // demonstrates the idea of full duplexing. - FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) - // Client stream - ClientStreamCall(ctx context.Context, opts ...grpc.CallOption) (TestService_ClientStreamCallClient, error) - // Server stream - ServerStreamCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (TestService_ServerStreamCallClient, error) -} - -type testServiceClient struct { - cc *grpc.ClientConn -} - -func NewTestServiceClient(cc *grpc.ClientConn) TestServiceClient { - return &testServiceClient{cc} -} - -func (c *testServiceClient) UnaryCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (*SimpleResponse, error) { - out := new(SimpleResponse) - err := grpc.Invoke(ctx, "/grpc.testing.TestService/UnaryCall", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *testServiceClient) FullDuplexCall(ctx context.Context, opts ...grpc.CallOption) (TestService_FullDuplexCallClient, error) { - stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[0], c.cc, "/grpc.testing.TestService/FullDuplexCall", opts...) - if err != nil { - return nil, err - } - x := &testServiceFullDuplexCallClient{stream} - return x, nil -} - -type TestService_FullDuplexCallClient interface { - Send(*SimpleRequest) error - Recv() (*SimpleResponse, error) - grpc.ClientStream -} - -type testServiceFullDuplexCallClient struct { - grpc.ClientStream -} - -func (x *testServiceFullDuplexCallClient) Send(m *SimpleRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *testServiceFullDuplexCallClient) Recv() (*SimpleResponse, error) { - m := new(SimpleResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *testServiceClient) ClientStreamCall(ctx context.Context, opts ...grpc.CallOption) (TestService_ClientStreamCallClient, error) { - stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[1], c.cc, "/grpc.testing.TestService/ClientStreamCall", opts...) 
- if err != nil { - return nil, err - } - x := &testServiceClientStreamCallClient{stream} - return x, nil -} - -type TestService_ClientStreamCallClient interface { - Send(*SimpleRequest) error - CloseAndRecv() (*SimpleResponse, error) - grpc.ClientStream -} - -type testServiceClientStreamCallClient struct { - grpc.ClientStream -} - -func (x *testServiceClientStreamCallClient) Send(m *SimpleRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *testServiceClientStreamCallClient) CloseAndRecv() (*SimpleResponse, error) { - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - m := new(SimpleResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *testServiceClient) ServerStreamCall(ctx context.Context, in *SimpleRequest, opts ...grpc.CallOption) (TestService_ServerStreamCallClient, error) { - stream, err := grpc.NewClientStream(ctx, &_TestService_serviceDesc.Streams[2], c.cc, "/grpc.testing.TestService/ServerStreamCall", opts...) - if err != nil { - return nil, err - } - x := &testServiceServerStreamCallClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type TestService_ServerStreamCallClient interface { - Recv() (*SimpleResponse, error) - grpc.ClientStream -} - -type testServiceServerStreamCallClient struct { - grpc.ClientStream -} - -func (x *testServiceServerStreamCallClient) Recv() (*SimpleResponse, error) { - m := new(SimpleResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// Server API for TestService service - -type TestServiceServer interface { - // One request followed by one response. - // The server returns the client id as-is. - UnaryCall(context.Context, *SimpleRequest) (*SimpleResponse, error) - // A sequence of requests with each request served by the server immediately. - // As one request could lead to multiple responses, this interface - // demonstrates the idea of full duplexing. 
- FullDuplexCall(TestService_FullDuplexCallServer) error - // Client stream - ClientStreamCall(TestService_ClientStreamCallServer) error - // Server stream - ServerStreamCall(*SimpleRequest, TestService_ServerStreamCallServer) error -} - -func RegisterTestServiceServer(s *grpc.Server, srv TestServiceServer) { - s.RegisterService(&_TestService_serviceDesc, srv) -} - -func _TestService_UnaryCall_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SimpleRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TestServiceServer).UnaryCall(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/grpc.testing.TestService/UnaryCall", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TestServiceServer).UnaryCall(ctx, req.(*SimpleRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _TestService_FullDuplexCall_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TestServiceServer).FullDuplexCall(&testServiceFullDuplexCallServer{stream}) -} - -type TestService_FullDuplexCallServer interface { - Send(*SimpleResponse) error - Recv() (*SimpleRequest, error) - grpc.ServerStream -} - -type testServiceFullDuplexCallServer struct { - grpc.ServerStream -} - -func (x *testServiceFullDuplexCallServer) Send(m *SimpleResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *testServiceFullDuplexCallServer) Recv() (*SimpleRequest, error) { - m := new(SimpleRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _TestService_ClientStreamCall_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(TestServiceServer).ClientStreamCall(&testServiceClientStreamCallServer{stream}) -} - -type TestService_ClientStreamCallServer interface { - SendAndClose(*SimpleResponse) error - Recv() (*SimpleRequest, error) - grpc.ServerStream -} - -type testServiceClientStreamCallServer struct { - grpc.ServerStream -} - -func (x *testServiceClientStreamCallServer) SendAndClose(m *SimpleResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *testServiceClientStreamCallServer) Recv() (*SimpleRequest, error) { - m := new(SimpleRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _TestService_ServerStreamCall_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(SimpleRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(TestServiceServer).ServerStreamCall(m, &testServiceServerStreamCallServer{stream}) -} - -type TestService_ServerStreamCallServer interface { - Send(*SimpleResponse) error - grpc.ServerStream -} - -type testServiceServerStreamCallServer struct { - grpc.ServerStream -} - -func (x *testServiceServerStreamCallServer) Send(m *SimpleResponse) error { - return x.ServerStream.SendMsg(m) -} - -var _TestService_serviceDesc = grpc.ServiceDesc{ - ServiceName: "grpc.testing.TestService", - HandlerType: (*TestServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "UnaryCall", - Handler: _TestService_UnaryCall_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "FullDuplexCall", - Handler: _TestService_FullDuplexCall_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "ClientStreamCall", - Handler: _TestService_ClientStreamCall_Handler, - 
ClientStreams: true, - }, - { - StreamName: "ServerStreamCall", - Handler: _TestService_ServerStreamCall_Handler, - ServerStreams: true, - }, - }, - Metadata: "grpc_testing/test.proto", -} - -func init() { proto.RegisterFile("grpc_testing/test.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 202 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0x2f, 0x2a, 0x48, - 0x8e, 0x2f, 0x49, 0x2d, 0x2e, 0xc9, 0xcc, 0x4b, 0xd7, 0x07, 0xd1, 0x7a, 0x05, 0x45, 0xf9, 0x25, - 0xf9, 0x42, 0x3c, 0x20, 0x09, 0x3d, 0xa8, 0x84, 0x92, 0x3c, 0x17, 0x6f, 0x70, 0x66, 0x6e, 0x41, - 0x4e, 0x6a, 0x50, 0x6a, 0x61, 0x69, 0x6a, 0x71, 0x89, 0x10, 0x1f, 0x17, 0x53, 0x66, 0x8a, 0x04, - 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x53, 0x66, 0x8a, 0x92, 0x02, 0x17, 0x1f, 0x4c, 0x41, 0x71, - 0x41, 0x7e, 0x5e, 0x71, 0x2a, 0x54, 0x05, 0x33, 0x4c, 0x85, 0xd1, 0x09, 0x26, 0x2e, 0xee, 0x90, - 0xd4, 0xe2, 0x92, 0xe0, 0xd4, 0xa2, 0xb2, 0xcc, 0xe4, 0x54, 0x21, 0x37, 0x2e, 0xce, 0xd0, 0xbc, - 0xc4, 0xa2, 0x4a, 0xe7, 0xc4, 0x9c, 0x1c, 0x21, 0x69, 0x3d, 0x64, 0xeb, 0xf4, 0x50, 0xec, 0x92, - 0x92, 0xc1, 0x2e, 0x09, 0xb5, 0xc7, 0x9f, 0x8b, 0xcf, 0xad, 0x34, 0x27, 0xc7, 0xa5, 0xb4, 0x20, - 0x27, 0xb5, 0x82, 0x42, 0xc3, 0x34, 0x18, 0x0d, 0x18, 0x85, 0xfc, 0xb9, 0x04, 0x9c, 0x73, 0x32, - 0x53, 0xf3, 0x4a, 0x82, 0x4b, 0x8a, 0x52, 0x13, 0x73, 0x29, 0x36, 0x12, 0x64, 0x20, 0xc8, 0xd3, - 0xa9, 0x45, 0x54, 0x31, 0xd0, 0x80, 0x31, 0x89, 0x0d, 0x1c, 0x45, 0xc6, 0x80, 0x00, 0x00, 0x00, - 0xff, 0xff, 0x4c, 0x43, 0x27, 0x67, 0xbd, 0x01, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/grpc/stats/grpc_testing/test.proto b/vendor/google.golang.org/grpc/stats/grpc_testing/test.proto deleted file mode 100644 index b49a0d5a7..000000000 --- a/vendor/google.golang.org/grpc/stats/grpc_testing/test.proto +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2017 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package grpc.testing; - -message SimpleRequest { - int32 id = 2; -} - -message SimpleResponse { - int32 id = 3; -} - -// A simple test service. -service TestService { - // One request followed by one response. - // The server returns the client id as-is. - rpc UnaryCall(SimpleRequest) returns (SimpleResponse); - - // A sequence of requests with each request served by the server immediately. - // As one request could lead to multiple responses, this interface - // demonstrates the idea of full duplexing. 
- rpc FullDuplexCall(stream SimpleRequest) returns (stream SimpleResponse); - - // Client stream - rpc ClientStreamCall(stream SimpleRequest) returns (SimpleResponse); - - // Server stream - rpc ServerStreamCall(SimpleRequest) returns (stream SimpleResponse); -} diff --git a/vendor/google.golang.org/grpc/test/codec_perf/perf.pb.go b/vendor/google.golang.org/grpc/test/codec_perf/perf.pb.go deleted file mode 100644 index 9401b1dd8..000000000 --- a/vendor/google.golang.org/grpc/test/codec_perf/perf.pb.go +++ /dev/null @@ -1,62 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: codec_perf/perf.proto - -/* -Package codec_perf is a generated protocol buffer package. - -It is generated from these files: - codec_perf/perf.proto - -It has these top-level messages: - Buffer -*/ -package codec_perf - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// Buffer is a message that contains a body of bytes that is used to exercise -// encoding and decoding overheads. -type Buffer struct { - Body []byte `protobuf:"bytes,1,opt,name=body" json:"body,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Buffer) Reset() { *m = Buffer{} } -func (m *Buffer) String() string { return proto.CompactTextString(m) } -func (*Buffer) ProtoMessage() {} -func (*Buffer) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -func (m *Buffer) GetBody() []byte { - if m != nil { - return m.Body - } - return nil -} - -func init() { - proto.RegisterType((*Buffer)(nil), "codec.perf.Buffer") -} - -func init() { proto.RegisterFile("codec_perf/perf.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 78 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4d, 0xce, 0x4f, 0x49, - 0x4d, 0x8e, 0x2f, 0x48, 0x2d, 0x4a, 0xd3, 0x07, 0x11, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, - 0x5c, 0x60, 0x61, 0x3d, 0x90, 0x88, 0x92, 0x0c, 0x17, 0x9b, 0x53, 0x69, 0x5a, 0x5a, 0x6a, 0x91, - 0x90, 0x10, 0x17, 0x4b, 0x52, 0x7e, 0x4a, 0xa5, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x4f, 0x10, 0x98, - 0x0d, 0x08, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x93, 0x4c, 0x5f, 0x41, 0x00, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/grpc/test/codec_perf/perf.proto b/vendor/google.golang.org/grpc/test/codec_perf/perf.proto deleted file mode 100644 index f42dbcafe..000000000 --- a/vendor/google.golang.org/grpc/test/codec_perf/perf.proto +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2017 gRPC authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
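For orientation, a minimal sketch (not part of the vendored sources) of how the generated TestService streaming client above might be driven. The address, dial options, and error handling are illustrative assumptions:

package main

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	testpb "google.golang.org/grpc/stats/grpc_testing"
)

func main() {
	// Hypothetical address of a server implementing grpc.testing.TestService.
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	client := testpb.NewTestServiceClient(conn)
	stream, err := client.FullDuplexCall(context.Background())
	if err != nil {
		log.Fatalf("FullDuplexCall: %v", err)
	}
	// Full duplex: each Send is answered here by one Recv, and the test
	// server is expected to echo the id back as-is.
	for i := int32(1); i <= 3; i++ {
		if err := stream.Send(&testpb.SimpleRequest{Id: i}); err != nil {
			log.Fatalf("send: %v", err)
		}
		resp, err := stream.Recv()
		if err != nil {
			log.Fatalf("recv: %v", err)
		}
		log.Printf("echoed id: %d", resp.Id)
	}
	if err := stream.CloseSend(); err != nil {
		log.Fatalf("close send: %v", err)
	}
}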
- -// Messages used for performance tests that may not reference grpc directly for -// reasons of import cycles. -syntax = "proto2"; - -package codec.perf; - -// Buffer is a message that contains a body of bytes that is used to exercise -// encoding and decoding overheads. -message Buffer { - optional bytes body = 1; -} diff --git a/vendor/google.golang.org/grpc/testdata/ca.pem b/vendor/google.golang.org/grpc/testdata/ca.pem deleted file mode 100644 index 6c8511a73..000000000 --- a/vendor/google.golang.org/grpc/testdata/ca.pem +++ /dev/null @@ -1,15 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICSjCCAbOgAwIBAgIJAJHGGR4dGioHMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV -BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX -aWRnaXRzIFB0eSBMdGQxDzANBgNVBAMTBnRlc3RjYTAeFw0xNDExMTEyMjMxMjla -Fw0yNDExMDgyMjMxMjlaMFYxCzAJBgNVBAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0 -YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQxDzANBgNVBAMT -BnRlc3RjYTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEAwEDfBV5MYdlHVHJ7 -+L4nxrZy7mBfAVXpOc5vMYztssUI7mL2/iYujiIXM+weZYNTEpLdjyJdu7R5gGUu -g1jSVK/EPHfc74O7AyZU34PNIP4Sh33N+/A5YexrNgJlPY+E3GdVYi4ldWJjgkAd -Qah2PH5ACLrIIC6tRka9hcaBlIECAwEAAaMgMB4wDAYDVR0TBAUwAwEB/zAOBgNV -HQ8BAf8EBAMCAgQwDQYJKoZIhvcNAQELBQADgYEAHzC7jdYlzAVmddi/gdAeKPau -sPBG/C2HCWqHzpCUHcKuvMzDVkY/MP2o6JIW2DBbY64bO/FceExhjcykgaYtCH/m -oIU63+CFOTtR7otyQAWHqXa7q4SbCDlG7DyRFxqG0txPtGvy12lgldA2+RgcigQG -Dfcog5wrJytaQ6UA0wE= ------END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/testdata/server1.key b/vendor/google.golang.org/grpc/testdata/server1.key deleted file mode 100644 index 143a5b876..000000000 --- a/vendor/google.golang.org/grpc/testdata/server1.key +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIICdQIBADANBgkqhkiG9w0BAQEFAASCAl8wggJbAgEAAoGBAOHDFScoLCVJpYDD -M4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1BgzkWF+slf -3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd9N8YwbBY -AckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAECgYAn7qGnM2vbjJNBm0VZCkOkTIWm -V10okw7EPJrdL2mkre9NasghNXbE1y5zDshx5Nt3KsazKOxTT8d0Jwh/3KbaN+YY -tTCbKGW0pXDRBhwUHRcuRzScjli8Rih5UOCiZkhefUTcRb6xIhZJuQy71tjaSy0p -dHZRmYyBYO2YEQ8xoQJBAPrJPhMBkzmEYFtyIEqAxQ/o/A6E+E4w8i+KM7nQCK7q -K4JXzyXVAjLfyBZWHGM2uro/fjqPggGD6QH1qXCkI4MCQQDmdKeb2TrKRh5BY1LR -81aJGKcJ2XbcDu6wMZK4oqWbTX2KiYn9GB0woM6nSr/Y6iy1u145YzYxEV/iMwff -DJULAkB8B2MnyzOg0pNFJqBJuH29bKCcHa8gHJzqXhNO5lAlEbMK95p/P2Wi+4Hd -aiEIAF1BF326QJcvYKmwSmrORp85AkAlSNxRJ50OWrfMZnBgzVjDx3xG6KsFQVk2 -ol6VhqL6dFgKUORFUWBvnKSyhjJxurlPEahV6oo6+A+mPhFY8eUvAkAZQyTdupP3 -XEFQKctGz+9+gKkemDp7LBBMEMBXrGTLPhpEfcjv/7KPdnFHYmhYeBTBnuVmTVWe -F98XJ7tIFfJq ------END PRIVATE KEY----- diff --git a/vendor/google.golang.org/grpc/testdata/server1.pem b/vendor/google.golang.org/grpc/testdata/server1.pem deleted file mode 100644 index f3d43fcc5..000000000 --- a/vendor/google.golang.org/grpc/testdata/server1.pem +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN CERTIFICATE----- -MIICnDCCAgWgAwIBAgIBBzANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJBVTET -MBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQ -dHkgTHRkMQ8wDQYDVQQDEwZ0ZXN0Y2EwHhcNMTUxMTA0MDIyMDI0WhcNMjUxMTAx -MDIyMDI0WjBlMQswCQYDVQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNV -BAcTB0NoaWNhZ28xFTATBgNVBAoTDEV4YW1wbGUsIENvLjEaMBgGA1UEAxQRKi50 -ZXN0Lmdvb2dsZS5jb20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAOHDFSco -LCVJpYDDM4HYtIdV6Ake/sMNaaKdODjDMsux/4tDydlumN+fm+AjPEK5GHhGn1Bg -zkWF+slf3BxhrA/8dNsnunstVA7ZBgA/5qQxMfGAq4wHNVX77fBZOgp9VlSMVfyd -9N8YwbBYAckOeUQadTi2X1S6OgJXgQ0m3MWhAgMBAAGjazBpMAkGA1UdEwQCMAAw 
-CwYDVR0PBAQDAgXgME8GA1UdEQRIMEaCECoudGVzdC5nb29nbGUuZnKCGHdhdGVy -em9vaS50ZXN0Lmdvb2dsZS5iZYISKi50ZXN0LnlvdXR1YmUuY29thwTAqAEDMA0G -CSqGSIb3DQEBCwUAA4GBAJFXVifQNub1LUP4JlnX5lXNlo8FxZ2a12AFQs+bzoJ6 -hM044EDjqyxUqSbVePK0ni3w1fHQB5rY9yYC5f8G7aqqTY1QOhoUk8ZTSTRpnkTh -y4jjdvTZeLDVBlueZUTDRmy2feY5aZIU18vFDK08dTG0A87pppuv1LNIR3loveU8 ------END CERTIFICATE----- diff --git a/vendor/google.golang.org/grpc/testdata/testdata.go b/vendor/google.golang.org/grpc/testdata/testdata.go deleted file mode 100644 index 5609b19b3..000000000 --- a/vendor/google.golang.org/grpc/testdata/testdata.go +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package testdata - -import ( - "log" - "os" - "path/filepath" -) - -// Path returns the absolute path of the given relative file or directory path, -// relative to the google.golang.org/grpc/testdata directory in the user's GOPATH. -// If rel is already absolute, it is returned unmodified. -func Path(rel string) string { - if filepath.IsAbs(rel) { - return rel - } - - v, err := goPackagePath("google.golang.org/grpc/testdata") - if err != nil { - log.Fatalf("Error finding google.golang.org/grpc/testdata directory: %v", err) - } - - return filepath.Join(v, rel) -} - -// goPackagePath resolves pkg to a directory under the src tree of one of the GOPATH entries. -func goPackagePath(pkg string) (path string, err error) { - gp := os.Getenv("GOPATH") - if gp == "" { - return path, os.ErrNotExist - } - - for _, p := range filepath.SplitList(gp) { - dir := filepath.Join(p, "src", filepath.FromSlash(pkg)) - fi, err := os.Stat(dir) - if os.IsNotExist(err) { - continue - } - if err != nil { - return "", err - } - if !fi.IsDir() { - continue - } - return dir, nil - } - return path, os.ErrNotExist -} diff --git a/vendor/k8s.io/apiextensions-apiserver/main.go b/vendor/k8s.io/apiextensions-apiserver/main.go deleted file mode 100644 index 888a04b1a..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/main.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.
-*/ - -package main - -import ( - "flag" - "os" - - "github.com/golang/glog" - - "k8s.io/apiextensions-apiserver/pkg/cmd/server" - genericapiserver "k8s.io/apiserver/pkg/server" - "k8s.io/apiserver/pkg/util/logs" -) - -func main() { - logs.InitLogs() - defer logs.FlushLogs() - - stopCh := genericapiserver.SetupSignalHandler() - cmd := server.NewServerCommand(os.Stdout, os.Stderr, stopCh) - cmd.Flags().AddGoFlagSet(flag.CommandLine) - if err := cmd.Execute(); err != nil { - glog.Fatal(err) - } -} diff --git a/vendor/k8s.io/apiextensions-apiserver/test.0/integration/fixtures/resources.go b/vendor/k8s.io/apiextensions-apiserver/test.0/integration/fixtures/resources.go deleted file mode 100644 index 9a1f2d848..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/test.0/integration/fixtures/resources.go +++ /dev/null @@ -1,403 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fixtures - -import ( - "fmt" - "time" - - apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/apiserver/pkg/storage/names" - "k8s.io/client-go/discovery" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/rest" - "k8s.io/client-go/restmapper" - "k8s.io/client-go/scale" -) - -const ( - noxuInstanceNum int64 = 9223372036854775807 -) - -// NewRandomNameCustomResourceDefinition generates a CRD with a random name to avoid name conflicts in e2e tests -func NewRandomNameCustomResourceDefinition(scope apiextensionsv1beta1.ResourceScope) *apiextensionsv1beta1.CustomResourceDefinition { - // ensure the singular doesn't end in an s for now - gName := names.SimpleNameGenerator.GenerateName("foo") + "a" - return &apiextensionsv1beta1.CustomResourceDefinition{ - ObjectMeta: metav1.ObjectMeta{Name: gName + "s.mygroup.example.com"}, - Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{ - Group: "mygroup.example.com", - Version: "v1beta1", - Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ - Plural: gName + "s", - Singular: gName, - Kind: gName, - ListKind: gName + "List", - }, - Scope: scope, - }, - } -} - -// NewNoxuCustomResourceDefinition returns a WishIHadChosenNoxu CRD.
-func NewNoxuCustomResourceDefinition(scope apiextensionsv1beta1.ResourceScope) *apiextensionsv1beta1.CustomResourceDefinition { - return &apiextensionsv1beta1.CustomResourceDefinition{ - ObjectMeta: metav1.ObjectMeta{Name: "noxus.mygroup.example.com"}, - Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{ - Group: "mygroup.example.com", - Version: "v1beta1", - Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ - Plural: "noxus", - Singular: "nonenglishnoxu", - Kind: "WishIHadChosenNoxu", - ShortNames: []string{"foo", "bar", "abc", "def"}, - ListKind: "NoxuItemList", - Categories: []string{"all"}, - }, - Scope: scope, - }, - } -} - -// NewVersionedNoxuInstance returns a WishIHadChosenNoxu instance for a given version -func NewVersionedNoxuInstance(namespace, name, version string) *unstructured.Unstructured { - return &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "mygroup.example.com/" + version, - "kind": "WishIHadChosenNoxu", - "metadata": map[string]interface{}{ - "namespace": namespace, - "name": name, - }, - "content": map[string]interface{}{ - "key": "value", - }, - "num": map[string]interface{}{ - "num1": noxuInstanceNum, - "num2": 1000000, - }, - }, - } -} - -// NewNoxuInstance returns a WishIHadChosenNoxu instance for v1beta1. -func NewNoxuInstance(namespace, name string) *unstructured.Unstructured { - return NewVersionedNoxuInstance(namespace, name, "v1beta1") -} - -// NewMultipleVersionNoxuCRD returns a WishIHadChosenNoxu with multiple versions. -func NewMultipleVersionNoxuCRD(scope apiextensionsv1beta1.ResourceScope) *apiextensionsv1beta1.CustomResourceDefinition { - return &apiextensionsv1beta1.CustomResourceDefinition{ - ObjectMeta: metav1.ObjectMeta{Name: "noxus.mygroup.example.com"}, - Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{ - Group: "mygroup.example.com", - Version: "v1beta1", - Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ - Plural: "noxus", - Singular: "nonenglishnoxu", - Kind: "WishIHadChosenNoxu", - ShortNames: []string{"foo", "bar", "abc", "def"}, - ListKind: "NoxuItemList", - Categories: []string{"all"}, - }, - Scope: scope, - Versions: []apiextensionsv1beta1.CustomResourceDefinitionVersion{ - { - Name: "v1beta1", - Served: true, - Storage: false, - }, - { - Name: "v1beta2", - Served: true, - Storage: true, - }, - { - Name: "v0", - Served: false, - Storage: false, - }, - }, - }, - } -} - -// NewNoxu2CustomResourceDefinition returns a WishIHadChosenNoxu2 CRD. -func NewNoxu2CustomResourceDefinition(scope apiextensionsv1beta1.ResourceScope) *apiextensionsv1beta1.CustomResourceDefinition { - return &apiextensionsv1beta1.CustomResourceDefinition{ - ObjectMeta: metav1.ObjectMeta{Name: "noxus2.mygroup.example.com"}, - Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{ - Group: "mygroup.example.com", - Version: "v1alpha1", - Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ - Plural: "noxus2", - Singular: "nonenglishnoxu2", - Kind: "WishIHadChosenNoxu2", - ShortNames: []string{"foo", "bar", "abc", "def"}, - ListKind: "Noxu2ItemList", - }, - Scope: scope, - }, - } -} - -// NewCurletCustomResourceDefinition returns a Curlet CRD. 
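A hedged sketch of how the fixture constructors above compose; the clients are assumed to come from a running test apiserver (see the server fixtures further below), and the namespace and instance name are illustrative:

// createNoxuFixtures is a hypothetical helper, not part of the vendored code.
func createNoxuFixtures(apiExtensionsClient clientset.Interface, dynamicClient dynamic.Interface) error {
	// Register the CRD. Real tests prefer CreateNewCustomResourceDefinition
	// (defined below), which also waits for discovery and watch-cache priming.
	crd := NewMultipleVersionNoxuCRD(apiextensionsv1beta1.NamespaceScoped)
	crd, err := apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd)
	if err != nil {
		return err
	}
	// Create an instance of the served v1beta1 version via the dynamic client.
	gvr := schema.GroupVersionResource{Group: crd.Spec.Group, Version: "v1beta1", Resource: crd.Spec.Names.Plural}
	instance := NewVersionedNoxuInstance("default", "example", "v1beta1")
	_, err = dynamicClient.Resource(gvr).Namespace("default").Create(instance, metav1.CreateOptions{})
	return err
}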
-func NewCurletCustomResourceDefinition(scope apiextensionsv1beta1.ResourceScope) *apiextensionsv1beta1.CustomResourceDefinition { - return &apiextensionsv1beta1.CustomResourceDefinition{ - ObjectMeta: metav1.ObjectMeta{Name: "curlets.mygroup.example.com"}, - Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{ - Group: "mygroup.example.com", - Version: "v1beta1", - Names: apiextensionsv1beta1.CustomResourceDefinitionNames{ - Plural: "curlets", - Singular: "curlet", - Kind: "Curlet", - ListKind: "CurletList", - }, - Scope: scope, - }, - } -} - -// NewCurletInstance returns a Curlet instance. -func NewCurletInstance(namespace, name string) *unstructured.Unstructured { - return &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": "mygroup.example.com/v1beta1", - "kind": "Curlet", - "metadata": map[string]interface{}{ - "namespace": namespace, - "name": name, - }, - "content": map[string]interface{}{ - "key": "value", - }, - }, - } -} - -func servedVersions(crd *apiextensionsv1beta1.CustomResourceDefinition) []string { - if len(crd.Spec.Versions) == 0 { - return []string{crd.Spec.Version} - } - var versions []string - for _, v := range crd.Spec.Versions { - if v.Served { - versions = append(versions, v.Name) - } - } - return versions -} - -func existsInDiscovery(crd *apiextensionsv1beta1.CustomResourceDefinition, apiExtensionsClient clientset.Interface, version string) (bool, error) { - groupResource, err := apiExtensionsClient.Discovery().ServerResourcesForGroupVersion(crd.Spec.Group + "/" + version) - if err != nil { - if errors.IsNotFound(err) { - return false, nil - } - return false, err - } - for _, g := range groupResource.APIResources { - if g.Name == crd.Spec.Names.Plural { - return true, nil - } - } - return false, nil -} - -// CreateNewCustomResourceDefinitionWatchUnsafe creates the CRD and makes sure -// the apiextensions apiserver has installed the CRD. But it's not safe to watch -// the created CR. Please call CreateNewCustomResourceDefinition if you need to -// watch the CR. -func CreateNewCustomResourceDefinitionWatchUnsafe(crd *apiextensionsv1beta1.CustomResourceDefinition, apiExtensionsClient clientset.Interface) (*apiextensionsv1beta1.CustomResourceDefinition, error) { - crd, err := apiExtensionsClient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd) - if err != nil { - return nil, err - } - - // wait until all resources appear in discovery - for _, version := range servedVersions(crd) { - err := wait.PollImmediate(500*time.Millisecond, 30*time.Second, func() (bool, error) { - return existsInDiscovery(crd, apiExtensionsClient, version) - }) - if err != nil { - return nil, err - } - } - - return crd, err -} - -// CreateNewCustomResourceDefinition creates the given CRD and makes sure its watch cache is primed on the server. -func CreateNewCustomResourceDefinition(crd *apiextensionsv1beta1.CustomResourceDefinition, apiExtensionsClient clientset.Interface, dynamicClientSet dynamic.Interface) (*apiextensionsv1beta1.CustomResourceDefinition, error) { - crd, err := CreateNewCustomResourceDefinitionWatchUnsafe(crd, apiExtensionsClient) - if err != nil { - return nil, err - } - - // This is only for a test. We need the watch cache to have a resource version that works for the test. - // When new REST storage is created, the storage cacher for the CR starts asynchronously.
- // REST API operations like list use the RV of etcd, but the storage cacher's reflector's list - // can get a different RV because etcd can be touched in between the initial list operation (if that's what you're doing first) - // and the storage cache reflector starting. - // Later, you can issue a watch with the REST API's list.RV and end up earlier than the storage cacher. - // The general working model is that if you get a "resourceVersion too old" message, you re-list and rewatch. - // For this test, we'll actually cycle, "list/watch/create/delete" until we get an RV from list that observes the create and not an error. - // This way all the tests that are checking for watches don't have to worry about RV too old problems because crazy things *could* happen - // before, like the created RV could be too old to watch. - err = wait.PollImmediate(500*time.Millisecond, 30*time.Second, func() (bool, error) { - return isWatchCachePrimed(crd, dynamicClientSet) - }) - if err != nil { - return nil, err - } - return crd, nil -} - -func resourceClientForVersion(crd *apiextensionsv1beta1.CustomResourceDefinition, dynamicClientSet dynamic.Interface, namespace, version string) dynamic.ResourceInterface { - gvr := schema.GroupVersionResource{Group: crd.Spec.Group, Version: version, Resource: crd.Spec.Names.Plural} - if crd.Spec.Scope != apiextensionsv1beta1.ClusterScoped { - return dynamicClientSet.Resource(gvr).Namespace(namespace) - } - return dynamicClientSet.Resource(gvr) -} - -// isWatchCachePrimed returns true if the watch is primed for a specified version of the CRD -func isWatchCachePrimed(crd *apiextensionsv1beta1.CustomResourceDefinition, dynamicClientSet dynamic.Interface) (bool, error) { - ns := "" - if crd.Spec.Scope != apiextensionsv1beta1.ClusterScoped { - ns = "aval" - } - - versions := servedVersions(crd) - if len(versions) == 0 { - return true, nil - } - - resourceClient := resourceClientForVersion(crd, dynamicClientSet, ns, versions[0]) - instanceName := "setup-instance" - instance := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "apiVersion": crd.Spec.Group + "/" + versions[0], - "kind": crd.Spec.Names.Kind, - "metadata": map[string]interface{}{ - "namespace": ns, - "name": instanceName, - }, - "alpha": "foo_123", - "beta": 10, - "gamma": "bar", - "delta": "hello", - "epsilon": "foobar", - "spec": map[string]interface{}{}, - }, - } - createdInstance, err := resourceClient.Create(instance, metav1.CreateOptions{}) - if err != nil { - return false, err - } - err = resourceClient.Delete(createdInstance.GetName(), nil) - if err != nil { - return false, err - } - - // Wait for all versions of watch cache to be primed and also make sure we consumed the DELETE event for all - // versions so that any new watch with ResourceVersion=0 does not get those events. This is a source of some flaky tests. - // When a client creates a watch with resourceVersion=0, it will get an ADD event for any existing objects - // but because they specified resourceVersion=0, there is no starting point in the cache buffer to return existing events - // from, thus the server will return anything from the current head of the cache to the end. By accessing the delete - // events for all versions here, we make sure that the head of the cache is passed those events and they will not be - // delivered to any future watch with resourceVersion=0.
- for _, v := range versions { - noxuWatch, err := resourceClientForVersion(crd, dynamicClientSet, ns, v).Watch( - metav1.ListOptions{ResourceVersion: createdInstance.GetResourceVersion()}) - if err != nil { - return false, err - } - defer noxuWatch.Stop() - - select { - case watchEvent := <-noxuWatch.ResultChan(): - if watch.Error == watchEvent.Type { - return false, nil - } - if watch.Deleted != watchEvent.Type { - return false, fmt.Errorf("expected DELETE, but got %#v", watchEvent) - } - case <-time.After(5 * time.Second): - return false, fmt.Errorf("gave up waiting for watch event") - } - } - - return true, nil -} - -// DeleteCustomResourceDefinition deletes a CRD and waits until it disappears from discovery. -func DeleteCustomResourceDefinition(crd *apiextensionsv1beta1.CustomResourceDefinition, apiExtensionsClient clientset.Interface) error { - if err := apiExtensionsClient.Apiextensions().CustomResourceDefinitions().Delete(crd.Name, nil); err != nil { - return err - } - for _, version := range servedVersions(crd) { - err := wait.PollImmediate(500*time.Millisecond, 30*time.Second, func() (bool, error) { - exists, err := existsInDiscovery(crd, apiExtensionsClient, version) - return !exists, err - }) - if err != nil { - return err - } - } - return nil -} - -// CreateNewScaleClient returns a scale client. -func CreateNewScaleClient(crd *apiextensionsv1beta1.CustomResourceDefinition, config *rest.Config) (scale.ScalesGetter, error) { - discoveryClient, err := discovery.NewDiscoveryClientForConfig(config) - if err != nil { - return nil, err - } - groupResource, err := discoveryClient.ServerResourcesForGroupVersion(crd.Spec.Group + "/" + crd.Spec.Version) - if err != nil { - return nil, err - } - - resources := []*restmapper.APIGroupResources{ - { - Group: metav1.APIGroup{ - Name: crd.Spec.Group, - Versions: []metav1.GroupVersionForDiscovery{ - {Version: crd.Spec.Version}, - }, - PreferredVersion: metav1.GroupVersionForDiscovery{Version: crd.Spec.Version}, - }, - VersionedResources: map[string][]metav1.APIResource{ - crd.Spec.Version: groupResource.APIResources, - }, - }, - } - - restMapper := restmapper.NewDiscoveryRESTMapper(resources) - resolver := scale.NewDiscoveryScaleKindResolver(discoveryClient) - - return scale.NewForConfig(config, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver) -} diff --git a/vendor/k8s.io/apiextensions-apiserver/test.0/integration/fixtures/server.go b/vendor/k8s.io/apiextensions-apiserver/test.0/integration/fixtures/server.go deleted file mode 100644 index 0a3d6444e..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/test.0/integration/fixtures/server.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package fixtures - -import ( - "io/ioutil" - "os" - "strings" - - "github.com/pborman/uuid" - "k8s.io/apiextensions-apiserver/pkg/cmd/server/options" - - "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - servertesting "k8s.io/apiextensions-apiserver/pkg/cmd/server/testing" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/rest" -) - -// StartDefaultServer starts a test server. -func StartDefaultServer(t servertesting.Logger) (func(), *rest.Config, *options.CustomResourceDefinitionsServerOptions, error) { - // create a kubeconfig which will not actually be used. But authz/authn needs it to start up. - fakeKubeConfig, err := ioutil.TempFile("", "kubeconfig") - if err != nil { - return nil, nil, nil, err - } - fakeKubeConfig.WriteString(` -apiVersion: v1 -kind: Config -clusters: -- cluster: - server: http://127.1.2.3:12345 - name: integration -contexts: -- context: - cluster: integration - user: test - name: default-context -current-context: default-context -users: -- name: test - user: - password: test - username: test -`) - fakeKubeConfig.Close() - - s, err := servertesting.StartTestServer(t, nil, []string{ - "--etcd-prefix", uuid.New(), - "--etcd-servers", strings.Join(IntegrationEtcdServers(), ","), - "--authentication-skip-lookup", - "--authentication-kubeconfig", fakeKubeConfig.Name(), - "--authorization-kubeconfig", fakeKubeConfig.Name(), - "--kubeconfig", fakeKubeConfig.Name(), - "--disable-admission-plugins", "NamespaceLifecycle,MutatingAdmissionWebhook,ValidatingAdmissionWebhook", - }, nil) - if err != nil { - os.Remove(fakeKubeConfig.Name()) - return nil, nil, nil, err - } - - tearDownFn := func() { - defer os.Remove(fakeKubeConfig.Name()) - s.TearDownFn() - } - - return tearDownFn, s.ClientConfig, s.ServerOpts, nil -} - -// StartDefaultServerWithClients starts a test server and returns clients for it. -func StartDefaultServerWithClients(t servertesting.Logger) (func(), clientset.Interface, dynamic.Interface, error) { - tearDown, config, _, err := StartDefaultServer(t) - if err != nil { - return nil, nil, nil, err - } - - apiExtensionsClient, err := clientset.NewForConfig(config) - if err != nil { - tearDown() - return nil, nil, nil, err - } - - dynamicClient, err := dynamic.NewForConfig(config) - if err != nil { - tearDown() - return nil, nil, nil, err - } - - return tearDown, apiExtensionsClient, dynamicClient, nil -} - -// IntegrationEtcdServers returns etcd server URLs. -func IntegrationEtcdServers() []string { - if etcdURL, ok := os.LookupEnv("KUBE_INTEGRATION_ETCD_URL"); ok { - return []string{etcdURL} - } - return []string{"http://127.0.0.1:2379"} -} diff --git a/vendor/k8s.io/apiextensions-apiserver/test.0/integration/helpers.go b/vendor/k8s.io/apiextensions-apiserver/test.0/integration/helpers.go deleted file mode 100644 index 763440345..000000000 --- a/vendor/k8s.io/apiextensions-apiserver/test.0/integration/helpers.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.
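Putting the server fixtures above together, a hypothetical integration-test skeleton (the test name and CRD choice are illustrative; etcd must be reachable as described by IntegrationEtcdServers):

func TestNoxuLifecycle(t *testing.T) {
	tearDown, apiExtensionsClient, dynamicClient, err := StartDefaultServerWithClients(t)
	if err != nil {
		t.Fatal(err)
	}
	defer tearDown()

	// Create a CRD and wait until it is served and its watch cache is primed
	// (helper from the resources.go fixtures above).
	crd := NewNoxuCustomResourceDefinition(apiextensionsv1beta1.NamespaceScoped)
	if _, err := CreateNewCustomResourceDefinition(crd, apiExtensionsClient, dynamicClient); err != nil {
		t.Fatal(err)
	}
	// ... exercise the custom resource endpoints through dynamicClient ...
}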
-*/ - -package integration - -import ( - "fmt" - "testing" - - apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/client-go/dynamic" -) - -func instantiateCustomResource(t *testing.T, instanceToCreate *unstructured.Unstructured, client dynamic.ResourceInterface, definition *apiextensionsv1beta1.CustomResourceDefinition) (*unstructured.Unstructured, error) { - return instantiateVersionedCustomResource(t, instanceToCreate, client, definition, definition.Spec.Versions[0].Name) -} - -func instantiateVersionedCustomResource(t *testing.T, instanceToCreate *unstructured.Unstructured, client dynamic.ResourceInterface, definition *apiextensionsv1beta1.CustomResourceDefinition, version string) (*unstructured.Unstructured, error) { - createdInstance, err := client.Create(instanceToCreate, metav1.CreateOptions{}) - if err != nil { - t.Logf("%#v", createdInstance) - return nil, err - } - createdObjectMeta, err := meta.Accessor(createdInstance) - if err != nil { - t.Fatal(err) - } - // it should have a UUID - if len(createdObjectMeta.GetUID()) == 0 { - t.Errorf("missing uuid: %#v", createdInstance) - } - createdTypeMeta, err := meta.TypeAccessor(createdInstance) - if err != nil { - t.Fatal(err) - } - if e, a := definition.Spec.Group+"/"+version, createdTypeMeta.GetAPIVersion(); e != a { - t.Errorf("expected %v, got %v", e, a) - } - if e, a := definition.Spec.Names.Kind, createdTypeMeta.GetKind(); e != a { - t.Errorf("expected %v, got %v", e, a) - } - return createdInstance, nil -} - -func newNamespacedCustomResourceVersionedClient(ns string, client dynamic.Interface, crd *apiextensionsv1beta1.CustomResourceDefinition, version string) dynamic.ResourceInterface { - gvr := schema.GroupVersionResource{Group: crd.Spec.Group, Version: version, Resource: crd.Spec.Names.Plural} - - if crd.Spec.Scope != apiextensionsv1beta1.ClusterScoped { - return client.Resource(gvr).Namespace(ns) - } - return client.Resource(gvr) -} - -func newNamespacedCustomResourceClient(ns string, client dynamic.Interface, crd *apiextensionsv1beta1.CustomResourceDefinition) dynamic.ResourceInterface { - return newNamespacedCustomResourceVersionedClient(ns, client, crd, crd.Spec.Versions[0].Name) -} - -// updateCustomResourceDefinitionWithRetry updates a CRD, retrying up to 5 times on version conflict errors. 
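For the retry helper defined just below, a hypothetical call site; the CRD name and the mutation are assumptions for illustration:

crd, err := updateCustomResourceDefinitionWithRetry(apiExtensionsClient, "noxus.mygroup.example.com",
	func(crd *apiextensionsv1beta1.CustomResourceDefinition) {
		// The closure receives a freshly fetched copy and may run up to five
		// times if the update races with other writers.
		crd.Spec.Names.ShortNames = append(crd.Spec.Names.ShortNames, "nx")
	})
if err != nil {
	t.Fatal(err)
}
_ = crd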
-func updateCustomResourceDefinitionWithRetry(client clientset.Interface, name string, update func(*apiextensionsv1beta1.CustomResourceDefinition)) (*apiextensionsv1beta1.CustomResourceDefinition, error) { - for i := 0; i < 5; i++ { - crd, err := client.ApiextensionsV1beta1().CustomResourceDefinitions().Get(name, metav1.GetOptions{}) - if err != nil { - return nil, fmt.Errorf("failed to get CustomResourceDefinition %q: %v", name, err) - } - update(crd) - crd, err = client.ApiextensionsV1beta1().CustomResourceDefinitions().Update(crd) - if err == nil { - return crd, nil - } - if !errors.IsConflict(err) { - return nil, fmt.Errorf("failed to update CustomResourceDefinition %q: %v", name, err) - } - } - return nil, fmt.Errorf("too many retries after conflicts updating CustomResourceDefinition %q", name) -} diff --git a/vendor/k8s.io/apiserver/plugin/pkg/audit/doc.go b/vendor/k8s.io/apiserver/plugin/pkg/audit/doc.go deleted file mode 100644 index f4fc2ed9d..000000000 --- a/vendor/k8s.io/apiserver/plugin/pkg/audit/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package audit contains implementations of the pkg/audit AuditBackend interface -package audit // import "k8s.io/apiserver/plugin/pkg/audit" diff --git a/vendor/k8s.io/apiserver/plugin/pkg/audit/fake/doc.go b/vendor/k8s.io/apiserver/plugin/pkg/audit/fake/doc.go deleted file mode 100644 index 273947612..000000000 --- a/vendor/k8s.io/apiserver/plugin/pkg/audit/fake/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package fake provides a fake audit.Backend interface implementation for testing. -package fake // import "k8s.io/apiserver/plugin/pkg/audit/fake" diff --git a/vendor/k8s.io/apiserver/plugin/pkg/audit/fake/fake.go b/vendor/k8s.io/apiserver/plugin/pkg/audit/fake/fake.go deleted file mode 100644 index a886529ec..000000000 --- a/vendor/k8s.io/apiserver/plugin/pkg/audit/fake/fake.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fake - -import ( - auditinternal "k8s.io/apiserver/pkg/apis/audit" - "k8s.io/apiserver/pkg/audit" -) - -var _ audit.Backend = &Backend{} - -// Backend is a fake audit backend for testing purposes. -type Backend struct { - OnRequest func(events []*auditinternal.Event) -} - -// Run does nothing. -func (b *Backend) Run(stopCh <-chan struct{}) error { - return nil -} - -// Shutdown does nothing. -func (b *Backend) Shutdown() { - return -} - -// ProcessEvents calls a callback on a batch, if present. -func (b *Backend) ProcessEvents(ev ...*auditinternal.Event) { - if b.OnRequest != nil { - b.OnRequest(ev) - } -} - -// String identifies the backend; the fake intentionally returns an empty name. -func (b *Backend) String() string { - return "" -} diff --git a/vendor/k8s.io/client-go/tools/bootstrap/token/api/doc.go b/vendor/k8s.io/client-go/tools/bootstrap/token/api/doc.go deleted file mode 100644 index 249e0a059..000000000 --- a/vendor/k8s.io/client-go/tools/bootstrap/token/api/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package api (k8s.io/client-go/tools/bootstrap/token/api) contains constants and types needed for -// bootstrap tokens as maintained by the BootstrapSigner and TokenCleaner -// controllers (in k8s.io/kubernetes/pkg/controller/bootstrap). -package api // import "k8s.io/client-go/tools/bootstrap/token/api" diff --git a/vendor/k8s.io/client-go/tools/bootstrap/token/api/types.go b/vendor/k8s.io/client-go/tools/bootstrap/token/api/types.go deleted file mode 100644 index 3bea78b17..000000000 --- a/vendor/k8s.io/client-go/tools/bootstrap/token/api/types.go +++ /dev/null @@ -1,112 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "k8s.io/api/core/v1" -) - -const ( - // BootstrapTokenSecretPrefix is the prefix for bootstrap token names. - // Bootstrap token secrets must be named in the form - // `bootstrap-token-<token-id>`. This is the prefix to be used before the - // token ID. - BootstrapTokenSecretPrefix = "bootstrap-token-" - - // SecretTypeBootstrapToken is used during the automated bootstrap process (first - // implemented by kubeadm). It stores tokens that are used to sign well known - // ConfigMaps. They may also eventually be used for authentication. - SecretTypeBootstrapToken v1.SecretType = "bootstrap.kubernetes.io/token" - - // BootstrapTokenIDKey is the id of this token.
This can be transmitted in the - // clear and encoded in the name of the secret. It must be a random 6 character - // string that matches the regexp `^([a-z0-9]{6})$`. Required. - BootstrapTokenIDKey = "token-id" - - // BootstrapTokenSecretKey is the actual secret. It must be a random 16 character - // string that matches the regexp `^([a-z0-9]{16})$`. Required. - BootstrapTokenSecretKey = "token-secret" - - // BootstrapTokenExpirationKey is when this token should be expired and no - // longer used. A controller will delete this resource after this time. This - // is an absolute UTC time using RFC3339. If this cannot be parsed, the token - // should be considered invalid. Optional. - BootstrapTokenExpirationKey = "expiration" - - // BootstrapTokenDescriptionKey is a description in human-readable format that - // describes what the bootstrap token is used for. Optional. - BootstrapTokenDescriptionKey = "description" - - // BootstrapTokenExtraGroupsKey is a comma-separated list of group names. - // The bootstrap token will authenticate as these groups in addition to the - // "system:bootstrappers" group. - BootstrapTokenExtraGroupsKey = "auth-extra-groups" - - // BootstrapTokenUsagePrefix is the prefix for the other usage constants that specify different - // functions of a bootstrap token. - BootstrapTokenUsagePrefix = "usage-bootstrap-" - - // BootstrapTokenUsageSigningKey signals that this token should be used to - // sign configs as part of the bootstrap process. Value must be "true". Any - // other value is assumed to be false. Optional. - BootstrapTokenUsageSigningKey = "usage-bootstrap-signing" - - // BootstrapTokenUsageAuthentication signals that this token should be used - // as a bearer token to authenticate against the Kubernetes API. The bearer - // token takes the form "<token-id>.<token-secret>" and authenticates as the - // user "system:bootstrap:<token-id>" in the "system:bootstrappers" group - // as well as any groups specified using BootstrapTokenExtraGroupsKey. - // Value must be "true". Any other value is assumed to be false. Optional. - BootstrapTokenUsageAuthentication = "usage-bootstrap-authentication" - - // ConfigMapClusterInfo defines the name of the ConfigMap where the information on how to connect to and trust the cluster exists - ConfigMapClusterInfo = "cluster-info" - - // KubeConfigKey defines at which key in the Data object of the ConfigMap the KubeConfig object is stored - KubeConfigKey = "kubeconfig" - - // JWSSignatureKeyPrefix defines what key prefix the JWS-signed tokens have - JWSSignatureKeyPrefix = "jws-kubeconfig-" - - // BootstrapUserPrefix is the username prefix bootstrapping bearer tokens - // authenticate as. The full username given is "system:bootstrap:<token-id>". - BootstrapUserPrefix = "system:bootstrap:" - - // BootstrapDefaultGroup is the default group for bootstrapping bearer - // tokens (in addition to any groups from BootstrapTokenExtraGroupsKey). - BootstrapDefaultGroup = "system:bootstrappers" - - // BootstrapGroupPattern is the valid regex pattern that all groups - // assigned to a bootstrap token by BootstrapTokenExtraGroupsKey must match.
- // See also util.ValidateBootstrapGroupName() - BootstrapGroupPattern = `\Asystem:bootstrappers:[a-z0-9:-]{0,255}[a-z0-9]\z` - - // BootstrapTokenPattern defines the {id}.{secret} regular expression pattern - BootstrapTokenPattern = `\A([a-z0-9]{6})\.([a-z0-9]{16})\z` - - // BootstrapTokenIDPattern defines the token ID regular expression pattern - BootstrapTokenIDPattern = `\A([a-z0-9]{6})\z` - - // BootstrapTokenIDBytes defines the number of bytes used for the Bootstrap Token's ID field - BootstrapTokenIDBytes = 6 - - // BootstrapTokenSecretBytes defines the number of bytes used for the Bootstrap Token's Secret field - BootstrapTokenSecretBytes = 16 -) - -// KnownTokenUsages specifies the known functions a token will get. -var KnownTokenUsages = []string{"signing", "authentication"} diff --git a/vendor/k8s.io/client-go/tools/bootstrap/token/util/helpers.go b/vendor/k8s.io/client-go/tools/bootstrap/token/util/helpers.go deleted file mode 100644 index bb1fbeb65..000000000 --- a/vendor/k8s.io/client-go/tools/bootstrap/token/util/helpers.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "bufio" - "crypto/rand" - "fmt" - "regexp" - "strings" - - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/tools/bootstrap/token/api" -) - -// validBootstrapTokenChars defines the characters a bootstrap token can consist of -const validBootstrapTokenChars = "0123456789abcdefghijklmnopqrstuvwxyz" - -var ( - // BootstrapTokenRegexp is a compiled regular expression of api.BootstrapTokenPattern - BootstrapTokenRegexp = regexp.MustCompile(api.BootstrapTokenPattern) - // BootstrapTokenIDRegexp is a compiled regular expression of api.BootstrapTokenIDPattern - BootstrapTokenIDRegexp = regexp.MustCompile(api.BootstrapTokenIDPattern) - // BootstrapGroupRegexp is a compiled regular expression of api.BootstrapGroupPattern - BootstrapGroupRegexp = regexp.MustCompile(api.BootstrapGroupPattern) -) - -// GenerateBootstrapToken generates a new, random Bootstrap Token. -func GenerateBootstrapToken() (string, error) { - tokenID, err := randBytes(api.BootstrapTokenIDBytes) - if err != nil { - return "", err - } - - tokenSecret, err := randBytes(api.BootstrapTokenSecretBytes) - if err != nil { - return "", err - } - - return TokenFromIDAndSecret(tokenID, tokenSecret), nil -} - -// randBytes returns a random string consisting of the characters in -// validBootstrapTokenChars, with the length customized by the parameter -func randBytes(length int) (string, error) { - // len("0123456789abcdefghijklmnopqrstuvwxyz") = 36 which doesn't evenly divide - // the possible values of a byte: 256 mod 36 = 4. Discard any random bytes we - // read that are >= 252 so that the bytes we use evenly divide the character set.
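- // (Worked example, added for clarity: 256 - 4 = 252 = 7*36, so after - // rejection each of the 36 characters corresponds to exactly seven - // accepted byte values, keeping the distribution uniform.)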
- const maxByteValue = 252 - - var ( - b byte - err error - token = make([]byte, length) - ) - - reader := bufio.NewReaderSize(rand.Reader, length*2) - for i := range token { - for { - if b, err = reader.ReadByte(); err != nil { - return "", err - } - if b < maxByteValue { - break - } - } - - token[i] = validBootstrapTokenChars[int(b)%len(validBootstrapTokenChars)] - } - - return string(token), nil -} - -// TokenFromIDAndSecret returns the full token which is of the form "{id}.{secret}" -func TokenFromIDAndSecret(id, secret string) string { - return fmt.Sprintf("%s.%s", id, secret) -} - -// IsValidBootstrapToken returns whether the given string is valid as a Bootstrap Token, -// i.e. whether it satisfies BootstrapTokenRegexp -func IsValidBootstrapToken(token string) bool { - return BootstrapTokenRegexp.MatchString(token) -} - -// IsValidBootstrapTokenID returns whether the given string is valid as a Bootstrap Token ID, -// i.e. whether it satisfies BootstrapTokenIDRegexp -func IsValidBootstrapTokenID(tokenID string) bool { - return BootstrapTokenIDRegexp.MatchString(tokenID) -} - -// BootstrapTokenSecretName returns the expected name for the Secret storing the -// Bootstrap Token in the Kubernetes API. -func BootstrapTokenSecretName(tokenID string) string { - return fmt.Sprintf("%s%s", api.BootstrapTokenSecretPrefix, tokenID) -} - -// ValidateBootstrapGroupName checks if the provided group name is a valid -// bootstrap group name. Returns nil if valid or a validation error if invalid. -func ValidateBootstrapGroupName(name string) error { - if BootstrapGroupRegexp.Match([]byte(name)) { - return nil - } - return fmt.Errorf("bootstrap group %q is invalid (must match %s)", name, api.BootstrapGroupPattern) -} - -// ValidateUsages validates that the passed-in strings are valid usage strings for bootstrap tokens. -func ValidateUsages(usages []string) error { - validUsages := sets.NewString(api.KnownTokenUsages...) - invalidUsages := sets.NewString() - for _, usage := range usages { - if !validUsages.Has(usage) { - invalidUsages.Insert(usage) - } - } - if len(invalidUsages) > 0 { - return fmt.Errorf("invalid bootstrap token usage string: %s, valid usage options: %s", strings.Join(invalidUsages.List(), ","), strings.Join(api.KnownTokenUsages, ",")) - } - return nil -} diff --git a/vendor/k8s.io/client-go/tools/leaderelection/healthzadaptor.go b/vendor/k8s.io/client-go/tools/leaderelection/healthzadaptor.go deleted file mode 100644 index b93537291..000000000 --- a/vendor/k8s.io/client-go/tools/leaderelection/healthzadaptor.go +++ /dev/null @@ -1,69 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package leaderelection - -import ( - "net/http" - "sync" - "time" -) - -// HealthzAdaptor associates the /healthz endpoint with the LeaderElection object. -// It helps deal with the /healthz endpoint being set up prior to the LeaderElection. -// This contains the code needed to act as an adaptor between the leader -// election code and the health check code.
It allows us to provide health -// status about the leader election, most specifically whether the leader -// has failed to renew without exiting the process. In that case we should -// report not healthy and rely on the kubelet to take down the process. -type HealthzAdaptor struct { - pointerLock sync.Mutex - le *LeaderElector - timeout time.Duration -} - -// Name returns the name of the health check we are implementing. -func (l *HealthzAdaptor) Name() string { - return "leaderElection" -} - -// Check is called by the healthz endpoint handler. -// It fails (returns an error) if we own the lease but have not been able to renew it. -func (l *HealthzAdaptor) Check(req *http.Request) error { - l.pointerLock.Lock() - defer l.pointerLock.Unlock() - if l.le == nil { - return nil - } - return l.le.Check(l.timeout) -} - -// SetLeaderElection ties a leader election object to a HealthzAdaptor. -func (l *HealthzAdaptor) SetLeaderElection(le *LeaderElector) { - l.pointerLock.Lock() - defer l.pointerLock.Unlock() - l.le = le -} - -// NewLeaderHealthzAdaptor creates a basic healthz adaptor to monitor a leader election. -// timeout determines the grace period allowed beyond the lease expiry; -// checks within that period after the lease expires will still return healthy. -func NewLeaderHealthzAdaptor(timeout time.Duration) *HealthzAdaptor { - result := &HealthzAdaptor{ - timeout: timeout, - } - return result -} diff --git a/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go b/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go deleted file mode 100644 index 5f38682e6..000000000 --- a/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go +++ /dev/null @@ -1,336 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package leaderelection implements leader election of a set of endpoints. -// It uses an annotation in the endpoints object to store the record of the -// election state. -// -// This implementation does not guarantee that only one client is acting as a -// leader (a.k.a. fencing). A client observes timestamps captured locally to -// infer the state of the leader election. Thus the implementation is tolerant -// to arbitrary clock skew, but is not tolerant to arbitrary clock skew rate. -// -// However, the level of tolerance to skew rate can be configured by setting -// RenewDeadline and LeaseDuration appropriately. The tolerance, expressed as a -// maximum tolerated ratio of time passed on the fastest node to time passed on -// the slowest node, can be approximately achieved with a configuration that sets -// the same ratio of LeaseDuration to RenewDeadline. For example, if a user wanted -// to tolerate some nodes progressing forward in time twice as fast as other nodes, -// the user could set LeaseDuration to 60 seconds and RenewDeadline to 30 seconds. -// -// While not required, some method of clock synchronization between nodes in the -// cluster is highly recommended.
It's important to keep in mind when configuring -// this client that the tolerance to skew rate varies inversely to master -// availability. -// -// Larger clusters often have a more lenient SLA for API latency. This should be -// taken into account when configuring the client. The rate of leader transitions -// should be monitored and RetryPeriod and LeaseDuration should be increased -// until the rate is stable and acceptably low. It's important to keep in mind -// when configuring this client that the tolerance to API latency varies inversely -// to master availability. -// -// DISCLAIMER: this is an alpha API. This library will likely change significantly -// or even be removed entirely in subsequent releases. Depend on this API at -// your own risk. -package leaderelection - -import ( - "context" - "fmt" - "reflect" - "time" - - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/clock" - "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - rl "k8s.io/client-go/tools/leaderelection/resourcelock" - - "github.com/golang/glog" -) - -const ( - JitterFactor = 1.2 -) - -// NewLeaderElector creates a LeaderElector from a LeaderElectionConfig -func NewLeaderElector(lec LeaderElectionConfig) (*LeaderElector, error) { - if lec.LeaseDuration <= lec.RenewDeadline { - return nil, fmt.Errorf("leaseDuration must be greater than renewDeadline") - } - if lec.RenewDeadline <= time.Duration(JitterFactor*float64(lec.RetryPeriod)) { - return nil, fmt.Errorf("renewDeadline must be greater than retryPeriod*JitterFactor") - } - if lec.LeaseDuration < 1 { - return nil, fmt.Errorf("leaseDuration must be greater than zero") - } - if lec.RenewDeadline < 1 { - return nil, fmt.Errorf("renewDeadline must be greater than zero") - } - if lec.RetryPeriod < 1 { - return nil, fmt.Errorf("retryPeriod must be greater than zero") - } - - if lec.Lock == nil { - return nil, fmt.Errorf("Lock must not be nil.") - } - return &LeaderElector{ - config: lec, - clock: clock.RealClock{}, - }, nil -} - -type LeaderElectionConfig struct { - // Lock is the resource that will be used for locking - Lock rl.Interface - - // LeaseDuration is the duration that non-leader candidates will - // wait to force acquire leadership. This is measured against time of - // last observed ack. - LeaseDuration time.Duration - // RenewDeadline is the duration that the acting master will retry - // refreshing leadership before giving up. - RenewDeadline time.Duration - // RetryPeriod is the duration the LeaderElector clients should wait - // between tries of actions. - RetryPeriod time.Duration - - // Callbacks are callbacks that are triggered during certain lifecycle - // events of the LeaderElector - Callbacks LeaderCallbacks - - // WatchDog is the associated health checker - // WatchDog may be null if its not needed/configured. - WatchDog *HealthzAdaptor - - // Name is the name of the resource lock for debugging - Name string -} - -// LeaderCallbacks are callbacks that are triggered during certain -// lifecycle events of the LeaderElector. These are invoked asynchronously. 
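For a concrete check of the constraints NewLeaderElector enforces above, the commonly used 15s/10s/2s combination passes validation: 15s > 10s, and 10s > JitterFactor * 2s = 2.4s. A minimal sketch, assuming a lock constructed elsewhere:

package electionexample

import (
	"time"

	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

// newElector uses illustrative 15s/10s/2s durations, which satisfy the
// checks above: LeaseDuration > RenewDeadline, and RenewDeadline (10s)
// exceeds JitterFactor*RetryPeriod (1.2 * 2s = 2.4s).
func newElector(lock resourcelock.Interface) (*leaderelection.LeaderElector, error) {
	return leaderelection.NewLeaderElector(leaderelection.LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: 15 * time.Second,
		RenewDeadline: 10 * time.Second,
		RetryPeriod:   2 * time.Second,
	})
}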
-// -// possible future callbacks: -// * OnChallenge() -type LeaderCallbacks struct { - // OnStartedLeading is called when a LeaderElector client starts leading - OnStartedLeading func(context.Context) - // OnStoppedLeading is called when a LeaderElector client stops leading - OnStoppedLeading func() - // OnNewLeader is called when the client observes a leader that is - // not the previously observed leader. This includes the first observed - // leader when the client starts. - OnNewLeader func(identity string) -} - -// LeaderElector is a leader election client. -type LeaderElector struct { - config LeaderElectionConfig - // internal bookkeeping - observedRecord rl.LeaderElectionRecord - observedTime time.Time - // used to implement OnNewLeader(), may lag slightly from the - // value observedRecord.HolderIdentity if the transition has - // not yet been reported. - reportedLeader string - - // clock is wrapper around time to allow for less flaky testing - clock clock.Clock - - // name is the name of the resource lock for debugging - name string -} - -// Run starts the leader election loop -func (le *LeaderElector) Run(ctx context.Context) { - defer func() { - runtime.HandleCrash() - le.config.Callbacks.OnStoppedLeading() - }() - if !le.acquire(ctx) { - return // ctx signalled done - } - ctx, cancel := context.WithCancel(ctx) - defer cancel() - go le.config.Callbacks.OnStartedLeading(ctx) - le.renew(ctx) -} - -// RunOrDie starts a client with the provided config or panics if the config -// fails to validate. -func RunOrDie(ctx context.Context, lec LeaderElectionConfig) { - le, err := NewLeaderElector(lec) - if err != nil { - panic(err) - } - if lec.WatchDog != nil { - lec.WatchDog.SetLeaderElection(le) - } - le.Run(ctx) -} - -// GetLeader returns the identity of the last observed leader or returns the empty string if -// no leader has yet been observed. -func (le *LeaderElector) GetLeader() string { - return le.observedRecord.HolderIdentity -} - -// IsLeader returns true if the last observed leader was this client else returns false. -func (le *LeaderElector) IsLeader() bool { - return le.observedRecord.HolderIdentity == le.config.Lock.Identity() -} - -// acquire loops calling tryAcquireOrRenew and returns true immediately when tryAcquireOrRenew succeeds. -// Returns false if ctx signals done. -func (le *LeaderElector) acquire(ctx context.Context) bool { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - succeeded := false - desc := le.config.Lock.Describe() - glog.Infof("attempting to acquire leader lease %v...", desc) - wait.JitterUntil(func() { - succeeded = le.tryAcquireOrRenew() - le.maybeReportTransition() - if !succeeded { - glog.V(4).Infof("failed to acquire lease %v", desc) - return - } - le.config.Lock.RecordEvent("became leader") - glog.Infof("successfully acquired lease %v", desc) - cancel() - }, le.config.RetryPeriod, JitterFactor, true, ctx.Done()) - return succeeded -} - -// renew loops calling tryAcquireOrRenew and returns immediately when tryAcquireOrRenew fails or ctx signals done. 
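Putting the config, callbacks, and Run/RunOrDie together, a caller of this (since removed) vendored API might have looked like the sketch below. The lock argument, watchdog, resource name, and the leader-only work are assumptions for illustration.

package electionexample

import (
	"context"
	"time"

	"github.com/golang/glog"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

func runElection(ctx context.Context, lock resourcelock.Interface, watchdog *leaderelection.HealthzAdaptor) {
	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: 15 * time.Second,
		RenewDeadline: 10 * time.Second,
		RetryPeriod:   2 * time.Second,
		WatchDog:      watchdog,             // nil is fine if no health check is wired up
		Name:          "example-controller", // assumption: debugging name
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) {
				// Leader-only work would run here until ctx is cancelled.
				<-ctx.Done()
			},
			OnStoppedLeading: func() {
				glog.Info("lost the lease")
			},
			OnNewLeader: func(identity string) {
				glog.Infof("observed leader: %s", identity)
			},
		},
	})
}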
-func (le *LeaderElector) renew(ctx context.Context) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - wait.Until(func() { - timeoutCtx, timeoutCancel := context.WithTimeout(ctx, le.config.RenewDeadline) - defer timeoutCancel() - err := wait.PollImmediateUntil(le.config.RetryPeriod, func() (bool, error) { - done := make(chan bool, 1) - go func() { - defer close(done) - done <- le.tryAcquireOrRenew() - }() - - select { - case <-timeoutCtx.Done(): - return false, fmt.Errorf("failed to tryAcquireOrRenew %s", timeoutCtx.Err()) - case result := <-done: - return result, nil - } - }, timeoutCtx.Done()) - - le.maybeReportTransition() - desc := le.config.Lock.Describe() - if err == nil { - glog.V(4).Infof("successfully renewed lease %v", desc) - return - } - le.config.Lock.RecordEvent("stopped leading") - glog.Infof("failed to renew lease %v: %v", desc, err) - cancel() - }, le.config.RetryPeriod, ctx.Done()) -} - -// tryAcquireOrRenew tries to acquire a leader lease if it is not already acquired, -// else it tries to renew the lease if it has already been acquired. Returns true -// on success else returns false. -func (le *LeaderElector) tryAcquireOrRenew() bool { - now := metav1.Now() - leaderElectionRecord := rl.LeaderElectionRecord{ - HolderIdentity: le.config.Lock.Identity(), - LeaseDurationSeconds: int(le.config.LeaseDuration / time.Second), - RenewTime: now, - AcquireTime: now, - } - - // 1. obtain or create the ElectionRecord - oldLeaderElectionRecord, err := le.config.Lock.Get() - if err != nil { - if !errors.IsNotFound(err) { - glog.Errorf("error retrieving resource lock %v: %v", le.config.Lock.Describe(), err) - return false - } - if err = le.config.Lock.Create(leaderElectionRecord); err != nil { - glog.Errorf("error initially creating leader election record: %v", err) - return false - } - le.observedRecord = leaderElectionRecord - le.observedTime = le.clock.Now() - return true - } - - // 2. Record obtained, check the Identity & Time - if !reflect.DeepEqual(le.observedRecord, *oldLeaderElectionRecord) { - le.observedRecord = *oldLeaderElectionRecord - le.observedTime = le.clock.Now() - } - if le.observedTime.Add(le.config.LeaseDuration).After(now.Time) && - !le.IsLeader() { - glog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity) - return false - } - - // 3. We're going to try to update. The leaderElectionRecord is set to it's default - // here. Let's correct it before updating. - if le.IsLeader() { - leaderElectionRecord.AcquireTime = oldLeaderElectionRecord.AcquireTime - leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions - } else { - leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions + 1 - } - - // update the lock itself - if err = le.config.Lock.Update(leaderElectionRecord); err != nil { - glog.Errorf("Failed to update lock: %v", err) - return false - } - le.observedRecord = leaderElectionRecord - le.observedTime = le.clock.Now() - return true -} - -func (le *LeaderElector) maybeReportTransition() { - if le.observedRecord.HolderIdentity == le.reportedLeader { - return - } - le.reportedLeader = le.observedRecord.HolderIdentity - if le.config.Callbacks.OnNewLeader != nil { - go le.config.Callbacks.OnNewLeader(le.reportedLeader) - } -} - -// Check will determine if the current lease is expired by more than timeout. 
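The health check that follows reduces to a single inequality over the last observed renew time. A minimal sketch with illustrative numbers (the helper name is an assumption):

package electionexample

import "time"

// leaseExpiredBeyondTolerance restates the condition Check evaluates below:
// with a 15s LeaseDuration and a 20s healthz timeout (illustrative numbers),
// unhealthy is reported only after 35s without a successful renew, leaving
// room for ordinary retry churn.
func leaseExpiredBeyondTolerance(observed time.Time, leaseDuration, timeout time.Duration) bool {
	return time.Since(observed) > leaseDuration+timeout
}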
-func (le *LeaderElector) Check(maxTolerableExpiredLease time.Duration) error { - if !le.IsLeader() { - // Currently not concerned with the case that we are hot standby - return nil - } - // If we are more than timeout seconds after the lease duration that is past the timeout - // on the lease renew. Time to start reporting ourselves as unhealthy. We should have - // died but conditions like deadlock can prevent this. (See #70819) - if le.clock.Since(le.observedTime) > le.config.LeaseDuration+maxTolerableExpiredLease { - return fmt.Errorf("failed election to renew leadership on lease %s", le.config.Name) - } - - return nil -} diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go deleted file mode 100644 index 4ff595603..000000000 --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/configmaplock.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resourcelock - -import ( - "encoding/json" - "errors" - "fmt" - - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - corev1client "k8s.io/client-go/kubernetes/typed/core/v1" -) - -// TODO: This is almost a exact replica of Endpoints lock. -// going forwards as we self host more and more components -// and use ConfigMaps as the means to pass that configuration -// data we will likely move to deprecate the Endpoints lock. - -type ConfigMapLock struct { - // ConfigMapMeta should contain a Name and a Namespace of a - // ConfigMapMeta object that the LeaderElector will attempt to lead. - ConfigMapMeta metav1.ObjectMeta - Client corev1client.ConfigMapsGetter - LockConfig ResourceLockConfig - cm *v1.ConfigMap -} - -// Get returns the election record from a ConfigMap Annotation -func (cml *ConfigMapLock) Get() (*LeaderElectionRecord, error) { - var record LeaderElectionRecord - var err error - cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Get(cml.ConfigMapMeta.Name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - if cml.cm.Annotations == nil { - cml.cm.Annotations = make(map[string]string) - } - if recordBytes, found := cml.cm.Annotations[LeaderElectionRecordAnnotationKey]; found { - if err := json.Unmarshal([]byte(recordBytes), &record); err != nil { - return nil, err - } - } - return &record, nil -} - -// Create attempts to create a LeaderElectionRecord annotation -func (cml *ConfigMapLock) Create(ler LeaderElectionRecord) error { - recordBytes, err := json.Marshal(ler) - if err != nil { - return err - } - cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Create(&v1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: cml.ConfigMapMeta.Name, - Namespace: cml.ConfigMapMeta.Namespace, - Annotations: map[string]string{ - LeaderElectionRecordAnnotationKey: string(recordBytes), - }, - }, - }) - return err -} - -// Update will update an existing annotation on a given resource. 
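A ConfigMapLock as defined above might be constructed as in the following sketch; the namespace, lock name, and the provenance of the client and recorder are assumptions, not part of this patch.

package lockexample

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/client-go/tools/record"
)

// newConfigMapLock is a sketch: the client and recorder are assumed to be
// built elsewhere (e.g. from an in-cluster rest.Config).
func newConfigMapLock(client corev1client.ConfigMapsGetter, recorder record.EventRecorder, id string) *resourcelock.ConfigMapLock {
	return &resourcelock.ConfigMapLock{
		ConfigMapMeta: metav1.ObjectMeta{
			Namespace: "kube-system",   // assumption
			Name:      "example-lock",  // assumption
		},
		Client: client,
		LockConfig: resourcelock.ResourceLockConfig{
			Identity:      id,
			EventRecorder: recorder,
		},
	}
}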
-func (cml *ConfigMapLock) Update(ler LeaderElectionRecord) error { - if cml.cm == nil { - return errors.New("endpoint not initialized, call get or create first") - } - recordBytes, err := json.Marshal(ler) - if err != nil { - return err - } - cml.cm.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes) - cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Update(cml.cm) - return err -} - -// RecordEvent in leader election while adding meta-data -func (cml *ConfigMapLock) RecordEvent(s string) { - events := fmt.Sprintf("%v %v", cml.LockConfig.Identity, s) - cml.LockConfig.EventRecorder.Eventf(&v1.ConfigMap{ObjectMeta: cml.cm.ObjectMeta}, v1.EventTypeNormal, "LeaderElection", events) -} - -// Describe is used to convert details on current resource lock -// into a string -func (cml *ConfigMapLock) Describe() string { - return fmt.Sprintf("%v/%v", cml.ConfigMapMeta.Namespace, cml.ConfigMapMeta.Name) -} - -// returns the Identity of the lock -func (cml *ConfigMapLock) Identity() string { - return cml.LockConfig.Identity -} diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go deleted file mode 100644 index 6f7dcfb0c..000000000 --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/endpointslock.go +++ /dev/null @@ -1,104 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resourcelock - -import ( - "encoding/json" - "errors" - "fmt" - - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - corev1client "k8s.io/client-go/kubernetes/typed/core/v1" -) - -type EndpointsLock struct { - // EndpointsMeta should contain a Name and a Namespace of an - // Endpoints object that the LeaderElector will attempt to lead. 
- EndpointsMeta metav1.ObjectMeta - Client corev1client.EndpointsGetter - LockConfig ResourceLockConfig - e *v1.Endpoints -} - -// Get returns the election record from a Endpoints Annotation -func (el *EndpointsLock) Get() (*LeaderElectionRecord, error) { - var record LeaderElectionRecord - var err error - el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Get(el.EndpointsMeta.Name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - if el.e.Annotations == nil { - el.e.Annotations = make(map[string]string) - } - if recordBytes, found := el.e.Annotations[LeaderElectionRecordAnnotationKey]; found { - if err := json.Unmarshal([]byte(recordBytes), &record); err != nil { - return nil, err - } - } - return &record, nil -} - -// Create attempts to create a LeaderElectionRecord annotation -func (el *EndpointsLock) Create(ler LeaderElectionRecord) error { - recordBytes, err := json.Marshal(ler) - if err != nil { - return err - } - el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Create(&v1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: el.EndpointsMeta.Name, - Namespace: el.EndpointsMeta.Namespace, - Annotations: map[string]string{ - LeaderElectionRecordAnnotationKey: string(recordBytes), - }, - }, - }) - return err -} - -// Update will update and existing annotation on a given resource. -func (el *EndpointsLock) Update(ler LeaderElectionRecord) error { - if el.e == nil { - return errors.New("endpoint not initialized, call get or create first") - } - recordBytes, err := json.Marshal(ler) - if err != nil { - return err - } - el.e.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes) - el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Update(el.e) - return err -} - -// RecordEvent in leader election while adding meta-data -func (el *EndpointsLock) RecordEvent(s string) { - events := fmt.Sprintf("%v %v", el.LockConfig.Identity, s) - el.LockConfig.EventRecorder.Eventf(&v1.Endpoints{ObjectMeta: el.e.ObjectMeta}, v1.EventTypeNormal, "LeaderElection", events) -} - -// Describe is used to convert details on current resource lock -// into a string -func (el *EndpointsLock) Describe() string { - return fmt.Sprintf("%v/%v", el.EndpointsMeta.Namespace, el.EndpointsMeta.Name) -} - -// returns the Identity of the lock -func (el *EndpointsLock) Identity() string { - return el.LockConfig.Identity -} diff --git a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go b/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go deleted file mode 100644 index 676fd1d7d..000000000 --- a/vendor/k8s.io/client-go/tools/leaderelection/resourcelock/interface.go +++ /dev/null @@ -1,102 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package resourcelock - -import ( - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - corev1 "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/client-go/tools/record" -) - -const ( - LeaderElectionRecordAnnotationKey = "control-plane.alpha.kubernetes.io/leader" - EndpointsResourceLock = "endpoints" - ConfigMapsResourceLock = "configmaps" -) - -// LeaderElectionRecord is the record that is stored in the leader election annotation. -// This information should be used for observational purposes only and could be replaced -// with a random string (e.g. UUID) with only slight modification of this code. -// TODO(mikedanese): this should potentially be versioned -type LeaderElectionRecord struct { - HolderIdentity string `json:"holderIdentity"` - LeaseDurationSeconds int `json:"leaseDurationSeconds"` - AcquireTime metav1.Time `json:"acquireTime"` - RenewTime metav1.Time `json:"renewTime"` - LeaderTransitions int `json:"leaderTransitions"` -} - -// ResourceLockConfig common data that exists across different -// resource locks -type ResourceLockConfig struct { - Identity string - EventRecorder record.EventRecorder -} - -// Interface offers a common interface for locking on arbitrary -// resources used in leader election. The Interface is used -// to hide the details on specific implementations in order to allow -// them to change over time. This interface is strictly for use -// by the leaderelection code. -type Interface interface { - // Get returns the LeaderElectionRecord - Get() (*LeaderElectionRecord, error) - - // Create attempts to create a LeaderElectionRecord - Create(ler LeaderElectionRecord) error - - // Update will update and existing LeaderElectionRecord - Update(ler LeaderElectionRecord) error - - // RecordEvent is used to record events - RecordEvent(string) - - // Identity will return the locks Identity - Identity() string - - // Describe is used to convert details on current resource lock - // into a string - Describe() string -} - -// Manufacture will create a lock of a given type according to the input parameters -func New(lockType string, ns string, name string, client corev1.CoreV1Interface, rlc ResourceLockConfig) (Interface, error) { - switch lockType { - case EndpointsResourceLock: - return &EndpointsLock{ - EndpointsMeta: metav1.ObjectMeta{ - Namespace: ns, - Name: name, - }, - Client: client, - LockConfig: rlc, - }, nil - case ConfigMapsResourceLock: - return &ConfigMapLock{ - ConfigMapMeta: metav1.ObjectMeta{ - Namespace: ns, - Name: name, - }, - Client: client, - LockConfig: rlc, - }, nil - default: - return nil, fmt.Errorf("Invalid lock-type %s", lockType) - } -} diff --git a/vendor/k8s.io/client-go/tools/portforward/doc.go b/vendor/k8s.io/client-go/tools/portforward/doc.go deleted file mode 100644 index 2f5340634..000000000 --- a/vendor/k8s.io/client-go/tools/portforward/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Package portforward adds support for SSH-like port forwarding from the client's -// local host to remote containers. -package portforward // import "k8s.io/client-go/tools/portforward" diff --git a/vendor/k8s.io/client-go/tools/portforward/portforward.go b/vendor/k8s.io/client-go/tools/portforward/portforward.go deleted file mode 100644 index 9d7936e7c..000000000 --- a/vendor/k8s.io/client-go/tools/portforward/portforward.go +++ /dev/null @@ -1,342 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package portforward - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "strconv" - "strings" - "sync" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/httpstream" - "k8s.io/apimachinery/pkg/util/runtime" -) - -// TODO move to API machinery and re-unify with kubelet/server/portfoward -// The subprotocol "portforward.k8s.io" is used for port forwarding. -const PortForwardProtocolV1Name = "portforward.k8s.io" - -// PortForwarder knows how to listen for local connections and forward them to -// a remote pod via an upgraded HTTP request. -type PortForwarder struct { - ports []ForwardedPort - stopChan <-chan struct{} - - dialer httpstream.Dialer - streamConn httpstream.Connection - listeners []io.Closer - Ready chan struct{} - requestIDLock sync.Mutex - requestID int - out io.Writer - errOut io.Writer -} - -// ForwardedPort contains a Local:Remote port pairing. -type ForwardedPort struct { - Local uint16 - Remote uint16 -} - -/* - valid port specifications: - - 5000 - - forwards from localhost:5000 to pod:5000 - - 8888:5000 - - forwards from localhost:8888 to pod:5000 - - 0:5000 - :5000 - - selects a random available local port, - forwards from localhost: to pod:5000 -*/ -func parsePorts(ports []string) ([]ForwardedPort, error) { - var forwards []ForwardedPort - for _, portString := range ports { - parts := strings.Split(portString, ":") - var localString, remoteString string - if len(parts) == 1 { - localString = parts[0] - remoteString = parts[0] - } else if len(parts) == 2 { - localString = parts[0] - if localString == "" { - // support :5000 - localString = "0" - } - remoteString = parts[1] - } else { - return nil, fmt.Errorf("Invalid port format '%s'", portString) - } - - localPort, err := strconv.ParseUint(localString, 10, 16) - if err != nil { - return nil, fmt.Errorf("Error parsing local port '%s': %s", localString, err) - } - - remotePort, err := strconv.ParseUint(remoteString, 10, 16) - if err != nil { - return nil, fmt.Errorf("Error parsing remote port '%s': %s", remoteString, err) - } - if remotePort == 0 { - return nil, fmt.Errorf("Remote port must be > 0") - } - - forwards = append(forwards, ForwardedPort{uint16(localPort), uint16(remotePort)}) - } - - return forwards, nil -} - -// New creates a new PortForwarder. 
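Those port specifications feed directly into New, defined just below. A sketch, assuming the httpstream.Dialer has been obtained elsewhere (e.g. by upgrading a pod portforward request):

package forwardexample

import (
	"os"

	"k8s.io/apimachinery/pkg/util/httpstream"
	"k8s.io/client-go/tools/portforward"
)

// forwardPorts is a sketch: dialer construction is assumed to happen
// elsewhere and is not shown.
func forwardPorts(dialer httpstream.Dialer, stopCh <-chan struct{}) error {
	readyCh := make(chan struct{})
	// "5000"      -> localhost:5000 to pod:5000
	// "8888:5000" -> localhost:8888 to pod:5000
	// ":5000"     -> random local port to pod:5000
	fw, err := portforward.New(dialer, []string{"5000", "8888:5000", ":5000"}, stopCh, readyCh, os.Stdout, os.Stderr)
	if err != nil {
		return err
	}
	return fw.ForwardPorts() // blocks until stopCh closes or the connection drops
}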
-func New(dialer httpstream.Dialer, ports []string, stopChan <-chan struct{}, readyChan chan struct{}, out, errOut io.Writer) (*PortForwarder, error) { - if len(ports) == 0 { - return nil, errors.New("You must specify at least 1 port") - } - parsedPorts, err := parsePorts(ports) - if err != nil { - return nil, err - } - return &PortForwarder{ - dialer: dialer, - ports: parsedPorts, - stopChan: stopChan, - Ready: readyChan, - out: out, - errOut: errOut, - }, nil -} - -// ForwardPorts formats and executes a port forwarding request. The connection will remain -// open until stopChan is closed. -func (pf *PortForwarder) ForwardPorts() error { - defer pf.Close() - - var err error - pf.streamConn, _, err = pf.dialer.Dial(PortForwardProtocolV1Name) - if err != nil { - return fmt.Errorf("error upgrading connection: %s", err) - } - defer pf.streamConn.Close() - - return pf.forward() -} - -// forward dials the remote host specific in req, upgrades the request, starts -// listeners for each port specified in ports, and forwards local connections -// to the remote host via streams. -func (pf *PortForwarder) forward() error { - var err error - - listenSuccess := false - for _, port := range pf.ports { - err = pf.listenOnPort(&port) - switch { - case err == nil: - listenSuccess = true - default: - if pf.errOut != nil { - fmt.Fprintf(pf.errOut, "Unable to listen on port %d: %v\n", port.Local, err) - } - } - } - - if !listenSuccess { - return fmt.Errorf("Unable to listen on any of the requested ports: %v", pf.ports) - } - - if pf.Ready != nil { - close(pf.Ready) - } - - // wait for interrupt or conn closure - select { - case <-pf.stopChan: - case <-pf.streamConn.CloseChan(): - runtime.HandleError(errors.New("lost connection to pod")) - } - - return nil -} - -// listenOnPort delegates tcp4 and tcp6 listener creation and waits for connections on both of these addresses. -// If both listener creation fail, an error is raised. -func (pf *PortForwarder) listenOnPort(port *ForwardedPort) error { - errTcp4 := pf.listenOnPortAndAddress(port, "tcp4", "127.0.0.1") - errTcp6 := pf.listenOnPortAndAddress(port, "tcp6", "::1") - if errTcp4 != nil && errTcp6 != nil { - return fmt.Errorf("All listeners failed to create with the following errors: %s, %s", errTcp4, errTcp6) - } - return nil -} - -// listenOnPortAndAddress delegates listener creation and waits for new connections -// in the background f -func (pf *PortForwarder) listenOnPortAndAddress(port *ForwardedPort, protocol string, address string) error { - listener, err := pf.getListener(protocol, address, port) - if err != nil { - return err - } - pf.listeners = append(pf.listeners, listener) - go pf.waitForConnection(listener, *port) - return nil -} - -// getListener creates a listener on the interface targeted by the given hostname on the given port with -// the given protocol. 
protocol is in net.Listen style which basically admits values like tcp, tcp4, tcp6 -func (pf *PortForwarder) getListener(protocol string, hostname string, port *ForwardedPort) (net.Listener, error) { - listener, err := net.Listen(protocol, net.JoinHostPort(hostname, strconv.Itoa(int(port.Local)))) - if err != nil { - return nil, fmt.Errorf("Unable to create listener: Error %s", err) - } - listenerAddress := listener.Addr().String() - host, localPort, _ := net.SplitHostPort(listenerAddress) - localPortUInt, err := strconv.ParseUint(localPort, 10, 16) - - if err != nil { - return nil, fmt.Errorf("Error parsing local port: %s from %s (%s)", err, listenerAddress, host) - } - port.Local = uint16(localPortUInt) - if pf.out != nil { - fmt.Fprintf(pf.out, "Forwarding from %s -> %d\n", net.JoinHostPort(hostname, strconv.Itoa(int(localPortUInt))), port.Remote) - } - - return listener, nil -} - -// waitForConnection waits for new connections to listener and handles them in -// the background. -func (pf *PortForwarder) waitForConnection(listener net.Listener, port ForwardedPort) { - for { - conn, err := listener.Accept() - if err != nil { - // TODO consider using something like https://github.com/hydrogen18/stoppableListener? - if !strings.Contains(strings.ToLower(err.Error()), "use of closed network connection") { - runtime.HandleError(fmt.Errorf("Error accepting connection on port %d: %v", port.Local, err)) - } - return - } - go pf.handleConnection(conn, port) - } -} - -func (pf *PortForwarder) nextRequestID() int { - pf.requestIDLock.Lock() - defer pf.requestIDLock.Unlock() - id := pf.requestID - pf.requestID++ - return id -} - -// handleConnection copies data between the local connection and the stream to -// the remote server. -func (pf *PortForwarder) handleConnection(conn net.Conn, port ForwardedPort) { - defer conn.Close() - - if pf.out != nil { - fmt.Fprintf(pf.out, "Handling connection for %d\n", port.Local) - } - - requestID := pf.nextRequestID() - - // create error stream - headers := http.Header{} - headers.Set(v1.StreamType, v1.StreamTypeError) - headers.Set(v1.PortHeader, fmt.Sprintf("%d", port.Remote)) - headers.Set(v1.PortForwardRequestIDHeader, strconv.Itoa(requestID)) - errorStream, err := pf.streamConn.CreateStream(headers) - if err != nil { - runtime.HandleError(fmt.Errorf("error creating error stream for port %d -> %d: %v", port.Local, port.Remote, err)) - return - } - // we're not writing to this stream - errorStream.Close() - - errorChan := make(chan error) - go func() { - message, err := ioutil.ReadAll(errorStream) - switch { - case err != nil: - errorChan <- fmt.Errorf("error reading from error stream for port %d -> %d: %v", port.Local, port.Remote, err) - case len(message) > 0: - errorChan <- fmt.Errorf("an error occurred forwarding %d -> %d: %v", port.Local, port.Remote, string(message)) - } - close(errorChan) - }() - - // create data stream - headers.Set(v1.StreamType, v1.StreamTypeData) - dataStream, err := pf.streamConn.CreateStream(headers) - if err != nil { - runtime.HandleError(fmt.Errorf("error creating forwarding stream for port %d -> %d: %v", port.Local, port.Remote, err)) - return - } - - localError := make(chan struct{}) - remoteDone := make(chan struct{}) - - go func() { - // Copy from the remote side to the local port. 
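// This goroutine and its local->remote twin below form the relay:
// remoteDone is closed once the server finishes sending, while the twin
// closes dataStream on return so the server sees EOF, and closes
// localError on failure so the select further down stops waiting without
// the other copy having to finish.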
- if _, err := io.Copy(conn, dataStream); err != nil && !strings.Contains(err.Error(), "use of closed network connection") { - runtime.HandleError(fmt.Errorf("error copying from remote stream to local connection: %v", err)) - } - - // inform the select below that the remote copy is done - close(remoteDone) - }() - - go func() { - // inform server we're not sending any more data after copy unblocks - defer dataStream.Close() - - // Copy from the local port to the remote side. - if _, err := io.Copy(dataStream, conn); err != nil && !strings.Contains(err.Error(), "use of closed network connection") { - runtime.HandleError(fmt.Errorf("error copying from local connection to remote stream: %v", err)) - // break out of the select below without waiting for the other copy to finish - close(localError) - } - }() - - // wait for either a local->remote error or for copying from remote->local to finish - select { - case <-remoteDone: - case <-localError: - } - - // always expect something on errorChan (it may be nil) - err = <-errorChan - if err != nil { - runtime.HandleError(err) - } -} - -func (pf *PortForwarder) Close() { - // stop all listeners - for _, l := range pf.listeners { - if err := l.Close(); err != nil { - runtime.HandleError(fmt.Errorf("error closing listener: %v", err)) - } - } -} diff --git a/vendor/k8s.io/client-go/tools/record/doc.go b/vendor/k8s.io/client-go/tools/record/doc.go deleted file mode 100644 index 657ddecbc..000000000 --- a/vendor/k8s.io/client-go/tools/record/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package record has all client logic for recording and reporting events. -package record // import "k8s.io/client-go/tools/record" diff --git a/vendor/k8s.io/client-go/tools/record/event.go b/vendor/k8s.io/client-go/tools/record/event.go deleted file mode 100644 index 168dfa80c..000000000 --- a/vendor/k8s.io/client-go/tools/record/event.go +++ /dev/null @@ -1,322 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package record - -import ( - "fmt" - "math/rand" - "time" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/clock" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/watch" - restclient "k8s.io/client-go/rest" - ref "k8s.io/client-go/tools/reference" - - "net/http" - - "github.com/golang/glog" -) - -const maxTriesPerEvent = 12 - -var defaultSleepDuration = 10 * time.Second - -const maxQueuedEvents = 1000 - -// EventSink knows how to store events (client.Client implements it.) -// EventSink must respect the namespace that will be embedded in 'event'. -// It is assumed that EventSink will return the same sorts of errors as -// pkg/client's REST client. -type EventSink interface { - Create(event *v1.Event) (*v1.Event, error) - Update(event *v1.Event) (*v1.Event, error) - Patch(oldEvent *v1.Event, data []byte) (*v1.Event, error) -} - -// EventRecorder knows how to record events on behalf of an EventSource. -type EventRecorder interface { - // Event constructs an event from the given information and puts it in the queue for sending. - // 'object' is the object this event is about. Event will make a reference-- or you may also - // pass a reference to the object directly. - // 'type' of this event, and can be one of Normal, Warning. New types could be added in future - // 'reason' is the reason this event is generated. 'reason' should be short and unique; it - // should be in UpperCamelCase format (starting with a capital letter). "reason" will be used - // to automate handling of events, so imagine people writing switch statements to handle them. - // You want to make that easy. - // 'message' is intended to be human readable. - // - // The resulting event will be created in the same namespace as the reference object. - Event(object runtime.Object, eventtype, reason, message string) - - // Eventf is just like Event, but with Sprintf for the message field. - Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) - - // PastEventf is just like Eventf, but with an option to specify the event's 'timestamp' field. - PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) - - // AnnotatedEventf is just like eventf, but with annotations attached - AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) -} - -// EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log. -type EventBroadcaster interface { - // StartEventWatcher starts sending events received from this EventBroadcaster to the given - // event handler function. The return value can be ignored or used to stop recording, if - // desired. - StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface - - // StartRecordingToSink starts sending events received from this EventBroadcaster to the given - // sink. The return value can be ignored or used to stop recording, if desired. - StartRecordingToSink(sink EventSink) watch.Interface - - // StartLogging starts sending events received from this EventBroadcaster to the given logging - // function. The return value can be ignored or used to stop recording, if desired. 
- StartLogging(logf func(format string, args ...interface{})) watch.Interface - - // NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster - // with the event source set to the given event source. - NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder -} - -// Creates a new event broadcaster. -func NewBroadcaster() EventBroadcaster { - return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), defaultSleepDuration} -} - -func NewBroadcasterForTests(sleepDuration time.Duration) EventBroadcaster { - return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), sleepDuration} -} - -type eventBroadcasterImpl struct { - *watch.Broadcaster - sleepDuration time.Duration -} - -// StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink. -// The return value can be ignored or used to stop recording, if desired. -// TODO: make me an object with parameterizable queue length and retry interval -func (eventBroadcaster *eventBroadcasterImpl) StartRecordingToSink(sink EventSink) watch.Interface { - // The default math/rand package functions aren't thread safe, so create a - // new Rand object for each StartRecording call. - randGen := rand.New(rand.NewSource(time.Now().UnixNano())) - eventCorrelator := NewEventCorrelator(clock.RealClock{}) - return eventBroadcaster.StartEventWatcher( - func(event *v1.Event) { - recordToSink(sink, event, eventCorrelator, randGen, eventBroadcaster.sleepDuration) - }) -} - -func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrelator, randGen *rand.Rand, sleepDuration time.Duration) { - // Make a copy before modification, because there could be multiple listeners. - // Events are safe to copy like this. - eventCopy := *event - event = &eventCopy - result, err := eventCorrelator.EventCorrelate(event) - if err != nil { - utilruntime.HandleError(err) - } - if result.Skip { - return - } - tries := 0 - for { - if recordEvent(sink, result.Event, result.Patch, result.Event.Count > 1, eventCorrelator) { - break - } - tries++ - if tries >= maxTriesPerEvent { - glog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) - break - } - // Randomize the first sleep so that various clients won't all be - // synced up if the master goes down. - if tries == 1 { - time.Sleep(time.Duration(float64(sleepDuration) * randGen.Float64())) - } else { - time.Sleep(sleepDuration) - } - } -} - -func isKeyNotFoundError(err error) bool { - statusErr, _ := err.(*errors.StatusError) - - if statusErr != nil && statusErr.Status().Code == http.StatusNotFound { - return true - } - - return false -} - -// recordEvent attempts to write event to a sink. It returns true if the event -// was successfully recorded or discarded, false if it should be retried. -// If updateExistingEvent is false, it creates a new event, otherwise it updates -// existing event. -func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEvent bool, eventCorrelator *EventCorrelator) bool { - var newEvent *v1.Event - var err error - if updateExistingEvent { - newEvent, err = sink.Patch(event, patch) - } - // Update can fail because the event may have been removed and it no longer exists. 
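// In that case isKeyNotFoundError recognizes the 404 from the patch
// attempt and we fall through to Create with an empty ResourceVersion; a
// successful write then feeds the server-returned object back into the
// correlator so later duplicates patch rather than re-create.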
- if !updateExistingEvent || (updateExistingEvent && isKeyNotFoundError(err)) { - // Making sure that ResourceVersion is empty on creation - event.ResourceVersion = "" - newEvent, err = sink.Create(event) - } - if err == nil { - // we need to update our event correlator with the server returned state to handle name/resourceversion - eventCorrelator.UpdateState(newEvent) - return true - } - - // If we can't contact the server, then hold everything while we keep trying. - // Otherwise, something about the event is malformed and we should abandon it. - switch err.(type) { - case *restclient.RequestConstructionError: - // We will construct the request the same next time, so don't keep trying. - glog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err) - return true - case *errors.StatusError: - if errors.IsAlreadyExists(err) { - glog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err) - } else { - glog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err) - } - return true - case *errors.UnexpectedObjectError: - // We don't expect this; it implies the server's response didn't match a - // known pattern. Go ahead and retry. - default: - // This case includes actual http transport errors. Go ahead and retry. - } - glog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err) - return false -} - -// StartLogging starts sending events received from this EventBroadcaster to the given logging function. -// The return value can be ignored or used to stop recording, if desired. -func (eventBroadcaster *eventBroadcasterImpl) StartLogging(logf func(format string, args ...interface{})) watch.Interface { - return eventBroadcaster.StartEventWatcher( - func(e *v1.Event) { - logf("Event(%#v): type: '%v' reason: '%v' %v", e.InvolvedObject, e.Type, e.Reason, e.Message) - }) -} - -// StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function. -// The return value can be ignored or used to stop recording, if desired. -func (eventBroadcaster *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface { - watcher := eventBroadcaster.Watch() - go func() { - defer utilruntime.HandleCrash() - for watchEvent := range watcher.ResultChan() { - event, ok := watchEvent.Object.(*v1.Event) - if !ok { - // This is all local, so there's no reason this should - // ever happen. - continue - } - eventHandler(event) - } - }() - return watcher -} - -// NewRecorder returns an EventRecorder that records events with the given event source. -func (eventBroadcaster *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder { - return &recorderImpl{scheme, source, eventBroadcaster.Broadcaster, clock.RealClock{}} -} - -type recorderImpl struct { - scheme *runtime.Scheme - source v1.EventSource - *watch.Broadcaster - clock clock.Clock -} - -func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations map[string]string, timestamp metav1.Time, eventtype, reason, message string) { - ref, err := ref.GetReference(recorder.scheme, object) - if err != nil { - glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. 
Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message) - return - } - - if !validateEventType(eventtype) { - glog.Errorf("Unsupported event type: '%v'", eventtype) - return - } - - event := recorder.makeEvent(ref, annotations, eventtype, reason, message) - event.Source = recorder.source - - go func() { - // NOTE: events should be a non-blocking operation - defer utilruntime.HandleCrash() - recorder.Action(watch.Added, event) - }() -} - -func validateEventType(eventtype string) bool { - switch eventtype { - case v1.EventTypeNormal, v1.EventTypeWarning: - return true - } - return false -} - -func (recorder *recorderImpl) Event(object runtime.Object, eventtype, reason, message string) { - recorder.generateEvent(object, nil, metav1.Now(), eventtype, reason, message) -} - -func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { - recorder.Event(object, eventtype, reason, fmt.Sprintf(messageFmt, args...)) -} - -func (recorder *recorderImpl) PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) { - recorder.generateEvent(object, nil, timestamp, eventtype, reason, fmt.Sprintf(messageFmt, args...)) -} - -func (recorder *recorderImpl) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { - recorder.generateEvent(object, annotations, metav1.Now(), eventtype, reason, fmt.Sprintf(messageFmt, args...)) -} - -func (recorder *recorderImpl) makeEvent(ref *v1.ObjectReference, annotations map[string]string, eventtype, reason, message string) *v1.Event { - t := metav1.Time{Time: recorder.clock.Now()} - namespace := ref.Namespace - if namespace == "" { - namespace = metav1.NamespaceDefault - } - return &v1.Event{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()), - Namespace: namespace, - Annotations: annotations, - }, - InvolvedObject: *ref, - Reason: reason, - Message: message, - FirstTimestamp: t, - LastTimestamp: t, - Count: 1, - Type: eventtype, - } -} diff --git a/vendor/k8s.io/client-go/tools/record/events_cache.go b/vendor/k8s.io/client-go/tools/record/events_cache.go deleted file mode 100644 index a42084f3a..000000000 --- a/vendor/k8s.io/client-go/tools/record/events_cache.go +++ /dev/null @@ -1,462 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package record - -import ( - "encoding/json" - "fmt" - "strings" - "sync" - "time" - - "github.com/golang/groupcache/lru" - - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/clock" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/strategicpatch" - "k8s.io/client-go/util/flowcontrol" -) - -const ( - maxLruCacheEntries = 4096 - - // if we see the same event that varies only by message - // more than 10 times in a 10 minute period, aggregate the event - defaultAggregateMaxEvents = 10 - defaultAggregateIntervalInSeconds = 600 - - // by default, allow a source to send 25 events about an object - // but control the refill rate to 1 new event every 5 minutes - // this helps control the long-tail of events for things that are always - // unhealthy - defaultSpamBurst = 25 - defaultSpamQPS = 1. / 300. -) - -// getEventKey builds unique event key based on source, involvedObject, reason, message -func getEventKey(event *v1.Event) string { - return strings.Join([]string{ - event.Source.Component, - event.Source.Host, - event.InvolvedObject.Kind, - event.InvolvedObject.Namespace, - event.InvolvedObject.Name, - event.InvolvedObject.FieldPath, - string(event.InvolvedObject.UID), - event.InvolvedObject.APIVersion, - event.Type, - event.Reason, - event.Message, - }, - "") -} - -// getSpamKey builds unique event key based on source, involvedObject -func getSpamKey(event *v1.Event) string { - return strings.Join([]string{ - event.Source.Component, - event.Source.Host, - event.InvolvedObject.Kind, - event.InvolvedObject.Namespace, - event.InvolvedObject.Name, - string(event.InvolvedObject.UID), - event.InvolvedObject.APIVersion, - }, - "") -} - -// EventFilterFunc is a function that returns true if the event should be skipped -type EventFilterFunc func(event *v1.Event) bool - -// EventSourceObjectSpamFilter is responsible for throttling -// the amount of events a source and object can produce. -type EventSourceObjectSpamFilter struct { - sync.RWMutex - - // the cache that manages last synced state - cache *lru.Cache - - // burst is the amount of events we allow per source + object - burst int - - // qps is the refill rate of the token bucket in queries per second - qps float32 - - // clock is used to allow for testing over a time interval - clock clock.Clock -} - -// NewEventSourceObjectSpamFilter allows burst events from a source about an object with the specified qps refill. -func NewEventSourceObjectSpamFilter(lruCacheSize, burst int, qps float32, clock clock.Clock) *EventSourceObjectSpamFilter { - return &EventSourceObjectSpamFilter{ - cache: lru.New(lruCacheSize), - burst: burst, - qps: qps, - clock: clock, - } -} - -// spamRecord holds data used to perform spam filtering decisions. -type spamRecord struct { - // rateLimiter controls the rate of events about this object - rateLimiter flowcontrol.RateLimiter -} - -// Filter controls that a given source+object are not exceeding the allowed rate. -func (f *EventSourceObjectSpamFilter) Filter(event *v1.Event) bool { - var record spamRecord - - // controls our cached information about this event (source+object) - eventKey := getSpamKey(event) - - // do we have a record of similar events in our cache? 
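// With the package defaults declared above (burst 25, qps 1./300.), each
// source+object key may emit 25 events back to back, and thereafter
// roughly one event every five minutes as the token bucket refills.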
- f.Lock() - defer f.Unlock() - value, found := f.cache.Get(eventKey) - if found { - record = value.(spamRecord) - } - - // verify we have a rate limiter for this record - if record.rateLimiter == nil { - record.rateLimiter = flowcontrol.NewTokenBucketRateLimiterWithClock(f.qps, f.burst, f.clock) - } - - // ensure we have available rate - filter := !record.rateLimiter.TryAccept() - - // update the cache - f.cache.Add(eventKey, record) - - return filter -} - -// EventAggregatorKeyFunc is responsible for grouping events for aggregation -// It returns a tuple of the following: -// aggregateKey - key the identifies the aggregate group to bucket this event -// localKey - key that makes this event in the local group -type EventAggregatorKeyFunc func(event *v1.Event) (aggregateKey string, localKey string) - -// EventAggregatorByReasonFunc aggregates events by exact match on event.Source, event.InvolvedObject, event.Type and event.Reason -func EventAggregatorByReasonFunc(event *v1.Event) (string, string) { - return strings.Join([]string{ - event.Source.Component, - event.Source.Host, - event.InvolvedObject.Kind, - event.InvolvedObject.Namespace, - event.InvolvedObject.Name, - string(event.InvolvedObject.UID), - event.InvolvedObject.APIVersion, - event.Type, - event.Reason, - }, - ""), event.Message -} - -// EventAggregatorMessageFunc is responsible for producing an aggregation message -type EventAggregatorMessageFunc func(event *v1.Event) string - -// EventAggregratorByReasonMessageFunc returns an aggregate message by prefixing the incoming message -func EventAggregatorByReasonMessageFunc(event *v1.Event) string { - return "(combined from similar events): " + event.Message -} - -// EventAggregator identifies similar events and aggregates them into a single event -type EventAggregator struct { - sync.RWMutex - - // The cache that manages aggregation state - cache *lru.Cache - - // The function that groups events for aggregation - keyFunc EventAggregatorKeyFunc - - // The function that generates a message for an aggregate event - messageFunc EventAggregatorMessageFunc - - // The maximum number of events in the specified interval before aggregation occurs - maxEvents uint - - // The amount of time in seconds that must transpire since the last occurrence of a similar event before it's considered new - maxIntervalInSeconds uint - - // clock is used to allow for testing over a time interval - clock clock.Clock -} - -// NewEventAggregator returns a new instance of an EventAggregator -func NewEventAggregator(lruCacheSize int, keyFunc EventAggregatorKeyFunc, messageFunc EventAggregatorMessageFunc, - maxEvents int, maxIntervalInSeconds int, clock clock.Clock) *EventAggregator { - return &EventAggregator{ - cache: lru.New(lruCacheSize), - keyFunc: keyFunc, - messageFunc: messageFunc, - maxEvents: uint(maxEvents), - maxIntervalInSeconds: uint(maxIntervalInSeconds), - clock: clock, - } -} - -// aggregateRecord holds data used to perform aggregation decisions -type aggregateRecord struct { - // we track the number of unique local keys we have seen in the aggregate set to know when to actually aggregate - // if the size of this set exceeds the max, we know we need to aggregate - localKeys sets.String - // The last time at which the aggregate was recorded - lastTimestamp metav1.Time -} - -// EventAggregate checks if a similar event has been seen according to the -// aggregation configuration (max events, max interval, etc) and returns: -// -// - The (potentially modified) event that should be created -// - The 
cache key for the event, for correlation purposes. This will be set to -// the full key for normal events, and to the result of -// EventAggregatorMessageFunc for aggregate events. -func (e *EventAggregator) EventAggregate(newEvent *v1.Event) (*v1.Event, string) { - now := metav1.NewTime(e.clock.Now()) - var record aggregateRecord - // eventKey is the full cache key for this event - eventKey := getEventKey(newEvent) - // aggregateKey is for the aggregate event, if one is needed. - aggregateKey, localKey := e.keyFunc(newEvent) - - // Do we have a record of similar events in our cache? - e.Lock() - defer e.Unlock() - value, found := e.cache.Get(aggregateKey) - if found { - record = value.(aggregateRecord) - } - - // Is the previous record too old? If so, make a fresh one. Note: if we didn't - // find a similar record, its lastTimestamp will be the zero value, so we - // create a new one in that case. - maxInterval := time.Duration(e.maxIntervalInSeconds) * time.Second - interval := now.Time.Sub(record.lastTimestamp.Time) - if interval > maxInterval { - record = aggregateRecord{localKeys: sets.NewString()} - } - - // Write the new event into the aggregation record and put it on the cache - record.localKeys.Insert(localKey) - record.lastTimestamp = now - e.cache.Add(aggregateKey, record) - - // If we are not yet over the threshold for unique events, don't correlate them - if uint(record.localKeys.Len()) < e.maxEvents { - return newEvent, eventKey - } - - // do not grow our local key set any larger than max - record.localKeys.PopAny() - - // create a new aggregate event, and return the aggregateKey as the cache key - // (so that it can be overwritten.) - eventCopy := &v1.Event{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%v.%x", newEvent.InvolvedObject.Name, now.UnixNano()), - Namespace: newEvent.Namespace, - }, - Count: 1, - FirstTimestamp: now, - InvolvedObject: newEvent.InvolvedObject, - LastTimestamp: now, - Message: e.messageFunc(newEvent), - Type: newEvent.Type, - Reason: newEvent.Reason, - Source: newEvent.Source, - } - return eventCopy, aggregateKey -} - -// eventLog records data about when an event was observed -type eventLog struct { - // The number of times the event has occurred since first occurrence. - count uint - - // The time at which the event was first recorded. 
-	firstTimestamp metav1.Time
-
-	// The unique name of the first occurrence of this event
-	name string
-
-	// Resource version returned from previous interaction with server
-	resourceVersion string
-}
-
-// eventLogger logs occurrences of an event
-type eventLogger struct {
-	sync.RWMutex
-	cache *lru.Cache
-	clock clock.Clock
-}
-
-// newEventLogger observes events and counts their frequencies
-func newEventLogger(lruCacheEntries int, clock clock.Clock) *eventLogger {
-	return &eventLogger{cache: lru.New(lruCacheEntries), clock: clock}
-}
-
-// eventObserve records an event, or updates an existing one if key is a cache hit
-func (e *eventLogger) eventObserve(newEvent *v1.Event, key string) (*v1.Event, []byte, error) {
-	var (
-		patch []byte
-		err   error
-	)
-	eventCopy := *newEvent
-	event := &eventCopy
-
-	e.Lock()
-	defer e.Unlock()
-
-	// Check if there is an existing event we should update
-	lastObservation := e.lastEventObservationFromCache(key)
-
-	// If we found a result, prepare a patch
-	if lastObservation.count > 0 {
-		// update the event based on the last observation so patch will work as desired
-		event.Name = lastObservation.name
-		event.ResourceVersion = lastObservation.resourceVersion
-		event.FirstTimestamp = lastObservation.firstTimestamp
-		event.Count = int32(lastObservation.count) + 1
-
-		eventCopy2 := *event
-		eventCopy2.Count = 0
-		eventCopy2.LastTimestamp = metav1.NewTime(time.Unix(0, 0))
-		eventCopy2.Message = ""
-
-		newData, _ := json.Marshal(event)
-		oldData, _ := json.Marshal(eventCopy2)
-		patch, err = strategicpatch.CreateTwoWayMergePatch(oldData, newData, event)
-	}
-
-	// record our new observation
-	e.cache.Add(
-		key,
-		eventLog{
-			count:           uint(event.Count),
-			firstTimestamp:  event.FirstTimestamp,
-			name:            event.Name,
-			resourceVersion: event.ResourceVersion,
-		},
-	)
-	return event, patch, err
-}
-
-// updateState updates its internal tracking information based on latest server state
-func (e *eventLogger) updateState(event *v1.Event) {
-	key := getEventKey(event)
-	e.Lock()
-	defer e.Unlock()
-	// record our new observation
-	e.cache.Add(
-		key,
-		eventLog{
-			count:           uint(event.Count),
-			firstTimestamp:  event.FirstTimestamp,
-			name:            event.Name,
-			resourceVersion: event.ResourceVersion,
-		},
-	)
-}
-
-// lastEventObservationFromCache returns the event from the cache; reads must be protected by an external lock
-func (e *eventLogger) lastEventObservationFromCache(key string) eventLog {
-	value, ok := e.cache.Get(key)
-	if ok {
-		observationValue, ok := value.(eventLog)
-		if ok {
-			return observationValue
-		}
-	}
-	return eventLog{}
-}
-
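As a hedged aside: the dedup patch computed in eventObserve above is an ordinary strategic-merge patch between two serialized events. A minimal, self-contained sketch of that mechanism (illustrative values; errors ignored for brevity):

package main

import (
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
	// Two observations of the "same" event; the second bumps the count, which
	// mirrors the oldData/newData pair built inside eventObserve.
	oldData, _ := json.Marshal(&v1.Event{Count: 1})
	newData, _ := json.Marshal(&v1.Event{Count: 2})

	// The resulting patch contains only the changed fields, e.g. {"count":2}.
	patch, _ := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &v1.Event{})
	fmt.Printf("%s\n", patch)
}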
-// EventCorrelator processes all incoming events and performs analysis to avoid overwhelming the system. It can filter all
-// incoming events to determine whether they should be dropped from further processing. It can aggregate similar events that occur
-// frequently to protect the system from spamming events that are difficult for users to distinguish. It performs de-duplication
-// to ensure events that are observed multiple times are compacted into a single event with increasing counts.
-type EventCorrelator struct {
-	// the function to filter the event
-	filterFunc EventFilterFunc
-	// the object that performs event aggregation
-	aggregator *EventAggregator
-	// the object that observes events as they come through
-	logger *eventLogger
-}
-
-// EventCorrelateResult is the result of a Correlate
-type EventCorrelateResult struct {
-	// the event after correlation
-	Event *v1.Event
-	// if provided, perform a strategic patch when updating the record on the server
-	Patch []byte
-	// if true, do no further processing of the event
-	Skip bool
-}
-
-// NewEventCorrelator returns an EventCorrelator configured with default values.
-//
-// The EventCorrelator is responsible for event filtering, aggregating, and counting
-// prior to interacting with the API server to record the event.
-//
-// The default behavior is as follows:
-// * Aggregation is performed if a similar event is recorded 10 times in a
-// 10 minute rolling interval. A similar event is an event that varies only by
-// the Event.Message field. Rather than recording the precise event, aggregation
-// will create a new event whose message reports that it has combined events with
-// the same reason.
-// * Events are incrementally counted if the exact same event is encountered multiple
-// times.
-// * A source may burst 25 events about an object, but has a refill rate budget
-// per object of 1 event every 5 minutes to control the long tail of spam.
-func NewEventCorrelator(clock clock.Clock) *EventCorrelator {
-	cacheSize := maxLruCacheEntries
-	spamFilter := NewEventSourceObjectSpamFilter(cacheSize, defaultSpamBurst, defaultSpamQPS, clock)
-	return &EventCorrelator{
-		filterFunc: spamFilter.Filter,
-		aggregator: NewEventAggregator(
-			cacheSize,
-			EventAggregatorByReasonFunc,
-			EventAggregatorByReasonMessageFunc,
-			defaultAggregateMaxEvents,
-			defaultAggregateIntervalInSeconds,
-			clock),
-
-		logger: newEventLogger(cacheSize, clock),
-	}
-}
-
-// EventCorrelate filters, aggregates, counts, and de-duplicates all incoming events
-func (c *EventCorrelator) EventCorrelate(newEvent *v1.Event) (*EventCorrelateResult, error) {
-	if newEvent == nil {
-		return nil, fmt.Errorf("event is nil")
-	}
-	aggregateEvent, ckey := c.aggregator.EventAggregate(newEvent)
-	observedEvent, patch, err := c.logger.eventObserve(aggregateEvent, ckey)
-	if c.filterFunc(observedEvent) {
-		return &EventCorrelateResult{Skip: true}, nil
-	}
-	return &EventCorrelateResult{Event: observedEvent, Patch: patch}, err
-}
-
-// UpdateState based on the latest observed state from server
-func (c *EventCorrelator) UpdateState(event *v1.Event) {
-	c.logger.updateState(event)
-}
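A hypothetical sketch of how a recording sink might drive this correlator, loosely modeled on this package's own record loop; EventSink is the package's real interface, the rest of the names are illustrative:

// recordEvent correlates an event, drops spam, then either creates the record
// or patches the existing one, feeding the server's answer back in.
func recordEvent(sink EventSink, correlator *EventCorrelator, event *v1.Event) error {
	result, err := correlator.EventCorrelate(event)
	if err != nil {
		return err
	}
	if result.Skip {
		return nil // filtered as spam, nothing to send
	}

	var served *v1.Event
	if result.Event.Count > 1 {
		served, err = sink.Patch(result.Event, result.Patch) // duplicate: strategic patch
	} else {
		served, err = sink.Create(result.Event) // first occurrence: new object
	}
	if err != nil {
		return err
	}
	correlator.UpdateState(served)
	return nil
}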
diff --git a/vendor/k8s.io/client-go/tools/record/fake.go b/vendor/k8s.io/client-go/tools/record/fake.go
deleted file mode 100644
index 6e031daaf..000000000
--- a/vendor/k8s.io/client-go/tools/record/fake.go
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package record
-
-import (
-	"fmt"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-)
-
-// FakeRecorder is used as a fake during tests. It is thread safe. It is usable
-// when created manually and not by NewFakeRecorder; however, all events may be
-// thrown away in this case.
-type FakeRecorder struct {
-	Events chan string
-}
-
-func (f *FakeRecorder) Event(object runtime.Object, eventtype, reason, message string) {
-	if f.Events != nil {
-		f.Events <- fmt.Sprintf("%s %s %s", eventtype, reason, message)
-	}
-}
-
-func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
-	if f.Events != nil {
-		f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...)
-	}
-}
-
-func (f *FakeRecorder) PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) {
-}
-
-func (f *FakeRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) {
-	f.Eventf(object, eventtype, reason, messageFmt, args...)
-}
-
-// NewFakeRecorder creates a new fake event recorder with an event channel with
-// a buffer of the given size.
-func NewFakeRecorder(bufferSize int) *FakeRecorder {
-	return &FakeRecorder{
-		Events: make(chan string, bufferSize),
-	}
-}
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/doc.go b/vendor/k8s.io/client-go/tools/remotecommand/doc.go
deleted file mode 100644
index ac06a9cd3..000000000
--- a/vendor/k8s.io/client-go/tools/remotecommand/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package remotecommand adds support for executing commands in containers,
-// with support for separate stdin, stdout, and stderr streams, as well as
-// TTY.
-package remotecommand // import "k8s.io/client-go/tools/remotecommand"
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/errorstream.go b/vendor/k8s.io/client-go/tools/remotecommand/errorstream.go
deleted file mode 100644
index 360276b65..000000000
--- a/vendor/k8s.io/client-go/tools/remotecommand/errorstream.go
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package remotecommand
-
-import (
-	"fmt"
-	"io"
-	"io/ioutil"
-
-	"k8s.io/apimachinery/pkg/util/runtime"
-)
-
-// errorStreamDecoder interprets the data on the error channel and creates a Go error object from it.
-type errorStreamDecoder interface { - decode(message []byte) error -} - -// watchErrorStream watches the errorStream for remote command error data, -// decodes it with the given errorStreamDecoder, sends the decoded error (or nil if the remote -// command exited successfully) to the returned error channel, and closes it. -// This function returns immediately. -func watchErrorStream(errorStream io.Reader, d errorStreamDecoder) chan error { - errorChan := make(chan error) - - go func() { - defer runtime.HandleCrash() - - message, err := ioutil.ReadAll(errorStream) - switch { - case err != nil && err != io.EOF: - errorChan <- fmt.Errorf("error reading from error stream: %s", err) - case len(message) > 0: - errorChan <- d.decode(message) - default: - errorChan <- nil - } - close(errorChan) - }() - - return errorChan -} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go b/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go deleted file mode 100644 index d2b29861e..000000000 --- a/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go +++ /dev/null @@ -1,142 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package remotecommand - -import ( - "fmt" - "io" - "net/http" - "net/url" - - "github.com/golang/glog" - - "k8s.io/apimachinery/pkg/util/httpstream" - "k8s.io/apimachinery/pkg/util/remotecommand" - restclient "k8s.io/client-go/rest" - spdy "k8s.io/client-go/transport/spdy" -) - -// StreamOptions holds information pertaining to the current streaming session: -// input/output streams, if the client is requesting a TTY, and a terminal size queue to -// support terminal resizing. -type StreamOptions struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer - Tty bool - TerminalSizeQueue TerminalSizeQueue -} - -// Executor is an interface for transporting shell-style streams. -type Executor interface { - // Stream initiates the transport of the standard shell streams. It will transport any - // non-nil stream to a remote system, and return an error if a problem occurs. If tty - // is set, the stderr stream is not used (raw TTY manages stdout and stderr over the - // stdout stream). - Stream(options StreamOptions) error -} - -type streamCreator interface { - CreateStream(headers http.Header) (httpstream.Stream, error) -} - -type streamProtocolHandler interface { - stream(conn streamCreator) error -} - -// streamExecutor handles transporting standard shell streams over an httpstream connection. -type streamExecutor struct { - upgrader spdy.Upgrader - transport http.RoundTripper - - method string - url *url.URL - protocols []string -} - -// NewSPDYExecutor connects to the provided server and upgrades the connection to -// multiplexed bidirectional streams. 
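Before the constructors that follow, a hypothetical usage sketch of this API: `config` (a *restclient.Config) and `execURL` (a pod's exec subresource URL) are assumed to exist, and error handling is abbreviated:

// Run a remote command and mirror its streams onto the local process;
// Tty is false, so stderr arrives on its own stream.
exec, err := remotecommand.NewSPDYExecutor(config, "POST", execURL)
if err != nil {
	return err
}
return exec.Stream(remotecommand.StreamOptions{
	Stdin:  os.Stdin,
	Stdout: os.Stdout,
	Stderr: os.Stderr,
	Tty:    false,
})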
-func NewSPDYExecutor(config *restclient.Config, method string, url *url.URL) (Executor, error) {
-	wrapper, upgradeRoundTripper, err := spdy.RoundTripperFor(config)
-	if err != nil {
-		return nil, err
-	}
-	return NewSPDYExecutorForTransports(wrapper, upgradeRoundTripper, method, url)
-}
-
-// NewSPDYExecutorForTransports connects to the provided server using the given transport,
-// and upgrades the response to multiplexed bidirectional streams using the given upgrader.
-func NewSPDYExecutorForTransports(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) {
-	return NewSPDYExecutorForProtocols(
-		transport, upgrader, method, url,
-		remotecommand.StreamProtocolV4Name,
-		remotecommand.StreamProtocolV3Name,
-		remotecommand.StreamProtocolV2Name,
-		remotecommand.StreamProtocolV1Name,
-	)
-}
-
-// NewSPDYExecutorForProtocols connects to the provided server and upgrades the connection to
-// multiplexed bidirectional streams using only the provided protocols. Exposed for testing; most
-// callers should use NewSPDYExecutor or NewSPDYExecutorForTransports.
-func NewSPDYExecutorForProtocols(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL, protocols ...string) (Executor, error) {
-	return &streamExecutor{
-		upgrader:  upgrader,
-		transport: transport,
-		method:    method,
-		url:       url,
-		protocols: protocols,
-	}, nil
-}
-
-// Stream opens a protocol streamer to the server and streams until a client closes
-// the connection or the server disconnects.
-func (e *streamExecutor) Stream(options StreamOptions) error {
-	req, err := http.NewRequest(e.method, e.url.String(), nil)
-	if err != nil {
-		return fmt.Errorf("error creating request: %v", err)
-	}
-
-	conn, protocol, err := spdy.Negotiate(
-		e.upgrader,
-		&http.Client{Transport: e.transport},
-		req,
-		e.protocols...,
-	)
-	if err != nil {
-		return err
-	}
-	defer conn.Close()
-
-	var streamer streamProtocolHandler
-
-	switch protocol {
-	case remotecommand.StreamProtocolV4Name:
-		streamer = newStreamProtocolV4(options)
-	case remotecommand.StreamProtocolV3Name:
-		streamer = newStreamProtocolV3(options)
-	case remotecommand.StreamProtocolV2Name:
-		streamer = newStreamProtocolV2(options)
-	case "":
-		glog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name)
-		fallthrough
-	case remotecommand.StreamProtocolV1Name:
-		streamer = newStreamProtocolV1(options)
-	}
-
-	return streamer.stream(conn)
-}
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/resize.go b/vendor/k8s.io/client-go/tools/remotecommand/resize.go
deleted file mode 100644
index c838f21ba..000000000
--- a/vendor/k8s.io/client-go/tools/remotecommand/resize.go
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package remotecommand
-
-// TerminalSize and TerminalSizeQueue were part of k8s.io/kubernetes/pkg/util/term
-// and were moved in order to decouple client from other term dependencies
-
-// TerminalSize represents the width and height of a terminal.
-type TerminalSize struct {
-	Width  uint16
-	Height uint16
-}
-
-// TerminalSizeQueue is capable of returning terminal resize events as they occur.
-type TerminalSizeQueue interface {
-	// Next returns the new terminal size after the terminal has been resized. It returns nil when
-	// monitoring has been stopped.
-	Next() *TerminalSize
-}
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v1.go b/vendor/k8s.io/client-go/tools/remotecommand/v1.go
deleted file mode 100644
index 92dad727f..000000000
--- a/vendor/k8s.io/client-go/tools/remotecommand/v1.go
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package remotecommand
-
-import (
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net/http"
-
-	"github.com/golang/glog"
-	"k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/util/httpstream"
-)
-
-// streamProtocolV1 implements the first version of the streaming exec & attach
-// protocol. This version has some bugs, such as not being able to detect when
-// non-interactive stdin data has ended. See http://issues.k8s.io/13394 and
-// http://issues.k8s.io/13395 for more details.
-type streamProtocolV1 struct {
-	StreamOptions
-
-	errorStream  httpstream.Stream
-	remoteStdin  httpstream.Stream
-	remoteStdout httpstream.Stream
-	remoteStderr httpstream.Stream
-}
-
-var _ streamProtocolHandler = &streamProtocolV1{}
-
-func newStreamProtocolV1(options StreamOptions) streamProtocolHandler {
-	return &streamProtocolV1{
-		StreamOptions: options,
-	}
-}
-
-func (p *streamProtocolV1) stream(conn streamCreator) error {
-	doneChan := make(chan struct{}, 2)
-	errorChan := make(chan error)
-
-	cp := func(s string, dst io.Writer, src io.Reader) {
-		glog.V(6).Infof("Copying %s", s)
-		defer glog.V(6).Infof("Done copying %s", s)
-		if _, err := io.Copy(dst, src); err != nil && err != io.EOF {
-			glog.Errorf("Error copying %s: %v", s, err)
-		}
-		if s == v1.StreamTypeStdout || s == v1.StreamTypeStderr {
-			doneChan <- struct{}{}
-		}
-	}
-
-	// set up all the streams first
-	var err error
-	headers := http.Header{}
-	headers.Set(v1.StreamType, v1.StreamTypeError)
-	p.errorStream, err = conn.CreateStream(headers)
-	if err != nil {
-		return err
-	}
-	defer p.errorStream.Reset()
-
-	// Create all the streams first, then start the copy goroutines. The server doesn't start its copy
-	// goroutines until it's received all of the streams. If the client creates the stdin stream and
-	// immediately begins copying stdin data to the server, it's possible to overwhelm and wedge the
-	// spdy frame handler in the server so that it is full of unprocessed frames. The frames aren't
-	// getting processed because the server hasn't started its copying, and it won't do that until it
-	// gets all the streams. By creating all the streams first, we ensure that the server is ready to
-	// process data before the client starts sending any. See https://issues.k8s.io/16373 for more info.
-	if p.Stdin != nil {
-		headers.Set(v1.StreamType, v1.StreamTypeStdin)
-		p.remoteStdin, err = conn.CreateStream(headers)
-		if err != nil {
-			return err
-		}
-		defer p.remoteStdin.Reset()
-	}
-
-	if p.Stdout != nil {
-		headers.Set(v1.StreamType, v1.StreamTypeStdout)
-		p.remoteStdout, err = conn.CreateStream(headers)
-		if err != nil {
-			return err
-		}
-		defer p.remoteStdout.Reset()
-	}
-
-	if p.Stderr != nil && !p.Tty {
-		headers.Set(v1.StreamType, v1.StreamTypeStderr)
-		p.remoteStderr, err = conn.CreateStream(headers)
-		if err != nil {
-			return err
-		}
-		defer p.remoteStderr.Reset()
-	}
-
-	// now that all the streams have been created, proceed with reading & copying
-
-	// always read from errorStream
-	go func() {
-		message, err := ioutil.ReadAll(p.errorStream)
-		if err != nil && err != io.EOF {
-			errorChan <- fmt.Errorf("Error reading from error stream: %s", err)
-			return
-		}
-		if len(message) > 0 {
-			errorChan <- fmt.Errorf("Error executing remote command: %s", message)
-			return
-		}
-	}()
-
-	if p.Stdin != nil {
-		// TODO this goroutine will never exit cleanly (the io.Copy never unblocks)
-		// because stdin is not closed until the process exits. If we try to call
-		// stdin.Close(), it returns no error but doesn't unblock the copy. It will
-		// exit when the process exits, instead.
-		go cp(v1.StreamTypeStdin, p.remoteStdin, p.Stdin)
-	}
-
-	waitCount := 0
-	completedStreams := 0
-
-	if p.Stdout != nil {
-		waitCount++
-		go cp(v1.StreamTypeStdout, p.Stdout, p.remoteStdout)
-	}
-
-	if p.Stderr != nil && !p.Tty {
-		waitCount++
-		go cp(v1.StreamTypeStderr, p.Stderr, p.remoteStderr)
-	}
-
-Loop:
-	for {
-		select {
-		case <-doneChan:
-			completedStreams++
-			if completedStreams == waitCount {
-				break Loop
-			}
-		case err := <-errorChan:
-			return err
-		}
-	}
-
-	return nil
-}
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v2.go b/vendor/k8s.io/client-go/tools/remotecommand/v2.go
deleted file mode 100644
index b74ae8de2..000000000
--- a/vendor/k8s.io/client-go/tools/remotecommand/v2.go
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package remotecommand
-
-import (
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"sync"
-
-	"k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/util/runtime"
-)
-
-// streamProtocolV2 implements version 2 of the streaming protocol for attach
-// and exec. The original streaming protocol was unversioned. As a result, this
-// version is referred to as version 2, even though it is the first actual
-// numbered version.
-type streamProtocolV2 struct {
-	StreamOptions
-
-	errorStream  io.Reader
-	remoteStdin  io.ReadWriteCloser
-	remoteStdout io.Reader
-	remoteStderr io.Reader
-}
-
-var _ streamProtocolHandler = &streamProtocolV2{}
-
-func newStreamProtocolV2(options StreamOptions) streamProtocolHandler {
-	return &streamProtocolV2{
-		StreamOptions: options,
-	}
-}
-
-func (p *streamProtocolV2) createStreams(conn streamCreator) error {
-	var err error
-	headers := http.Header{}
-
-	// set up error stream
-	headers.Set(v1.StreamType, v1.StreamTypeError)
-	p.errorStream, err = conn.CreateStream(headers)
-	if err != nil {
-		return err
-	}
-
-	// set up stdin stream
-	if p.Stdin != nil {
-		headers.Set(v1.StreamType, v1.StreamTypeStdin)
-		p.remoteStdin, err = conn.CreateStream(headers)
-		if err != nil {
-			return err
-		}
-	}
-
-	// set up stdout stream
-	if p.Stdout != nil {
-		headers.Set(v1.StreamType, v1.StreamTypeStdout)
-		p.remoteStdout, err = conn.CreateStream(headers)
-		if err != nil {
-			return err
-		}
-	}
-
-	// set up stderr stream
-	if p.Stderr != nil && !p.Tty {
-		headers.Set(v1.StreamType, v1.StreamTypeStderr)
-		p.remoteStderr, err = conn.CreateStream(headers)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (p *streamProtocolV2) copyStdin() {
-	if p.Stdin != nil {
-		var once sync.Once
-
-		// copy from client's stdin to container's stdin
-		go func() {
-			defer runtime.HandleCrash()
-
-			// if p.Stdin is noninteractive, e.g. `echo abc | kubectl exec -i -- cat`, make sure
-			// we close remoteStdin as soon as the copy from p.Stdin to remoteStdin finishes. Otherwise
-			// the executed command will remain running.
-			defer once.Do(func() { p.remoteStdin.Close() })
-
-			if _, err := io.Copy(p.remoteStdin, p.Stdin); err != nil {
-				runtime.HandleError(err)
-			}
-		}()
-
-		// read from remoteStdin until the stream is closed. this is essential to
-		// be able to exit interactive sessions cleanly and not leak goroutines or
-		// hang the client's terminal.
-		//
-		// TODO we aren't using go-dockerclient any more; revisit this to determine if it's still
-		// required by engine-api.
-		//
-		// go-dockerclient's current hijack implementation
-		// (https://github.com/fsouza/go-dockerclient/blob/89f3d56d93788dfe85f864a44f85d9738fca0670/client.go#L564)
-		// waits for all three streams (stdin/stdout/stderr) to finish copying
-		// before returning. When hijack finishes copying stdout/stderr, it calls
-		// Close() on its side of remoteStdin, which allows this copy to complete.
-		// When that happens, we must Close() on our side of remoteStdin, to
-		// allow the copy in hijack to complete, and hijack to return.
-		go func() {
-			defer runtime.HandleCrash()
-			defer once.Do(func() { p.remoteStdin.Close() })
-
-			// this "copy" doesn't actually read anything - it's just here to wait for
-			// the server to close remoteStdin.
- if _, err := io.Copy(ioutil.Discard, p.remoteStdin); err != nil { - runtime.HandleError(err) - } - }() - } -} - -func (p *streamProtocolV2) copyStdout(wg *sync.WaitGroup) { - if p.Stdout == nil { - return - } - - wg.Add(1) - go func() { - defer runtime.HandleCrash() - defer wg.Done() - - if _, err := io.Copy(p.Stdout, p.remoteStdout); err != nil { - runtime.HandleError(err) - } - }() -} - -func (p *streamProtocolV2) copyStderr(wg *sync.WaitGroup) { - if p.Stderr == nil || p.Tty { - return - } - - wg.Add(1) - go func() { - defer runtime.HandleCrash() - defer wg.Done() - - if _, err := io.Copy(p.Stderr, p.remoteStderr); err != nil { - runtime.HandleError(err) - } - }() -} - -func (p *streamProtocolV2) stream(conn streamCreator) error { - if err := p.createStreams(conn); err != nil { - return err - } - - // now that all the streams have been created, proceed with reading & copying - - errorChan := watchErrorStream(p.errorStream, &errorDecoderV2{}) - - p.copyStdin() - - var wg sync.WaitGroup - p.copyStdout(&wg) - p.copyStderr(&wg) - - // we're waiting for stdout/stderr to finish copying - wg.Wait() - - // waits for errorStream to finish reading with an error or nil - return <-errorChan -} - -// errorDecoderV2 interprets the error channel data as plain text. -type errorDecoderV2 struct{} - -func (d *errorDecoderV2) decode(message []byte) error { - return fmt.Errorf("error executing remote command: %s", message) -} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v3.go b/vendor/k8s.io/client-go/tools/remotecommand/v3.go deleted file mode 100644 index 846dd24a5..000000000 --- a/vendor/k8s.io/client-go/tools/remotecommand/v3.go +++ /dev/null @@ -1,111 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package remotecommand - -import ( - "encoding/json" - "io" - "net/http" - "sync" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/runtime" -) - -// streamProtocolV3 implements version 3 of the streaming protocol for attach -// and exec. This version adds support for resizing the container's terminal. 
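The resize stream added by this version is fed from the TerminalSizeQueue interface defined in resize.go above. A minimal channel-backed implementation, as a hypothetical sketch rather than anything from the deleted file, might look like:

// sizeQueue adapts a channel of sizes to TerminalSizeQueue. Closing the
// channel makes Next return nil, which stops resize monitoring.
type sizeQueue struct {
	resize chan TerminalSize
}

func (s *sizeQueue) Next() *TerminalSize {
	size, ok := <-s.resize
	if !ok {
		return nil
	}
	return &size
}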
-type streamProtocolV3 struct { - *streamProtocolV2 - - resizeStream io.Writer -} - -var _ streamProtocolHandler = &streamProtocolV3{} - -func newStreamProtocolV3(options StreamOptions) streamProtocolHandler { - return &streamProtocolV3{ - streamProtocolV2: newStreamProtocolV2(options).(*streamProtocolV2), - } -} - -func (p *streamProtocolV3) createStreams(conn streamCreator) error { - // set up the streams from v2 - if err := p.streamProtocolV2.createStreams(conn); err != nil { - return err - } - - // set up resize stream - if p.Tty { - headers := http.Header{} - headers.Set(v1.StreamType, v1.StreamTypeResize) - var err error - p.resizeStream, err = conn.CreateStream(headers) - if err != nil { - return err - } - } - - return nil -} - -func (p *streamProtocolV3) handleResizes() { - if p.resizeStream == nil || p.TerminalSizeQueue == nil { - return - } - go func() { - defer runtime.HandleCrash() - - encoder := json.NewEncoder(p.resizeStream) - for { - size := p.TerminalSizeQueue.Next() - if size == nil { - return - } - if err := encoder.Encode(&size); err != nil { - runtime.HandleError(err) - } - } - }() -} - -func (p *streamProtocolV3) stream(conn streamCreator) error { - if err := p.createStreams(conn); err != nil { - return err - } - - // now that all the streams have been created, proceed with reading & copying - - errorChan := watchErrorStream(p.errorStream, &errorDecoderV3{}) - - p.handleResizes() - - p.copyStdin() - - var wg sync.WaitGroup - p.copyStdout(&wg) - p.copyStderr(&wg) - - // we're waiting for stdout/stderr to finish copying - wg.Wait() - - // waits for errorStream to finish reading with an error or nil - return <-errorChan -} - -type errorDecoderV3 struct { - errorDecoderV2 -} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v4.go b/vendor/k8s.io/client-go/tools/remotecommand/v4.go deleted file mode 100644 index 69ca934a0..000000000 --- a/vendor/k8s.io/client-go/tools/remotecommand/v4.go +++ /dev/null @@ -1,119 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package remotecommand - -import ( - "encoding/json" - "errors" - "fmt" - "strconv" - "sync" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/remotecommand" - "k8s.io/client-go/util/exec" -) - -// streamProtocolV4 implements version 4 of the streaming protocol for attach -// and exec. This version adds support for exit codes on the error stream through -// the use of metav1.Status instead of plain text messages. 
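For orientation, a non-zero exit typically arrives on the v4 error stream as a JSON-encoded metav1.Status along these lines (an assumed example, not taken from the deleted file; note that metav1.StatusCause's Type field is serialized under the "reason" key):

// Illustrative payload that errorDecoderV4 below would turn into an
// exec.CodeExitError with Code == 42.
const exampleFailure = `{
	"metadata": {},
	"status": "Failure",
	"message": "command terminated with non-zero exit code",
	"reason": "NonZeroExitCode",
	"details": {"causes": [{"reason": "ExitCode", "message": "42"}]}
}`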
-type streamProtocolV4 struct { - *streamProtocolV3 -} - -var _ streamProtocolHandler = &streamProtocolV4{} - -func newStreamProtocolV4(options StreamOptions) streamProtocolHandler { - return &streamProtocolV4{ - streamProtocolV3: newStreamProtocolV3(options).(*streamProtocolV3), - } -} - -func (p *streamProtocolV4) createStreams(conn streamCreator) error { - return p.streamProtocolV3.createStreams(conn) -} - -func (p *streamProtocolV4) handleResizes() { - p.streamProtocolV3.handleResizes() -} - -func (p *streamProtocolV4) stream(conn streamCreator) error { - if err := p.createStreams(conn); err != nil { - return err - } - - // now that all the streams have been created, proceed with reading & copying - - errorChan := watchErrorStream(p.errorStream, &errorDecoderV4{}) - - p.handleResizes() - - p.copyStdin() - - var wg sync.WaitGroup - p.copyStdout(&wg) - p.copyStderr(&wg) - - // we're waiting for stdout/stderr to finish copying - wg.Wait() - - // waits for errorStream to finish reading with an error or nil - return <-errorChan -} - -// errorDecoderV4 interprets the json-marshaled metav1.Status on the error channel -// and creates an exec.ExitError from it. -type errorDecoderV4 struct{} - -func (d *errorDecoderV4) decode(message []byte) error { - status := metav1.Status{} - err := json.Unmarshal(message, &status) - if err != nil { - return fmt.Errorf("error stream protocol error: %v in %q", err, string(message)) - } - switch status.Status { - case metav1.StatusSuccess: - return nil - case metav1.StatusFailure: - if status.Reason == remotecommand.NonZeroExitCodeReason { - if status.Details == nil { - return errors.New("error stream protocol error: details must be set") - } - for i := range status.Details.Causes { - c := &status.Details.Causes[i] - if c.Type != remotecommand.ExitCodeCauseType { - continue - } - - rc, err := strconv.ParseUint(c.Message, 10, 8) - if err != nil { - return fmt.Errorf("error stream protocol error: invalid exit code value %q", c.Message) - } - return exec.CodeExitError{ - Err: fmt.Errorf("command terminated with exit code %d", rc), - Code: int(rc), - } - } - - return fmt.Errorf("error stream protocol error: no %s cause given", remotecommand.ExitCodeCauseType) - } - default: - return errors.New("error stream protocol error: unknown error") - } - - return fmt.Errorf(status.Message) -} diff --git a/vendor/k8s.io/client-go/tools/watch/informerwatcher.go b/vendor/k8s.io/client-go/tools/watch/informerwatcher.go deleted file mode 100644 index 35a346949..000000000 --- a/vendor/k8s.io/client-go/tools/watch/informerwatcher.go +++ /dev/null @@ -1,114 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package watch
-
-import (
-	"sync"
-	"sync/atomic"
-
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/watch"
-	"k8s.io/client-go/tools/cache"
-)
-
-func newTicketer() *ticketer {
-	return &ticketer{
-		cond: sync.NewCond(&sync.Mutex{}),
-	}
-}
-
-type ticketer struct {
-	counter uint64
-
-	cond    *sync.Cond
-	current uint64
-}
-
-func (t *ticketer) GetTicket() uint64 {
-	// -1 to start from 0
-	return atomic.AddUint64(&t.counter, 1) - 1
-}
-
-func (t *ticketer) WaitForTicket(ticket uint64, f func()) {
-	t.cond.L.Lock()
-	defer t.cond.L.Unlock()
-	for ticket != t.current {
-		t.cond.Wait()
-	}
-
-	f()
-
-	t.current++
-	t.cond.Broadcast()
-}
-
-// NewIndexerInformerWatcher will create an IndexerInformer and wrap it into watch.Interface
-// so you can use it anywhere where you'd have used a regular Watcher returned from the Watch method.
-func NewIndexerInformerWatcher(lw cache.ListerWatcher, objType runtime.Object) (cache.Indexer, cache.Controller, watch.Interface) {
-	ch := make(chan watch.Event)
-	w := watch.NewProxyWatcher(ch)
-	t := newTicketer()
-
-	indexer, informer := cache.NewIndexerInformer(lw, objType, 0, cache.ResourceEventHandlerFuncs{
-		AddFunc: func(obj interface{}) {
-			go t.WaitForTicket(t.GetTicket(), func() {
-				select {
-				case ch <- watch.Event{
-					Type:   watch.Added,
-					Object: obj.(runtime.Object),
-				}:
-				case <-w.StopChan():
-				}
-			})
-		},
-		UpdateFunc: func(old, new interface{}) {
-			go t.WaitForTicket(t.GetTicket(), func() {
-				select {
-				case ch <- watch.Event{
-					Type:   watch.Modified,
-					Object: new.(runtime.Object),
-				}:
-				case <-w.StopChan():
-				}
-			})
-		},
-		DeleteFunc: func(obj interface{}) {
-			go t.WaitForTicket(t.GetTicket(), func() {
-				staleObj, stale := obj.(cache.DeletedFinalStateUnknown)
-				if stale {
-					// We have no means of passing the additional information down using the watch API based on watch.Event,
-					// but the caller can filter such objects by checking if metadata.deletionTimestamp is set
-					obj = staleObj.Obj
-				}
-
-				select {
-				case ch <- watch.Event{
-					Type:   watch.Deleted,
-					Object: obj.(runtime.Object),
-				}:
-				case <-w.StopChan():
-				}
-			})
-		},
-	}, cache.Indexers{})
-
-	go func() {
-		informer.Run(w.StopChan())
-	}()
-
-	return indexer, informer, w
-}
diff --git a/vendor/k8s.io/client-go/tools/watch/until.go b/vendor/k8s.io/client-go/tools/watch/until.go
deleted file mode 100644
index 933578843..000000000
--- a/vendor/k8s.io/client-go/tools/watch/until.go
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package watch
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"time"
-
-	"github.com/golang/glog"
-	"k8s.io/apimachinery/pkg/api/meta"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/apimachinery/pkg/watch"
-	"k8s.io/client-go/tools/cache"
-)
-
-// PreconditionFunc returns true if the condition has been reached, false if it has not been reached yet,
-// or an error if the condition failed or detected an error state.
-type PreconditionFunc func(store cache.Store) (bool, error)
-
-// ConditionFunc returns true if the condition has been reached, false if it has not been reached yet,
-// or an error if the condition cannot be checked and should terminate. In general, it is better to define
-// level driven conditions over edge driven conditions (pod has ready=true, vs pod modified and ready changed
-// from false to true).
-type ConditionFunc func(event watch.Event) (bool, error)
-
-// ErrWatchClosed is returned when the watch channel is closed before timeout in UntilWithoutRetry.
-var ErrWatchClosed = errors.New("watch closed before UntilWithoutRetry timeout")
-
-// UntilWithoutRetry reads items from the watch until each provided condition succeeds, and then returns the last watch
-// encountered. The first condition that returns an error terminates the watch (and the event is also returned).
-// If no event has been received, the returned event will be nil.
-// Conditions are satisfied sequentially so as to provide a useful primitive for higher level composition.
-// Waits until context deadline or until context is canceled.
-//
-// Warning: Unless you have a very specific use case (probably a special Watcher) don't use this function!!!
-// Warning: This will fail e.g. on API timeouts and/or 'too old resource version' error.
-// Warning: You are most probably looking for a function *Until* or *UntilWithSync* below,
-// Warning: solving such issues.
-// TODO: Consider making this function private to prevent misuse when the other occurrences in our codebase are gone.
-func UntilWithoutRetry(ctx context.Context, watcher watch.Interface, conditions ...ConditionFunc) (*watch.Event, error) {
-	ch := watcher.ResultChan()
-	defer watcher.Stop()
-	var lastEvent *watch.Event
-	for _, condition := range conditions {
-		// check the next condition against the previous event and short circuit waiting for the next watch
-		if lastEvent != nil {
-			done, err := condition(*lastEvent)
-			if err != nil {
-				return lastEvent, err
-			}
-			if done {
-				continue
-			}
-		}
-	ConditionSucceeded:
-		for {
-			select {
-			case event, ok := <-ch:
-				if !ok {
-					return lastEvent, ErrWatchClosed
-				}
-				lastEvent = &event
-
-				done, err := condition(event)
-				if err != nil {
-					return lastEvent, err
-				}
-				if done {
-					break ConditionSucceeded
-				}
-
-			case <-ctx.Done():
-				return lastEvent, wait.ErrWaitTimeout
-			}
-		}
-	}
-	return lastEvent, nil
-}
-
-// UntilWithSync creates an informer from lw, optionally checks precondition when the store is synced,
-// and watches the output until each provided condition succeeds, in a way that is identical
-// to function UntilWithoutRetry. (See above.)
-// UntilWithSync can deal with all errors like API timeout, lost connections and 'Resource version too old'.
-// It is the only function that can recover from 'Resource version too old'; Until and UntilWithoutRetry will
-// just fail in that case. On the other hand, it can't provide you with guarantees as strong as using a simple
-// Watch method with Until. It can skip some intermediate events in case the watch function fails, but it will
-// re-list to recover, and you always get an event, if there has been a change, after recovery.
-// Also, with the current implementation based on DeltaFIFO, the order of the events you receive is guaranteed only
-// for a particular object, not between more of them, even if it's the same resource.
-// The most frequent usage would be a command that needs to watch the "state of the world" and shouldn't fail, like:
-// waiting for an object to reach a state, "small" controllers, ...
-func UntilWithSync(ctx context.Context, lw cache.ListerWatcher, objType runtime.Object, precondition PreconditionFunc, conditions ...ConditionFunc) (*watch.Event, error) {
-	indexer, informer, watcher := NewIndexerInformerWatcher(lw, objType)
-	// Proxy watcher can be stopped multiple times, so it's fine to use defer here to cover alternative branches and
-	// let UntilWithoutRetry stop it
-	defer watcher.Stop()
-
-	if precondition != nil {
-		if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) {
-			return nil, fmt.Errorf("UntilWithSync: unable to sync caches: %v", ctx.Err())
-		}
-
-		done, err := precondition(indexer)
-		if err != nil {
-			return nil, err
-		}
-
-		if done {
-			return nil, nil
-		}
-	}
-
-	return UntilWithoutRetry(ctx, watcher, conditions...)
-}
-
-// ContextWithOptionalTimeout wraps context.WithTimeout and handles infinite timeouts expressed as 0 duration.
-func ContextWithOptionalTimeout(parent context.Context, timeout time.Duration) (context.Context, context.CancelFunc) {
-	if timeout < 0 {
-		// This should be handled in validation
-		glog.Errorf("Timeout for context shall not be negative!")
-		timeout = 0
-	}
-
-	if timeout == 0 {
-		return context.WithCancel(parent)
-	}
-
-	return context.WithTimeout(parent, timeout)
-}
-
-// ListWatchUntil checks the provided conditions against the items returned by the list watcher, returning wait.ErrWaitTimeout
-// if timeout is exceeded without all conditions returning true, or an error if an error occurs.
-// TODO: check for watch expired error and retry watch from latest point? Same issue exists for Until.
-// TODO: remove when no longer used
-//
-// Deprecated: Use UntilWithSync instead.
-func ListWatchUntil(timeout time.Duration, lw cache.ListerWatcher, conditions ...ConditionFunc) (*watch.Event, error) {
-	if len(conditions) == 0 {
-		return nil, nil
-	}
-
-	list, err := lw.List(metav1.ListOptions{})
-	if err != nil {
-		return nil, err
-	}
-	initialItems, err := meta.ExtractList(list)
-	if err != nil {
-		return nil, err
-	}
-
-	// use the initial items as simulated "adds"
-	var lastEvent *watch.Event
-	currIndex := 0
-	passedConditions := 0
-	for _, condition := range conditions {
-		// check the next condition against the previous event and short circuit waiting for the next watch
-		if lastEvent != nil {
-			done, err := condition(*lastEvent)
-			if err != nil {
-				return lastEvent, err
-			}
-			if done {
-				passedConditions = passedConditions + 1
-				continue
-			}
-		}
-
-	ConditionSucceeded:
-		for currIndex < len(initialItems) {
-			lastEvent = &watch.Event{Type: watch.Added, Object: initialItems[currIndex]}
-			currIndex++
-
-			done, err := condition(*lastEvent)
-			if err != nil {
-				return lastEvent, err
-			}
-			if done {
-				passedConditions = passedConditions + 1
-				break ConditionSucceeded
-			}
-		}
-	}
-	if passedConditions == len(conditions) {
-		return lastEvent, nil
-	}
-	remainingConditions := conditions[passedConditions:]
-
-	metaObj, err := meta.ListAccessor(list)
-	if err != nil {
-		return nil, err
-	}
-	currResourceVersion := metaObj.GetResourceVersion()
-
-	watchInterface, err := lw.Watch(metav1.ListOptions{ResourceVersion: currResourceVersion})
-	if err != nil {
-		return nil, err
-	}
-
-	ctx, cancel := ContextWithOptionalTimeout(context.Background(), timeout)
-	defer cancel()
-	evt, err := UntilWithoutRetry(ctx, watchInterface, remainingConditions...)
- if err == ErrWatchClosed { - // present a consistent error interface to callers - err = wait.ErrWaitTimeout - } - return evt, err -}
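Finally, a hypothetical end-to-end use of the helpers above: wait for a pod to reach the Running phase, assuming `lw` is a cache.ListerWatcher scoped to that single pod (for example via a field selector on its name):

package main

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

// waitForPodRunning blocks until the watched pod reports phase Running,
// the timeout elapses, or the watch fails.
func waitForPodRunning(lw cache.ListerWatcher, timeout time.Duration) error {
	ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), timeout)
	defer cancel()

	_, err := watchtools.UntilWithSync(ctx, lw, &v1.Pod{}, nil,
		func(event watch.Event) (bool, error) {
			// Ignore anything that isn't a pod; the condition only succeeds
			// once a Running pod is observed.
			if pod, ok := event.Object.(*v1.Pod); ok {
				return pod.Status.Phase == v1.PodRunning, nil
			}
			return false, nil
		},
	)
	return err
}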