From 1c21399dd8707dcf6f8cd346ef229289c7fcb236 Mon Sep 17 00:00:00 2001 From: Abhinav Dahiya Date: Fri, 19 Jul 2019 11:29:23 -0700 Subject: [PATCH] vendor: bump library-fo esp https://github.com/openshift/library-go/pull/479 --- glide.lock | 8 +- vendor/github.com/ghodss/yaml/.travis.yml | 4 +- vendor/github.com/ghodss/yaml/go.mod | 3 + vendor/github.com/ghodss/yaml/go.sum | 3 + vendor/github.com/ghodss/yaml/yaml.go | 27 +- .../github.com/ghodss/yaml/yaml_go110_test.go | 54 + vendor/github.com/ghodss/yaml/yaml_test.go | 153 +- .../library-go/alpha-build-machinery/Makefile | 9 +- .../alpha-build-machinery/make/default.mk | 5 +- .../examples/multiple-binaries/.gitignore | 3 + .../make/examples/multiple-binaries/Makefile | 26 + .../examples/multiple-binaries/Makefile.test | 52 +- .../multiple-binaries/Makefile.test.log | 84 +- .../examples/multiple-binaries/cmd/oc/main.go | 8 +- .../make/examples/multiple-binaries/ocp.spec | 47 + .../multiple-binaries/pkg/version/version.go | 27 + .../alpha-build-machinery/make/lib/golang.mk | 28 +- .../make/targets/golang/build.mk | 6 +- .../make/targets/openshift/bindata.mk | 2 +- .../make/targets/openshift/images.mk | 8 +- .../make/targets/openshift/rpm.mk | 41 + .../cmd/crd-schema-gen/generator/generator.go | 4 +- .../openshift/library-go/glide.lock | 240 +++- .../openshift/library-go/glide.yaml | 30 +- .../admissiontesting.go | 25 + .../admissionrestconfig/intiializers.go | 30 + .../admission/admissiontimeout/decorator.go | 20 + .../admissiontimeout/timeoutadmission.go | 67 + .../admissiontimeout/timeoutadmission_test.go | 106 ++ .../apiserver/apiserverconfig/cachecontrol.go | 35 + .../apiserver/apiserverconfig/longrunning.go | 26 + .../personal_subjectaccessreview.go | 129 ++ .../project_request_info_resolver.go | 34 + .../apiserverconfig/requestinforesolver.go | 17 + .../pkg/apiserver/httprequest/httprequest.go | 129 ++ .../apiserver/httprequest/httprequest_test.go | 236 +++ .../openshift/library-go/pkg/apps/OWNERS | 10 + 
.../pkg/apps/appsserialization/scheme.go | 30 + .../pkg/apps/appsserialization/scheme_test.go | 47 + .../pkg/apps/appsserialization/serialize.go | 31 + .../library-go/pkg/apps/appsutil/const.go | 60 + .../pkg/apps/appsutil/rc_scale_client.go | 34 + .../library-go/pkg/apps/appsutil/util.go | 629 ++++++++ .../library-go/pkg/apps/appsutil/util_test.go | 425 ++++++ .../pkg/assets/create/create_test.go | 55 +- .../library-go/pkg/assets/create/creater.go | 43 +- .../operator-config-empty-status.yaml | 7 + .../create/testdata/operator-config.yaml | 2 + .../bootstrapauthenticator/bootstrap.go | 153 ++ .../authorizationutil/subject.go | 56 + .../authorization/authorizationutil/util.go | 48 + .../scopemetadata/clusterrole_describers.go | 86 ++ .../authorization/scopemetadata/describers.go | 17 + .../scopemetadata/user_describers.go | 68 + .../authorization/scopemetadata/validation.go | 152 ++ .../scopemetadata/validation_test.go | 141 ++ .../pkg/build/buildutil/buildutil.go | 85 ++ .../pkg/build/buildutil/buildutil_test.go | 1 + .../library-go/pkg/build/envresolve/env.go | 115 ++ .../pkg/build/envresolve/kube_copy.go | 150 ++ .../library-go/pkg/build/naming/namer.go | 73 + .../library-go/pkg/build/naming/namer_test.go | 101 ++ .../openshift/library-go/pkg/certs/pem.go | 57 + .../pkg/config/helpers/readresource.go | 167 +++ .../config/leaderelection/leaderelection.go | 1 + .../library-go/pkg/config/serving/options.go | 2 +- .../library-go/pkg/config/serving/server.go | 44 +- .../pkg/controller/controllercmd/builder.go | 2 +- .../pkg/controller/controllercmd/cmd.go | 28 +- .../controller/metrics/workqueue_metrics.go | 158 ++- .../openshift/library-go/pkg/crypto/crypto.go | 23 +- .../library-go/pkg/crypto/crypto_test.go | 7 + .../pkg/image/dockerv1client/client.go | 999 +++++++++++++ .../pkg/image/dockerv1client/client_test.go | 399 ++++++ .../pkg/image/dockerv1client/conversion.go | 26 + .../pkg/image/dockerv1client/types.go | 113 ++ 
.../library-go/pkg/image/imageutil/helpers.go | 379 +++++ .../pkg/image/imageutil/helpers_test.go | 155 ++ .../pkg/image/referencemutator/builds.go | 120 ++ .../pkg/image/referencemutator/builds_test.go | 289 ++++ .../pkg/image/referencemutator/meta.go | 109 ++ .../pkg/image/referencemutator/pods.go | 302 ++++ .../pkg/image/referencemutator/pods_test.go | 150 ++ .../pkg/image/registryclient/client.go | 683 +++++++++ .../pkg/image/registryclient/client_test.go | 652 +++++++++ .../pkg/image/registryclient/credentials.go | 90 ++ .../image/registryclient/credentials_test.go | 20 + .../pkg/image/trigger/annotations.go | 215 +++ .../pkg/image/trigger/annotations_test.go | 312 ++++ .../pkg/image/trigger/interfaces.go | 7 + .../library-go/pkg/image/trigger/types.go | 29 + .../legacygroupification/groupification.go | 228 +++ .../pkg/network/networkutils/networkutils.go | 65 + .../network/networkutils/networkutils_test.go | 100 ++ .../pkg/oauth/oauthdiscovery/discovery.go | 32 + .../pkg/oauth/oauthdiscovery/urls.go | 37 + .../oauthclientregistry.go | 501 +++++++ .../oauthclientregistry_test.go | 1261 +++++++++++++++++ .../client_cert_rotation_controller.go | 46 +- .../pkg/operator/condition/condition.go | 66 + .../cloudprovider/observe_cloudprovider.go | 14 +- .../observe_cloudprovider_test.go | 9 +- .../config_observer_controller.go | 4 +- .../config_observer_controller_test.go | 11 +- .../featuregates/observe_featuregates.go | 75 +- .../featuregates/observe_featuregates_test.go | 53 +- .../configobserver/network/observe_network.go | 88 +- .../configobserver/proxy/observe_proxy.go | 88 ++ .../proxy/observe_proxy_test.go | 91 ++ .../pkg/operator/events/recorder_in_memory.go | 5 +- .../dynamic_operator_client.go | 190 +++ .../dynamic_operator_client_test.go | 138 ++ .../dynamic_staticpod_operator_client.go | 201 +++ .../operator/loglevel/logging_controller.go | 24 +- .../loglevel/logging_controller_test.go | 68 + .../library-go/pkg/operator/loglevel/util.go | 73 +- 
.../management/management_state_controller.go | 6 +- .../pkg/operator/render/options/config.go | 3 + .../pkg/operator/render/options/manifest.go | 5 +- .../resource/resourceapply/apiregistration.go | 8 +- .../operator/resource/resourceapply/core.go | 23 +- .../operator/resource/resourceapply/rbac.go | 2 +- .../resourcemerge/generic_config_merger.go | 98 +- .../generic_config_merger_test.go | 62 + .../resourcesync_controller.go | 10 +- .../resourcesync_controller_test.go | 2 +- .../remove_stale_conditions.go | 116 ++ .../staticpod/certsyncpod/certsync_cmd.go | 13 +- .../certsyncpod/certsync_controller.go | 124 +- .../backing_resource_controller.go | 8 +- .../backing_resource_controller_test.go | 3 +- .../installer/installer_controller.go | 286 ++-- .../installer/installer_controller_test.go | 357 +++-- .../staticpod/controller/installer/int32.go | 187 +++ .../monitoring_resource_controller.go | 31 +- .../monitoring_resource_controller_test.go | 2 + .../controller/node/node_controller.go | 60 +- .../controller/node/node_controller_test.go | 146 +- .../controller/prune/prune_controller_test.go | 2 + .../revision/revision_controller.go | 51 +- .../revision/revision_controller_test.go | 50 + .../staticpodstate_controller.go | 12 +- .../pkg/operator/staticpod/controllers.go | 1 + .../operator/staticpod/installerpod/cmd.go | 2 +- .../pkg/operator/status/condition.go | 6 +- .../pkg/operator/status/status_controller.go | 15 +- .../operator/status/status_controller_test.go | 22 +- .../library-go/pkg/operator/status/version.go | 7 +- .../unsupportedconfigoverrides_controller.go | 6 +- .../pkg/operator/v1helpers/helpers.go | 43 +- .../pkg/operator/v1helpers/interfaces.go | 6 +- .../pkg/operator/v1helpers/test_helpers.go | 25 +- .../library-go/pkg/operator/watchdog/cmd.go | 345 +++++ .../pkg/operator/watchdog/cmd_test.go | 152 ++ .../library-go/pkg/operator/watchdog/proc.go | 76 + .../pkg/operator/watchdog/proc_test.go | 96 ++ .../clusterquotamapping.go | 409 ++++++ 
.../clusterquotamapping_test.go | 319 +++++ .../pkg/quota/clusterquotamapping/helpers.go | 139 ++ .../pkg/quota/clusterquotamapping/mapper.go | 289 ++++ .../library-go/pkg/quota/quotautil/error.go | 42 + .../library-go/pkg/quota/quotautil/helpers.go | 48 + .../pkg/security/ldapclient/client.go | 119 ++ .../pkg/security/ldapclient/interfaces.go | 10 + .../pkg/security/ldapquery/errors.go | 81 ++ .../pkg/security/ldapquery/query.go | 248 ++++ .../pkg/security/ldapquery/query_test.go | 320 +++++ .../pkg/security/ldapquery/types.go | 31 + .../pkg/security/ldaptestclient/testclient.go | 170 +++ .../ldaptestclient/testclientconfig.go | 30 + .../pkg/security/ldaputil/attribute.go | 47 + .../pkg/security/ldaputil/attribute_test.go | 70 + .../library-go/pkg/security/ldaputil/url.go | 247 ++++ .../pkg/security/ldaputil/url_test.go | 103 ++ .../library-go/pkg/security/uid/uid.go | 125 ++ .../library-go/pkg/security/uid/uid_test.go | 157 ++ .../library-go/pkg/serviceability/logrus.go | 14 + .../openshift/library-go/pkg/template/OWNERS | 11 + .../library-go/pkg/template/generator/doc.go | 3 + .../pkg/template/generator/examples/doc.go | 3 + .../generator/examples/remotevalue.go | 46 + .../generator/examples/remotevalue_test.go | 36 + .../pkg/template/generator/expressionvalue.go | 160 +++ .../generator/expressionvalue_test.go | 73 + .../pkg/template/generator/generator.go | 7 + .../pkg/template/templateprocessing/object.go | 120 ++ .../templateprocessing/object_test.go | 110 ++ .../template/templateprocessing/template.go | 295 ++++ .../templateprocessing/template_test.go | 589 ++++++++ .../testdata/guestbook.json | 305 ++++ .../testdata/guestbook_list.json | 312 ++++ .../dynamic_process.go | 59 + .../pkg/unidling/unidlingclient/scale.go | 193 +++ vendor/google.golang.org/appengine/README.md | 27 + .../datastore/internal/cloudkey/cloudkey.go | 120 ++ .../datastore/internal/cloudpb/entity.pb.go | 344 +++++ .../appengine/datastore/key.go | 4 + .../appengine/datastore/keycompat.go 
| 89 ++ .../appengine/datastore/keycompat_test.go | 89 ++ .../google.golang.org/appengine/file/file.go | 2 +- vendor/google.golang.org/appengine/go.mod | 9 +- vendor/google.golang.org/appengine/go.sum | 16 + 202 files changed, 21313 insertions(+), 547 deletions(-) create mode 100644 vendor/github.com/ghodss/yaml/go.mod create mode 100644 vendor/github.com/ghodss/yaml/go.sum create mode 100644 vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/.gitignore create mode 100644 vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/ocp.spec create mode 100644 vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version/version.go create mode 100644 vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/rpm.mk create mode 100644 vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissionregistrationtesting/admissiontesting.go create mode 100644 vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissionrestconfig/intiializers.go create mode 100644 vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout/decorator.go create mode 100644 vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout/timeoutadmission.go create mode 100644 vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout/timeoutadmission_test.go create mode 100644 vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/cachecontrol.go create mode 100644 vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/longrunning.go create mode 100644 vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/personal_subjectaccessreview.go create mode 100644 vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/project_request_info_resolver.go create mode 100644 
vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/requestinforesolver.go create mode 100644 vendor/github.com/openshift/library-go/pkg/apiserver/httprequest/httprequest.go create mode 100644 vendor/github.com/openshift/library-go/pkg/apiserver/httprequest/httprequest_test.go create mode 100644 vendor/github.com/openshift/library-go/pkg/apps/OWNERS create mode 100644 vendor/github.com/openshift/library-go/pkg/apps/appsserialization/scheme.go create mode 100644 vendor/github.com/openshift/library-go/pkg/apps/appsserialization/scheme_test.go create mode 100644 vendor/github.com/openshift/library-go/pkg/apps/appsserialization/serialize.go create mode 100644 vendor/github.com/openshift/library-go/pkg/apps/appsutil/const.go create mode 100644 vendor/github.com/openshift/library-go/pkg/apps/appsutil/rc_scale_client.go create mode 100644 vendor/github.com/openshift/library-go/pkg/apps/appsutil/util.go create mode 100644 vendor/github.com/openshift/library-go/pkg/apps/appsutil/util_test.go create mode 100644 vendor/github.com/openshift/library-go/pkg/assets/create/testdata/operator-config-empty-status.yaml create mode 100644 vendor/github.com/openshift/library-go/pkg/authentication/bootstrapauthenticator/bootstrap.go create mode 100644 vendor/github.com/openshift/library-go/pkg/authorization/authorizationutil/subject.go create mode 100644 vendor/github.com/openshift/library-go/pkg/authorization/authorizationutil/util.go create mode 100644 vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/clusterrole_describers.go create mode 100644 vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/describers.go create mode 100644 vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/user_describers.go create mode 100644 vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/validation.go create mode 100644 vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/validation_test.go 
create mode 100644 vendor/github.com/openshift/library-go/pkg/build/buildutil/buildutil.go create mode 100644 vendor/github.com/openshift/library-go/pkg/build/buildutil/buildutil_test.go create mode 100644 vendor/github.com/openshift/library-go/pkg/build/envresolve/env.go create mode 100644 vendor/github.com/openshift/library-go/pkg/build/envresolve/kube_copy.go create mode 100644 vendor/github.com/openshift/library-go/pkg/build/naming/namer.go create mode 100644 vendor/github.com/openshift/library-go/pkg/build/naming/namer_test.go create mode 100644 vendor/github.com/openshift/library-go/pkg/certs/pem.go create mode 100644 vendor/github.com/openshift/library-go/pkg/config/helpers/readresource.go create mode 100644 vendor/github.com/openshift/library-go/pkg/image/dockerv1client/client.go create mode 100644 vendor/github.com/openshift/library-go/pkg/image/dockerv1client/client_test.go create mode 100644 vendor/github.com/openshift/library-go/pkg/image/dockerv1client/conversion.go create mode 100644 vendor/github.com/openshift/library-go/pkg/image/dockerv1client/types.go create mode 100644 vendor/github.com/openshift/library-go/pkg/image/imageutil/helpers.go create mode 100644 vendor/github.com/openshift/library-go/pkg/image/imageutil/helpers_test.go create mode 100644 vendor/github.com/openshift/library-go/pkg/image/referencemutator/builds.go create mode 100644 vendor/github.com/openshift/library-go/pkg/image/referencemutator/builds_test.go create mode 100644 vendor/github.com/openshift/library-go/pkg/image/referencemutator/meta.go create mode 100644 vendor/github.com/openshift/library-go/pkg/image/referencemutator/pods.go create mode 100644 vendor/github.com/openshift/library-go/pkg/image/referencemutator/pods_test.go create mode 100644 vendor/github.com/openshift/library-go/pkg/image/registryclient/client.go create mode 100644 vendor/github.com/openshift/library-go/pkg/image/registryclient/client_test.go create mode 100644 
vendor/github.com/openshift/library-go/pkg/image/registryclient/credentials.go create mode 100644 vendor/github.com/openshift/library-go/pkg/image/registryclient/credentials_test.go create mode 100644 vendor/github.com/openshift/library-go/pkg/image/trigger/annotations.go create mode 100644 vendor/github.com/openshift/library-go/pkg/image/trigger/annotations_test.go create mode 100644 vendor/github.com/openshift/library-go/pkg/image/trigger/interfaces.go create mode 100644 vendor/github.com/openshift/library-go/pkg/image/trigger/types.go create mode 100644 vendor/github.com/openshift/library-go/pkg/legacyapi/legacygroupification/groupification.go create mode 100644 vendor/github.com/openshift/library-go/pkg/network/networkutils/networkutils.go create mode 100644 vendor/github.com/openshift/library-go/pkg/network/networkutils/networkutils_test.go create mode 100644 vendor/github.com/openshift/library-go/pkg/oauth/oauthdiscovery/discovery.go create mode 100644 vendor/github.com/openshift/library-go/pkg/oauth/oauthdiscovery/urls.go create mode 100644 vendor/github.com/openshift/library-go/pkg/oauth/oauthserviceaccountclient/oauthclientregistry.go create mode 100644 vendor/github.com/openshift/library-go/pkg/oauth/oauthserviceaccountclient/oauthclientregistry_test.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/condition/condition.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/configobserver/proxy/observe_proxy.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/configobserver/proxy/observe_proxy_test.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_operator_client.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_operator_client_test.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_staticpod_operator_client.go create mode 100644 
vendor/github.com/openshift/library-go/pkg/operator/loglevel/logging_controller_test.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/staleconditions/remove_stale_conditions.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/int32.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/watchdog/cmd.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/watchdog/cmd_test.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/watchdog/proc.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/watchdog/proc_test.go create mode 100644 vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/clusterquotamapping.go create mode 100644 vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/clusterquotamapping_test.go create mode 100644 vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/helpers.go create mode 100644 vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/mapper.go create mode 100644 vendor/github.com/openshift/library-go/pkg/quota/quotautil/error.go create mode 100644 vendor/github.com/openshift/library-go/pkg/quota/quotautil/helpers.go create mode 100644 vendor/github.com/openshift/library-go/pkg/security/ldapclient/client.go create mode 100644 vendor/github.com/openshift/library-go/pkg/security/ldapclient/interfaces.go create mode 100644 vendor/github.com/openshift/library-go/pkg/security/ldapquery/errors.go create mode 100644 vendor/github.com/openshift/library-go/pkg/security/ldapquery/query.go create mode 100644 vendor/github.com/openshift/library-go/pkg/security/ldapquery/query_test.go create mode 100644 vendor/github.com/openshift/library-go/pkg/security/ldapquery/types.go create mode 100644 vendor/github.com/openshift/library-go/pkg/security/ldaptestclient/testclient.go create mode 100644 
vendor/github.com/openshift/library-go/pkg/security/ldaptestclient/testclientconfig.go create mode 100644 vendor/github.com/openshift/library-go/pkg/security/ldaputil/attribute.go create mode 100644 vendor/github.com/openshift/library-go/pkg/security/ldaputil/attribute_test.go create mode 100644 vendor/github.com/openshift/library-go/pkg/security/ldaputil/url.go create mode 100644 vendor/github.com/openshift/library-go/pkg/security/ldaputil/url_test.go create mode 100644 vendor/github.com/openshift/library-go/pkg/security/uid/uid.go create mode 100644 vendor/github.com/openshift/library-go/pkg/security/uid/uid_test.go create mode 100644 vendor/github.com/openshift/library-go/pkg/template/OWNERS create mode 100644 vendor/github.com/openshift/library-go/pkg/template/generator/doc.go create mode 100644 vendor/github.com/openshift/library-go/pkg/template/generator/examples/doc.go create mode 100644 vendor/github.com/openshift/library-go/pkg/template/generator/examples/remotevalue.go create mode 100644 vendor/github.com/openshift/library-go/pkg/template/generator/examples/remotevalue_test.go create mode 100644 vendor/github.com/openshift/library-go/pkg/template/generator/expressionvalue.go create mode 100644 vendor/github.com/openshift/library-go/pkg/template/generator/expressionvalue_test.go create mode 100644 vendor/github.com/openshift/library-go/pkg/template/generator/generator.go create mode 100644 vendor/github.com/openshift/library-go/pkg/template/templateprocessing/object.go create mode 100644 vendor/github.com/openshift/library-go/pkg/template/templateprocessing/object_test.go create mode 100644 vendor/github.com/openshift/library-go/pkg/template/templateprocessing/template.go create mode 100644 vendor/github.com/openshift/library-go/pkg/template/templateprocessing/template_test.go create mode 100644 vendor/github.com/openshift/library-go/pkg/template/templateprocessing/testdata/guestbook.json create mode 100644 
vendor/github.com/openshift/library-go/pkg/template/templateprocessing/testdata/guestbook_list.json create mode 100644 vendor/github.com/openshift/library-go/pkg/template/templateprocessingclient/dynamic_process.go create mode 100644 vendor/github.com/openshift/library-go/pkg/unidling/unidlingclient/scale.go create mode 100644 vendor/google.golang.org/appengine/datastore/internal/cloudkey/cloudkey.go create mode 100644 vendor/google.golang.org/appengine/datastore/internal/cloudpb/entity.pb.go create mode 100644 vendor/google.golang.org/appengine/datastore/keycompat.go create mode 100644 vendor/google.golang.org/appengine/datastore/keycompat_test.go diff --git a/glide.lock b/glide.lock index bbe807fc3..e9557a443 100644 --- a/glide.lock +++ b/glide.lock @@ -1,12 +1,12 @@ hash: 82980e0e06db282da4275faa0b1f1f3f4dfa1d07e511d6133fb2889d4123a0be -updated: 2019-04-22T10:22:42.49132063-04:00 +updated: 2019-07-31T13:09:32.062708984-07:00 imports: - name: github.com/davecgh/go-spew version: 782f4967f2dc4564575ca782fe2d04090b5faca8 subpackages: - spew - name: github.com/ghodss/yaml - version: c7ce16629ff4cd059ed96ed06419dd3856fd3577 + version: 25d852aebe32c875e9c044af3eef9c7dc6bc777f - name: github.com/gogo/protobuf version: 342cbe0a04158f6dcb03ca0079991a51a4248c02 subpackages: @@ -49,7 +49,7 @@ imports: - name: github.com/modern-go/reflect2 version: 94122c33edd36123c84d5368cfb2b69df93a0ec8 - name: github.com/openshift/library-go - version: d7f4ab093b6f65ba057442c520950f44bacd8015 + version: 950af653b51af28697df79f1406fc9d21f722db8 subpackages: - pkg/assets - pkg/assets/create @@ -98,7 +98,7 @@ imports: subpackages: - rate - name: google.golang.org/appengine - version: 54a98f90d1c46b7731eb8fb305d2a321c30ef610 + version: b2f4a3cf3c67576a2ee09e1fe62656a5086ce880 subpackages: - internal - internal/base diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml index 930860e0a..98ad417e2 100644 --- a/vendor/github.com/ghodss/yaml/.travis.yml 
+++ b/vendor/github.com/ghodss/yaml/.travis.yml @@ -1,8 +1,8 @@ language: go go: - - "1.3" - - "1.4" + - "1.9" - "1.10" + - "1.11" script: - go test - go build diff --git a/vendor/github.com/ghodss/yaml/go.mod b/vendor/github.com/ghodss/yaml/go.mod new file mode 100644 index 000000000..8d9ad7b64 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/go.mod @@ -0,0 +1,3 @@ +module github.com/ghodss/yaml + +require gopkg.in/yaml.v2 v2.2.2 diff --git a/vendor/github.com/ghodss/yaml/go.sum b/vendor/github.com/ghodss/yaml/go.sum new file mode 100644 index 000000000..bd555a333 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/go.sum @@ -0,0 +1,3 @@ +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go index 6e7f14fc7..dfd264d6c 100644 --- a/vendor/github.com/ghodss/yaml/yaml.go +++ b/vendor/github.com/ghodss/yaml/yaml.go @@ -1,4 +1,14 @@ -package yaml +// Package yaml provides a wrapper around go-yaml designed to enable a better +// way of handling YAML when marshaling to and from structs. +// +// In short, this package first converts YAML to JSON using go-yaml and then +// uses json.Marshal and json.Unmarshal to convert to or from the struct. This +// means that it effectively reuses the JSON struct tags as well as the custom +// JSON methods MarshalJSON and UnmarshalJSON unlike go-yaml. +// +// See also http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang +// +package yaml // import "github.com/ghodss/yaml" import ( "bytes" @@ -33,8 +43,19 @@ type JSONOpt func(*json.Decoder) *json.Decoder // Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object, // optionally configuring the behavior of the JSON unmarshal. 
func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error { + return unmarshal(yaml.Unmarshal, y, o, opts) +} + +// UnmarshalStrict is like Unmarshal except that any mapping keys that are +// duplicates will result in an error. +// To also be strict about unknown fields, add the DisallowUnknownFields option. +func UnmarshalStrict(y []byte, o interface{}, opts ...JSONOpt) error { + return unmarshal(yaml.UnmarshalStrict, y, o, opts) +} + +func unmarshal(f func(in []byte, out interface{}) (err error), y []byte, o interface{}, opts []JSONOpt) error { vo := reflect.ValueOf(o) - j, err := yamlToJSON(y, &vo, yaml.Unmarshal) + j, err := yamlToJSON(y, &vo, f) if err != nil { return fmt.Errorf("error converting YAML to JSON: %v", err) } @@ -113,7 +134,7 @@ func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, // YAML objects are not completely compatible with JSON objects (e.g. you // can have non-string keys in YAML). So, convert the YAML-compatible object // to a JSON-compatible object, failing with an error if irrecoverable - // incompatibilties happen along the way. + // incompatibilities happen along the way. jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget) if err != nil { return nil, err diff --git a/vendor/github.com/ghodss/yaml/yaml_go110_test.go b/vendor/github.com/ghodss/yaml/yaml_go110_test.go index b7767b7c4..753ee3f6f 100644 --- a/vendor/github.com/ghodss/yaml/yaml_go110_test.go +++ b/vendor/github.com/ghodss/yaml/yaml_go110_test.go @@ -4,6 +4,8 @@ package yaml import ( "fmt" + "reflect" + "strings" "testing" ) @@ -34,6 +36,58 @@ func TestUnmarshalWithTags(t *testing.T) { } +// TestUnmarshalStrictWithJSONOpts tests that we return an error if there are +// duplicate fields in the YAML input. +func TestUnmarshalStrictWithJSONOpts(t *testing.T) { + for _, tc := range []struct { + yaml []byte + opts []JSONOpt + want UnmarshalString + wantErr string + }{ + { + // By default, unknown field is ignored. 
+ yaml: []byte("a: 1\nunknownField: 2"), + want: UnmarshalString{A: "1"}, + }, + { + // Unknown field produces an error with `DisallowUnknownFields` option. + yaml: []byte("a: 1\nunknownField: 2"), + opts: []JSONOpt{DisallowUnknownFields}, + wantErr: `unknown field "unknownField"`, + }, + } { + po := prettyFunctionName(tc.opts) + s := UnmarshalString{} + err := UnmarshalStrict(tc.yaml, &s, tc.opts...) + if tc.wantErr != "" && err == nil { + t.Errorf("UnmarshalStrict(%#q, &s, %v) = nil; want error", string(tc.yaml), po) + continue + } + if tc.wantErr == "" && err != nil { + t.Errorf("UnmarshalStrict(%#q, &s, %#v) = %v; want no error", string(tc.yaml), po, err) + continue + } + // We expect that duplicate fields are discovered during JSON unmarshalling. + if want := "error unmarshaling JSON"; tc.wantErr != "" && !strings.Contains(err.Error(), want) { + t.Errorf("UnmarshalStrict(%#q, &s, %#v) = %v; want err contains %#q", string(tc.yaml), po, err, want) + } + if tc.wantErr != "" && !strings.Contains(err.Error(), tc.wantErr) { + t.Errorf("UnmarshalStrict(%#q, &s, %#v) = %v; want err contains %#q", string(tc.yaml), po, err, tc.wantErr) + } + + // Only test content of `s` if parsing indicated no error. + // If we got an error, `s` may be partially parsed and contain some data. 
+ if err != nil { + continue + } + + if !reflect.DeepEqual(s, tc.want) { + t.Errorf("UnmarshalStrict(%#q, &s, %#v) = %+#v; want %+#v", string(tc.yaml), po, s, tc.want) + } + } +} + func ExampleUnknown() { type WithTaggedField struct { Field string `json:"field"` diff --git a/vendor/github.com/ghodss/yaml/yaml_test.go b/vendor/github.com/ghodss/yaml/yaml_test.go index 9250cf242..e31b402c5 100644 --- a/vendor/github.com/ghodss/yaml/yaml_test.go +++ b/vendor/github.com/ghodss/yaml/yaml_test.go @@ -4,7 +4,9 @@ import ( "fmt" "math" "reflect" + "runtime" "strconv" + "strings" "testing" ) @@ -19,7 +21,7 @@ type MarshalTest struct { func TestMarshal(t *testing.T) { f32String := strconv.FormatFloat(math.MaxFloat32, 'g', -1, 32) s := MarshalTest{"a", math.MaxInt64, math.MaxFloat32} - e := []byte(fmt.Sprintf("A: a\nB: %d\nC: %s\n", math.MaxInt64, f32String)) + e := []byte(fmt.Sprintf("A: a\nB: %d\nC: %s\n", int64(math.MaxInt64), f32String)) y, err := Marshal(s) if err != nil { @@ -62,32 +64,32 @@ func TestUnmarshal(t *testing.T) { y := []byte("a: 1") s1 := UnmarshalString{} e1 := UnmarshalString{A: "1"} - unmarshal(t, y, &s1, &e1) + unmarshalEqual(t, y, &s1, &e1) y = []byte("a: true") s1 = UnmarshalString{} e1 = UnmarshalString{A: "true"} - unmarshal(t, y, &s1, &e1) + unmarshalEqual(t, y, &s1, &e1) y = []byte("true: 1") s1 = UnmarshalString{} e1 = UnmarshalString{True: "1"} - unmarshal(t, y, &s1, &e1) + unmarshalEqual(t, y, &s1, &e1) y = []byte("a:\n a: 1") s2 := UnmarshalNestedString{} e2 := UnmarshalNestedString{NestedString{"1"}} - unmarshal(t, y, &s2, &e2) + unmarshalEqual(t, y, &s2, &e2) y = []byte("a:\n - b: abc\n c: def\n - b: 123\n c: 456\n") s3 := UnmarshalSlice{} e3 := UnmarshalSlice{[]NestedSlice{NestedSlice{"abc", strPtr("def")}, NestedSlice{"123", strPtr("456")}}} - unmarshal(t, y, &s3, &e3) + unmarshalEqual(t, y, &s3, &e3) y = []byte("a:\n b: 1") s4 := UnmarshalStringMap{} e4 := UnmarshalStringMap{map[string]string{"b": "1"}} - unmarshal(t, y, &s4, &e4) + 
unmarshalEqual(t, y, &s4, &e4) y = []byte(` a: @@ -103,18 +105,145 @@ b: "a": &NamedThing{Name: "TestA"}, "b": &NamedThing{Name: "TestB"}, } - unmarshal(t, y, &s5, &e5) + unmarshalEqual(t, y, &s5, &e5) } -func unmarshal(t *testing.T, y []byte, s, e interface{}, opts ...JSONOpt) { +// TestUnmarshalNonStrict tests that we parse ambiguous YAML without error. +func TestUnmarshalNonStrict(t *testing.T) { + for _, tc := range []struct { + yaml []byte + want UnmarshalString + }{ + { + yaml: []byte("a: 1"), + want: UnmarshalString{A: "1"}, + }, + { + // Unknown field get ignored. + yaml: []byte("a: 1\nunknownField: 2"), + want: UnmarshalString{A: "1"}, + }, + { + // Unknown fields get ignored. + yaml: []byte("unknownOne: 2\na: 1\nunknownTwo: 2"), + want: UnmarshalString{A: "1"}, + }, + { + // Last declaration of `a` wins. + yaml: []byte("a: 1\na: 2"), + want: UnmarshalString{A: "2"}, + }, + { + // Even ignore first declaration of `a` with wrong type. + yaml: []byte("a: [1,2,3]\na: value-of-a"), + want: UnmarshalString{A: "value-of-a"}, + }, + { + // Last value of `a` and first and only mention of `true` are parsed. + yaml: []byte("true: string-value-of-yes\na: 1\na: [1,2,3]\na: value-of-a"), + want: UnmarshalString{A: "value-of-a", True: "string-value-of-yes"}, + }, + { + // In YAML, `YES` is a Boolean true. + yaml: []byte("true: YES"), + want: UnmarshalString{True: "true"}, + }, + } { + s := UnmarshalString{} + unmarshalEqual(t, tc.yaml, &s, &tc.want) + } +} + +// prettyFunctionName converts a slice of JSONOpt function pointers to a human +// readable string representation. +func prettyFunctionName(opts []JSONOpt) []string { + var r []string + for _, o := range opts { + r = append(r, runtime.FuncForPC(reflect.ValueOf(o).Pointer()).Name()) + } + return r +} + +func unmarshalEqual(t *testing.T, y []byte, s, e interface{}, opts ...JSONOpt) { + t.Helper() err := Unmarshal(y, s, opts...) 
if err != nil { - t.Errorf("error unmarshaling YAML: %v", err) + t.Errorf("Unmarshal(%#q, s, %v) = %v", string(y), prettyFunctionName(opts), err) + return } if !reflect.DeepEqual(s, e) { - t.Errorf("unmarshal YAML was unsuccessful, expected: %+#v, got: %+#v", - e, s) + t.Errorf("Unmarshal(%#q, s, %v) = %+#v; want %+#v", string(y), prettyFunctionName(opts), s, e) + } +} + +// TestUnmarshalStrict tests that we return an error on ambiguous YAML. +func TestUnmarshalStrict(t *testing.T) { + for _, tc := range []struct { + yaml []byte + want UnmarshalString + wantErr string + }{ + { + yaml: []byte("a: 1"), + want: UnmarshalString{A: "1"}, + }, + { + // Order does not matter. + yaml: []byte("true: 1\na: 2"), + want: UnmarshalString{A: "2", True: "1"}, + }, + { + // By default, unknown field is ignored. + yaml: []byte("a: 1\nunknownField: 2"), + want: UnmarshalString{A: "1"}, + }, + { + // Declaring `a` twice produces an error. + yaml: []byte("a: 1\na: 2"), + wantErr: `key "a" already set in map`, + }, + { + // Not ignoring first declaration of A with wrong type. + yaml: []byte("a: [1,2,3]\na: value-of-a"), + wantErr: `key "a" already set in map`, + }, + { + // Declaring field `true` twice. + yaml: []byte("true: string-value-of-yes\ntrue: 1"), + wantErr: `key true already set in map`, + }, + { + // In YAML, `YES` is a Boolean true. + yaml: []byte("true: YES"), + want: UnmarshalString{True: "true"}, + }, + } { + s := UnmarshalString{} + err := UnmarshalStrict(tc.yaml, &s) + if tc.wantErr != "" && err == nil { + t.Errorf("UnmarshalStrict(%#q, &s) = nil; want error", string(tc.yaml)) + continue + } + if tc.wantErr == "" && err != nil { + t.Errorf("UnmarshalStrict(%#q, &s) = %v; want no error", string(tc.yaml), err) + continue + } + // We only expect errors during unmarshalling YAML. 
+ if want := "yaml: unmarshal errors"; tc.wantErr != "" && !strings.Contains(err.Error(), want) { + t.Errorf("UnmarshalStrict(%#q, &s) = %v; want err contains %#q", string(tc.yaml), err, want) + } + if tc.wantErr != "" && !strings.Contains(err.Error(), tc.wantErr) { + t.Errorf("UnmarshalStrict(%#q, &s) = %v; want err contains %#q", string(tc.yaml), err, tc.wantErr) + } + + // Even if there was an error, we continue the test: We expect that all + // errors occur during YAML unmarshalling. Such errors leaves `s` unmodified + // and the following check will compare default values of `UnmarshalString`. + + if !reflect.DeepEqual(s, tc.want) { + t.Errorf("UnmarshalStrict(%#q, &s) = %+#v; want %+#v", string(tc.yaml), s, tc.want) + } } } diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/Makefile b/vendor/github.com/openshift/library-go/alpha-build-machinery/Makefile index a758c97a7..c9456fe64 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/Makefile +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/Makefile @@ -8,10 +8,15 @@ examples :=$(wildcard ./make/examples/*/Makefile.test) # $1 - makefile name relative to ./make/ folder # $2 - target # $3 - output folder -# We need to change dir to the final makefile directory or relative paths won't match +# We need to change dir to the final makefile directory or relative paths won't match. +# Dynamic values are replaced with "" so we can do diff against checkout versions. +# Avoid comparing local paths by stripping the prefix. 
define update-makefile-log mkdir -p "$(3)" -$(MAKE) -C "$(dir $(1))" -f "$(notdir $(1))" --no-print-directory --warn-undefined-variables $(2) 2>&1 | tee "$(3)"/"$(notdir $(1))"$(subst ..,.,.$(2).log) +set -o pipefail; $(MAKE) -j 1 -C "$(dir $(1))" -f "$(notdir $(1))" --no-print-directory --warn-undefined-variables $(2) 2>&1 | \ + sed 's/\.\(buildDate\|versionFromGit\|commitFromGit\|gitTreeState\)="[^"]*" /.\1="" /g' | \ + sed -E 's~/.*/(github.com/openshift/library-go/alpha-build-machinery/.*)~/\1~g' | \ + tee "$(3)"/"$(notdir $(1))"$(subst ..,.,.$(2).log) endef diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.mk index 6e6c03437..564fc1229 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.mk +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.mk @@ -15,6 +15,9 @@ update: update-bindata # or self_dir could be modified for the next include by the included file. # Also doing this at the end of the file allows us to user self_dir before it could be modified. 
include $(addprefix $(self_dir), \ - targets/openshift/*.mk \ + targets/openshift/deps.mk \ + targets/openshift/images.mk \ + targets/openshift/bindata.mk \ + targets/openshift/codegen.mk \ golang.mk \ ) diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/.gitignore b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/.gitignore new file mode 100644 index 000000000..d06fd1372 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/.gitignore @@ -0,0 +1,3 @@ +/oc +/openshift +/_output/ diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile index 17350782a..cf44849ee 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile @@ -1,3 +1,29 @@ include $(addprefix ../../, \ golang.mk \ + targets/openshift/rpm.mk \ ) + +# rpm wants build-id set +GO_LD_EXTRAFLAGS +=-B 0x$$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \n') + +OUTPUT_DIR :=_output +CROSS_BUILD_BINDIR :=$(OUTPUT_DIR)/bin +RPM_EXTRAFLAGS :=--quiet --define 'version 2.42.0' --define 'dist .el7' --define 'release 6' + +cross-build-darwin-amd64: + +@GOOS=darwin GOARCH=amd64 $(MAKE) --no-print-directory build GO_BUILD_BINDIR:=$(CROSS_BUILD_BINDIR)/darwin_amd64 +.PHONY: cross-build-darwin-amd64 + +cross-build-windows-amd64: + +@GOOS=windows GOARCH=amd64 $(MAKE) --no-print-directory build GO_BUILD_BINDIR:=$(CROSS_BUILD_BINDIR)/windows_amd64 +.PHONY: cross-build-windows-amd64 + +cross-build: cross-build-darwin-amd64 cross-build-windows-amd64 +.PHONY: cross-build + +clean-cross-build: + $(RM) -r '$(CROSS_BUILD_BINDIR)' + if [ -d '$(OUTPUT_DIR)' ]; 
then rmdir --ignore-fail-on-non-empty '$(OUTPUT_DIR)'; fi +.PHONY: clean-cross-build + +clean: clean-cross-build diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test index 1922d246a..f933ce024 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test @@ -1,9 +1,53 @@ -all: - $(MAKE) -C . build +SHELL :=/bin/bash -euo pipefail + +test: | test-build test-cross-build test-rpm +.PHONY: test + +test-build: + $(MAKE) build [[ -f ./openshift ]] [[ -f ./oc ]] - $(MAKE) -C . clean + # test version is set correctly when linking + # majorFromGit, minorFromGit are deprecated upstream and set to empty value + # we avoid comparing time to avoid flakes + # we avoid comparing git tree state + diff <( ./oc | grep -v -e 'clean' -e 'dirty' | sed '$$d' ) <( \ + echo ""; \ + echo ""; \ + git rev-parse --short "HEAD^{commit}" 2>/dev/null; \ + git describe --long --tags --abbrev=7 --match 'v[0-9]*' || echo 'v0.0.0-unknown'; \ + ) + + $(MAKE) clean + [[ ! -f ./openshift ]] + [[ ! -f ./oc ]] + $(MAKE) clean +.PHONY: test-build + +test-cross-build: + [[ ! -d ./_output/ ]] + $(MAKE) cross-build [[ ! -f ./openshift ]] [[ ! -f ./oc ]] -.PHONY: all + [[ -f ./_output/bin/darwin_amd64/openshift ]] + [[ -f ./_output/bin/darwin_amd64/oc ]] + [[ -f ./_output/bin/windows_amd64/openshift.exe ]] + [[ -f ./_output/bin/windows_amd64/oc.exe ]] + + $(MAKE) clean + [[ ! -d ./_output/ ]] + $(MAKE) clean +.PHONY: test-cross-build + +test-rpm: + [[ ! -d ./_output/ ]] + + $(MAKE) rpm-build + [[ -f ./_output/rpms/x86_64/openshift-2.42.0-6.el7.x86_64.rpm ]] + [[ -f ./_output/srpms/openshift-2.42.0-6.el7.src.rpm ]] + + $(MAKE) clean + [[ ! 
-d ./_output/ ]] + $(MAKE) clean +.PHONY: test-rpm diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test.log b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test.log index 5b7d7f2e8..e5acf191f 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test.log +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test.log @@ -1,9 +1,85 @@ -make -C . build -go build github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/oc -go build github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/openshift +make build +fatal: No names found, cannot describe anything. +fatal: No names found, cannot describe anything. +go build -ldflags "-s -w -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.versionFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.commitFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.gitTreeState="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.buildDate="" -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \n')" github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/oc +go build -ldflags "-s -w -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.versionFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.commitFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.gitTreeState="" -X 
github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.buildDate="" -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \n')" github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/openshift [[ -f ./openshift ]] [[ -f ./oc ]] -make -C . clean +# test version is set correctly when linking +# majorFromGit, minorFromGit are deprecated upstream and set to empty value +# we avoid comparing time to avoid flakes +# we avoid comparing git tree state +diff <( ./oc | grep -v -e 'clean' -e 'dirty' | sed '$d' ) <( \ + echo ""; \ + echo ""; \ + git rev-parse --short "HEAD^{commit}" 2>/dev/null; \ + git describe --long --tags --abbrev=7 --match 'v[0-9]*' || echo 'v0.0.0-unknown'; \ +) +fatal: No names found, cannot describe anything. +make clean rm -f oc openshift +rm -f -r '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/srpms' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +rm -f -r '_output/bin' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi [[ ! -f ./openshift ]] [[ ! -f ./oc ]] +make clean +rm -f oc openshift +rm -f -r '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/srpms' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +rm -f -r '_output/bin' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +[[ ! -d ./_output/ ]] +make cross-build +fatal: No names found, cannot describe anything. +fatal: No names found, cannot describe anything. 
+mkdir -p '_output/bin/darwin_amd64' +go build -ldflags "-s -w -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.versionFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.commitFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.gitTreeState="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.buildDate="" -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \n')" -o '_output/bin/darwin_amd64/oc' github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/oc +mkdir -p '_output/bin/darwin_amd64' +go build -ldflags "-s -w -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.versionFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.commitFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.gitTreeState="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.buildDate="" -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \n')" -o '_output/bin/darwin_amd64/openshift' github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/openshift +fatal: No names found, cannot describe anything. +fatal: No names found, cannot describe anything. 
+mkdir -p '_output/bin/windows_amd64' +go build -ldflags "-s -w -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.versionFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.commitFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.gitTreeState="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.buildDate="" -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \n')" -o '_output/bin/windows_amd64/oc.exe' github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/oc +mkdir -p '_output/bin/windows_amd64' +go build -ldflags "-s -w -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.versionFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.commitFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.gitTreeState="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.buildDate="" -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \n')" -o '_output/bin/windows_amd64/openshift.exe' github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/openshift +[[ ! -f ./openshift ]] +[[ ! -f ./oc ]] +[[ -f ./_output/bin/darwin_amd64/openshift ]] +[[ -f ./_output/bin/darwin_amd64/oc ]] +[[ -f ./_output/bin/windows_amd64/openshift.exe ]] +[[ -f ./_output/bin/windows_amd64/oc.exe ]] +make clean +rm -f oc openshift +rm -f -r '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/srpms' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +rm -f -r '_output/bin' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +[[ ! 
-d ./_output/ ]] +make clean +rm -f oc openshift +rm -f -r '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/srpms' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +rm -f -r '_output/bin' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +[[ ! -d ./_output/ ]] +make rpm-build +rpmbuild -ba --define "_topdir /github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries" --define "go_package github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries" --quiet --define 'version 2.42.0' --define 'dist .el7' --define 'release 6' ocp.spec +fatal: No names found, cannot describe anything. +fatal: No names found, cannot describe anything. +[[ -f ./_output/rpms/x86_64/openshift-2.42.0-6.el7.x86_64.rpm ]] +[[ -f ./_output/srpms/openshift-2.42.0-6.el7.src.rpm ]] +make clean +rm -f oc openshift +rm -f -r '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/srpms' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +rm -f -r '_output/bin' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +[[ ! 
-d ./_output/ ]] +make clean +rm -f oc openshift +rm -f -r '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/srpms' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi +rm -f -r '_output/bin' +if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/oc/main.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/oc/main.go index 790580777..cf699883e 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/oc/main.go +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/oc/main.go @@ -1,5 +1,11 @@ package main -func main() { +import ( + "fmt" + + "github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version" +) +func main() { + fmt.Print(version.String()) } diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/ocp.spec b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/ocp.spec new file mode 100644 index 000000000..fc4117e0b --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/ocp.spec @@ -0,0 +1,47 @@ +#debuginfo not supported with Go +%global debug_package %{nil} +# modifying the Go binaries breaks the DWARF debugging +%global __os_install_post %{_rpmconfigdir}/brp-compress + +%global golang_version 1.12 +%global product_name OpenShift + +%{!?version: %global version 0.0.1} +%{!?release: %global release 1} + +Name: openshift +Version: %{version} +Release: %{release}%{dist} +Summary: OpenShift client binaries +License: ASL 2.0 +URL: https://%{go_package} + +# If go_arches not defined fall through to implicit golang archs +%if 
0%{?go_arches:1} +ExclusiveArch: %{go_arches} +%else +ExclusiveArch: x86_64 aarch64 ppc64le s390x +%endif + +#BuildRequires: bsdtar +BuildRequires: golang >= %{golang_version} + +%description +%{summary} + +%prep + +%build +make build + +%install +install -d %{buildroot}%{_bindir} + +install -p -m 755 oc %{buildroot}%{_bindir}/oc +install -p -m 755 openshift %{buildroot}%{_bindir}/openshift + +%files +%{_bindir}/oc +%{_bindir}/openshift + +%changelog diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version/version.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version/version.go new file mode 100644 index 000000000..4d118d321 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version/version.go @@ -0,0 +1,27 @@ +package version + +var ( + // commitFromGit is a constant representing the source version that + // generated this build. It should be set during build via -ldflags. + commitFromGit string + // versionFromGit is a constant representing the version tag that + // generated this build. It should be set during build via -ldflags. 
+ versionFromGit = "unknown" + // major version + majorFromGit string + // minor version + minorFromGit string + // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ') + buildDate string + // state of git tree, either "clean" or "dirty" + gitTreeState string +) + +func String() string { + return majorFromGit + "\n" + + minorFromGit + "\n" + + commitFromGit + "\n" + + versionFromGit + "\n" + + gitTreeState + "\n" + + buildDate + "\n" +} diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/golang.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/golang.mk index 8a904d5f9..5d59e70f9 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/golang.mk +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/golang.mk @@ -1,4 +1,16 @@ GO ?=go +GOPATH ?=$(shell $(GO) env GOPATH) +gopath_list :=$(subst :, ,$(strip $(GOPATH))) +# Use every path in GOPATH to try to remove it as a prefix of current dir to determine the package name. +# If the prefix is not removed on subtitution, filter-out unchanged paths. +GO_PACKAGE ?=$(strip $(filter-out $(abspath .),$(foreach p,$(gopath_list),$(patsubst $(p)/src/%,%,$(abspath .))))) + +GOOS ?=$(shell $(GO) env GOOS) +GOHOSTOS ?=$(shell $(GO) env GOHOSTOS) +GOARCH ?=$(shell $(GO) env GOARCH) +GOHOSTARCH ?=$(shell $(GO) env GOHOSTARCH) +GOEXE ?=$(shell $(GO) env GOEXE) + GOFMT ?=gofmt GOFMT_FLAGS ?=-s -l GOLINT ?=golint @@ -11,6 +23,20 @@ GO_BUILD_PACKAGES ?=./cmd/... GO_BUILD_PACKAGES_EXPANDED ?=$(shell $(GO) list $(GO_BUILD_PACKAGES)) go_build_binaries =$(notdir $(GO_BUILD_PACKAGES_EXPANDED)) GO_BUILD_FLAGS ?= +GO_BUILD_BINDIR ?= + GO_TEST_FLAGS ?=-race -GO_PACKAGE :=$(notdir $(abspath . 
)) +GO_LD_EXTRAFLAGS ?= + +SOURCE_GIT_TAG ?=$(shell git describe --long --tags --abbrev=7 --match 'v[0-9]*' || echo 'v0.0.0-unknown') +SOURCE_GIT_COMMIT ?=$(shell git rev-parse --short "HEAD^{commit}" 2>/dev/null) +SOURCE_GIT_TREE_STATE ?=$(shell ( ( [ ! -d ".git/" ] || git diff --quiet ) && echo 'clean' ) || echo 'dirty') + +define version-ldflags +-X $(1).versionFromGit="$(SOURCE_GIT_TAG)" \ +-X $(1).commitFromGit="$(SOURCE_GIT_COMMIT)" \ +-X $(1).gitTreeState="$(SOURCE_GIT_TREE_STATE)" \ +-X $(1).buildDate="$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')" +endef +GO_LD_FLAGS ?=-ldflags "-s -w $(call version-ldflags,$(GO_PACKAGE)/pkg/version) $(GO_LD_EXTRAFLAGS)" diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/build.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/build.mk index 49d484093..1fe87b091 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/build.mk +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/build.mk @@ -1,7 +1,10 @@ self_dir :=$(dir $(lastword $(MAKEFILE_LIST))) define build-package - $(GO) build $(GO_BUILD_FLAGS) $(1) + $(if $(GO_BUILD_BINDIR),mkdir -p '$(GO_BUILD_BINDIR)',) + $(strip $(GO) build $(GO_BUILD_FLAGS) $(GO_LD_FLAGS) \ + $(if $(GO_BUILD_BINDIR),-o '$(GO_BUILD_BINDIR)/$(notdir $(1))$(GOEXE)',) \ + $(1)) endef @@ -12,6 +15,7 @@ build: clean-binaries: $(RM) $(go_build_binaries) +.PHONY: clean-binaries # We need to be careful to expand all the paths before any include is done # or self_dir could be modified for the next include by the included file. 
diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/bindata.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/bindata.mk index 07b9a3228..0e78cb927 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/bindata.mk +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/bindata.mk @@ -3,7 +3,7 @@ TMP_GOPATH :=$(shell mktemp -d) .ensure-go-bindata: ln -s $(abspath ./vendor) "$(TMP_GOPATH)/src" - export GOPATH=$(TMP_GOPATH) && go install "./vendor/github.com/jteeuwen/go-bindata/..." + export GOPATH=$(TMP_GOPATH) && export GOBIN=$(TMP_GOPATH)/bin && go install "./vendor/github.com/jteeuwen/go-bindata/..." # $1 - input dirs # $2 - prefix diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/images.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/images.mk index cbd2d046c..b48741a73 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/images.mk +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/images.mk @@ -2,12 +2,18 @@ IMAGE_REGISTRY ?= IMAGE_ORG ?=openshift IMAGE_TAG ?=latest + +# IMAGE_BUILD_EXTRA_FLAGS lets you add extra flags for imagebuilder +# e.g. 
to mount secrets and repo information into base image like: +# make images IMAGE_BUILD_EXTRA_FLAGS='-mount ~/projects/origin-repos/4.2/:/etc/yum.repos.d/' +IMAGE_BUILD_EXTRA_FLAGS ?= + # $1 - image name # $2 - Dockerfile path # $3 - context define build-image-internal image-$(1): - imagebuilder -f $(2) -t $(addsuffix /,$(IMAGE_REGISTRY))$(addsuffix /,$(IMAGE_ORG))$(1)$(addprefix :,$(IMAGE_TAG)) $(3) + $(strip imagebuilder --allow-pull $(IMAGE_BUILD_EXTRA_FLAGS) -f $(2) -t $(addsuffix /,$(IMAGE_REGISTRY))$(addsuffix /,$(IMAGE_ORG))$(1)$(addprefix :,$(IMAGE_TAG)) $(3)) .PHONY: image-$(1) images: image-$(1) diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/rpm.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/rpm.mk new file mode 100644 index 000000000..b235197c7 --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/rpm.mk @@ -0,0 +1,41 @@ +RPM_OUTPUT_DIR ?=_output +RPM_TOPDIR ?=$(abspath ./) +RPM_BUILDDIR ?=$(RPM_TOPDIR) +RPM_BUILDROOT ?=$(RPM_TOPDIR) +RPM_SOURCEDIR ?=$(RPM_TOPDIR) +RPM_SPECDIR ?=$(RPM_TOPDIR) +RPM_RPMDIR ?=$(RPM_TOPDIR)/$(RPM_OUTPUT_DIR)/rpms +RPM_SRCRPMDIR ?=$(RPM_TOPDIR)/$(RPM_OUTPUT_DIR)/srpms + +RPM_SPECFILES ?=$(wildcard *.spec) +RPM_BUILDFLAGS ?=-ba +RPM_EXTRAFLAGS ?= + +rpm-build: + $(strip \ + rpmbuild $(RPM_BUILDFLAGS) \ + --define "_topdir $(RPM_TOPDIR)" \ + --define "_builddir $(RPM_BUILDDIR)" \ + --define "_buildrootdir $(RPM_BUILDROOT)" \ + --define "_rpmdir $(RPM_RPMDIR)" \ + --define "_srcrpmdir $(RPM_SRCRPMDIR)" \ + --define "_specdir $(RPM_SPECDIR)" \ + --define "_sourcedir $(RPM_SOURCEDIR)" \ + --define "go_package $(GO_PACKAGE)" \ + $(RPM_EXTRAFLAGS) \ + $(RPM_SPECFILES) \ + ) + +clean-rpms: + $(RM) -r '$(RPM_RPMDIR)' '$(RPM_SRCRPMDIR)' + if [ -d '$(RPM_OUTPUT_DIR)' ]; then rmdir --ignore-fail-on-non-empty '$(RPM_OUTPUT_DIR)'; fi +.PHONY: clean-rpms + +clean: clean-rpms + +# We need to be careful 
to expand all the paths before any include is done +# or self_dir could be modified for the next include by the included file. +# Also doing this at the end of the file allows us to user self_dir before it could be modified. +include $(addprefix $(self_dir), \ + ../../lib/golang.mk \ +) diff --git a/vendor/github.com/openshift/library-go/cmd/crd-schema-gen/generator/generator.go b/vendor/github.com/openshift/library-go/cmd/crd-schema-gen/generator/generator.go index a56f07007..cb37958a2 100755 --- a/vendor/github.com/openshift/library-go/cmd/crd-schema-gen/generator/generator.go +++ b/vendor/github.com/openshift/library-go/cmd/crd-schema-gen/generator/generator.go @@ -35,8 +35,8 @@ func Run() error { manifestDir := flag.String("manifests-dir", "manifests", "the directory with existing CRD manifests") outputDir := flag.String("output-dir", "", "optional directory to output the kubebuilder CRDs. By default a temporary directory is used.") verifyOnly := flag.Bool("verify-only", false, "do not write files, only compare and return with return code 1 if dirty") - domain := flag.String("domain", "", "the domain appended to group names (optional).") - repo := flag.String("repo", "", "the repository package name.") + domain := flag.String("domain", "", "the domain appended to group names.") + repo := flag.String("repo", "", "the repository package name (optional).") flag.Parse() diff --git a/vendor/github.com/openshift/library-go/glide.lock b/vendor/github.com/openshift/library-go/glide.lock index 483a42a5e..2c67dcfaf 100644 --- a/vendor/github.com/openshift/library-go/glide.lock +++ b/vendor/github.com/openshift/library-go/glide.lock @@ -1,8 +1,9 @@ -hash: 834b42ac04c13e26423b9cddffbd75a093f4d889cffb059911fae65aac364c7b -updated: 2019-04-17T10:52:09.03874547-04:00 +hash: 14182a87b2489ea8cd2db705bf09aad592752d9c9f7cc6cc840a76bcb179a2e8 +updated: 2019-07-14T22:28:29.452706+02:00 imports: - name: bitbucket.org/ww/goautoneg version: 75cd24fc2f2c2a2088577d12123ddee5f54e0675 + 
vcs: hg - name: github.com/Azure/go-ansiterm version: d6e3b3328b783f23731bc4d058875b0371ff8109 subpackages: @@ -15,6 +16,10 @@ imports: version: b38d23b8782a487059e8fc8773e9a5b228a77cb6 - name: github.com/certifi/gocertifi version: ee1a9a0726d2ae45f54118cac878c990d4016ded +- name: github.com/containerd/continuity + version: aaeac12a7ffcd198ae25440a9dff125c2e2703a7 + subpackages: + - pathdriver - name: github.com/coreos/etcd version: 27fc7e2296f506182f58ce846e48f36b34fe6842 subpackages: @@ -100,23 +105,75 @@ imports: version: 782f4967f2dc4564575ca782fe2d04090b5faca8 subpackages: - spew +- name: github.com/docker/distribution + version: 16128bbac47f75050e82f7e91b04df33775e0c23 + subpackages: + - digestset + - manifest + - manifest/schema1 + - manifest/schema2 + - metrics + - reference + - registry/api/errcode + - registry/api/v2 + - registry/client + - registry/client/auth + - registry/client/auth/challenge + - registry/client/transport + - registry/storage/cache + - registry/storage/cache/memory - name: github.com/docker/docker version: a9fbbdc8dd8794b20af358382ab780559bca589d subpackages: + - api/types + - api/types/blkiodev + - api/types/container + - api/types/filters + - api/types/mount + - api/types/network + - api/types/registry + - api/types/strslice + - api/types/swarm + - api/types/swarm/runtime + - api/types/versions + - opts + - pkg/archive + - pkg/fileutils + - pkg/homedir + - pkg/idtools + - pkg/ioutils + - pkg/jsonmessage + - pkg/longpath + - pkg/mount + - pkg/pools + - pkg/stdcopy + - pkg/system - pkg/term - pkg/term/windows -- name: github.com/elazarl/go-bindata-assetfs - version: 3dcc96556217539f50599357fb481ac0dc7439b9 +- name: github.com/docker/go-connections + version: fd1b1942c4d55f7f210a8387e612dc6ffee78ff6 + subpackages: + - nat +- name: github.com/docker/go-metrics + version: b84716841b82eab644a0c64fc8b42d480e49add5 +- name: github.com/docker/go-units + version: 519db1ee28dcc9fd2474ae59fca29a810482bfb1 +- name: github.com/docker/libnetwork + 
version: 14f9d751adc2d51b38d14b4e14419b76466d3b94 + subpackages: + - ipamutils +- name: github.com/docker/libtrust + version: aabc10ec26b754e797f9028f4589c5b7bd90dc20 - name: github.com/emicklei/go-restful version: ff4f55a206334ef123e4f79bbf348980da81ca46 subpackages: - log -- name: github.com/emicklei/go-restful-swagger12 - version: dcef7f55730566d41eae5db10e7d6981829720f6 - name: github.com/evanphx/json-patch version: 5858425f75500d40c52783dce87d085a483ce135 +- name: github.com/fsouza/go-dockerclient + version: da3951ba2e9e02bc0e7642150b3e265aed7e1df3 - name: github.com/getsentry/raven-go - version: 32a13797442ccb601b11761d74232773c1402d14 + version: c977f96e109525a5d8fa10a19165341f601f38b0 - name: github.com/ghodss/yaml version: c7ce16629ff4cd059ed96ed06419dd3856fd3577 - name: github.com/go-openapi/jsonpointer @@ -185,24 +242,20 @@ imports: version: c518dec07be9a636c38a4650e217be059b5952ec subpackages: - mat64 -- name: github.com/google/btree - version: 7d79101e329e5a3adf994758c578dab82b90c017 - name: github.com/google/gofuzz - version: 44d81051d367757e1c7c6a5a86423ece9afcf63c + version: 24818f796faf91cd76ec7bddd72458fbced7a6c1 - name: github.com/googleapis/gnostic version: 0c5108395e2debce0d731cf0287ddf7242066aba subpackages: - OpenAPIv2 - compiler - extensions -- name: github.com/gregjones/httpcache - version: 787624de3eb7bd915c329cba748687a3b22666a6 - subpackages: - - diskcache +- name: github.com/gorilla/mux + version: d83b6ffe499a29cc05fc977988d0392851779620 - name: github.com/grpc-ecosystem/go-grpc-prometheus version: 2500245aa6110c562d17020fb31a2c133d737799 - name: github.com/hashicorp/golang-lru - version: a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4 + version: 20f1fb78b0740ba8c3cb143a61e86ba5c8669768 subpackages: - simplelru - name: github.com/imdario/mergo @@ -227,14 +280,34 @@ imports: version: c12348ce28de40eed0136aa2b644d0ee0650e56c subpackages: - pbutil +- name: github.com/Microsoft/go-winio + version: 881e3d46423d592d11da9873ff6581dc577a1d0f + 
subpackages: + - pkg/guid - name: github.com/modern-go/concurrent version: bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94 - name: github.com/modern-go/reflect2 version: 94122c33edd36123c84d5368cfb2b69df93a0ec8 +- name: github.com/munnerz/goautoneg + version: a547fc61f48d567d5b4ec6f8aee5573d8efce11d +- name: github.com/Nvveen/Gotty + version: cd527374f1e5bff4938207604a14f2e38a9cf512 - name: github.com/NYTimes/gziphandler version: 56545f4a5d46df9a6648819d1664c3a03a13ffdb +- name: github.com/opencontainers/go-digest + version: ac19fd6e7483ff933754af248d80be865e543d22 +- name: github.com/opencontainers/image-spec + version: da296dcb1e473a9b4e2d148941d7faa9ac8fea3f + subpackages: + - specs-go + - specs-go/v1 +- name: github.com/opencontainers/runc + version: 6cccc1760d57d9e1bc856b96eeb7ee02b7b8101d + subpackages: + - libcontainer/system + - libcontainer/user - name: github.com/openshift/api - version: 7924f9106f8e132f4f33a0c7fb8841b49bfc2d83 + version: f15120709e0ac8de84e11616d8f0cac54e8f52e3 subpackages: - apps - apps/v1 @@ -275,13 +348,16 @@ imports: - servicecertsigner/v1alpha1 - template - template/v1 + - unidling/v1alpha1 - user - user/v1 - webconsole - webconsole/v1 - name: github.com/openshift/client-go - version: 0255926f53935175fe90b8e7672c4c06c17d79e6 + version: c44a8b61b9f46cd9e802384dfeda0bc9942db68a subpackages: + - apps/clientset/versioned/scheme + - apps/clientset/versioned/typed/apps/v1 - config/clientset/versioned - config/clientset/versioned/fake - config/clientset/versioned/scheme @@ -290,24 +366,39 @@ imports: - config/informers/externalversions/config/v1 - config/informers/externalversions/internalinterfaces - config/listers/config/v1 + - quota/clientset/versioned + - quota/clientset/versioned/fake + - quota/clientset/versioned/scheme + - quota/clientset/versioned/typed/quota/v1 + - quota/clientset/versioned/typed/quota/v1/fake + - quota/informers/externalversions + - quota/informers/externalversions/internalinterfaces + - 
quota/informers/externalversions/quota + - quota/informers/externalversions/quota/v1 + - quota/listers/quota/v1 + - route/clientset/versioned + - route/clientset/versioned/fake + - route/clientset/versioned/scheme + - route/clientset/versioned/typed/route/v1 + - route/clientset/versioned/typed/route/v1/fake - name: github.com/pborman/uuid version: ca53cad383cad2479bbba7f7a1a05797ec1386e4 -- name: github.com/peterbourgon/diskv - version: 5f041e8faa004a95c88a202771f4cc3e991971e6 - name: github.com/pkg/errors version: 645ef00459ed84a119197bfb8d8205042c6df63d - name: github.com/pkg/profile version: f6fe06335df110bcf1ed6d4e852b760bfc15beee - name: github.com/prometheus/client_golang - version: e7e903064f5e9eb5da98208bae10b475d4db0f8c + version: 505eaef017263e299324067d40ca2c48f6a2cf50 subpackages: - prometheus + - prometheus/internal + - prometheus/promhttp - name: github.com/prometheus/client_model version: fa8ad6fec33561be4280a8f0514318c79d7f6cb6 subpackages: - go - name: github.com/prometheus/common - version: 13ba4ddd0caa9c28ca7b7bffe1dfa9ed8d5ef207 + version: cfeb6f9992ffa54aaa4f2170ade4067ee478b250 subpackages: - expfmt - internal/bitbucket.org/ww/goautoneg @@ -321,7 +412,7 @@ imports: - name: github.com/PuerkitoBio/urlesc version: 5bd2802263f21d8788851d5305584c82a5c75d7e - name: github.com/rogpeppe/go-internal - version: 438578804ca6f31be148c27683afc419ce47c06e + version: 6f68bf1e81f8552c7dbd47f3bc4371c2db0941a6 subpackages: - modfile - module @@ -345,14 +436,15 @@ imports: - blowfish - ssh/terminal - name: golang.org/x/net - version: 0ed95abb35c445290478a5348a7b38bb154135fd + version: 65e2d4e15006aab9813ff8769e768bbf4bb667a0 subpackages: - context + - context/ctxhttp + - http/httpguts - http2 - http2/hpack - idna - internal/timeseries - - lex/httplex - trace - websocket - name: golang.org/x/oauth2 @@ -389,7 +481,7 @@ imports: - go/ast/astutil - imports - name: google.golang.org/appengine - version: 54a98f90d1c46b7731eb8fb305d2a321c30ef610 + version: 
b2f4a3cf3c67576a2ee09e1fe62656a5086ce880 subpackages: - internal - internal/base @@ -431,17 +523,20 @@ imports: - status - tap - transport +- name: gopkg.in/asn1-ber.v1 + version: f715ec2f112d1e4195b827ad68cf44017a3ef2b1 - name: gopkg.in/inf.v0 version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 +- name: gopkg.in/ldap.v2 + version: bb7a9ca6e4fbc2129e3db588a34bc970ffe811a9 - name: gopkg.in/natefinch/lumberjack.v2 version: 20b71e5b60d756d3d2f80def009790325acc2b23 - name: gopkg.in/yaml.v2 version: 51d6538a90f86fe93ac480b35f37b2be17fef232 - name: k8s.io/api - version: 5cb15d34447165a97c76ed5a60e4e99c8a01ecfe + version: 40a48860b5abbba9aa891b02b32da429b08d96a0 subpackages: - admission/v1beta1 - - admissionregistration/v1alpha1 - admissionregistration/v1beta1 - apps/v1 - apps/v1beta1 @@ -458,16 +553,21 @@ imports: - batch/v1beta1 - batch/v2alpha1 - certificates/v1beta1 + - coordination/v1 - coordination/v1beta1 - core/v1 - events/v1beta1 - extensions/v1beta1 - imagepolicy/v1alpha1 - networking/v1 + - networking/v1beta1 + - node/v1alpha1 + - node/v1beta1 - policy/v1beta1 - rbac/v1 - rbac/v1alpha1 - rbac/v1beta1 + - scheduling/v1 - scheduling/v1alpha1 - scheduling/v1beta1 - settings/v1alpha1 @@ -475,16 +575,16 @@ imports: - storage/v1alpha1 - storage/v1beta1 - name: k8s.io/apiextensions-apiserver - version: 3c74db8dd172051b029f91536c681a1b43694809 - repo: https://github.com/openshift/kubernetes-apiextensions-apiserver + version: 53c4693659ed354d76121458fb819202dd1635fa subpackages: - pkg/apis/apiextensions - pkg/apis/apiextensions/v1beta1 - pkg/client/clientset/clientset/scheme - pkg/client/clientset/clientset/typed/apiextensions/v1beta1 - name: k8s.io/apimachinery - version: 86fb29eff6288413d76bd8506874fddd9fccdff0 + version: d7deff9243b165ee192f5551710ea4285dcfd615 subpackages: + - pkg/api/apitesting - pkg/api/equality - pkg/api/errors - pkg/api/meta @@ -535,13 +635,12 @@ imports: - third_party/forked/golang/json - third_party/forked/golang/reflect - name: k8s.io/apiserver 
- version: 79427f02047f9189a75b8cdaadccaf65a126853e + version: 8b27c41bdbb11ff103caa673315e097bf0289171 subpackages: - pkg/admission - pkg/admission/configuration - pkg/admission/initializer - pkg/admission/metrics - - pkg/admission/plugin/initialization - pkg/admission/plugin/namespace/lifecycle - pkg/admission/plugin/webhook/config - pkg/admission/plugin/webhook/config/apis/webhookadmission @@ -588,6 +687,8 @@ imports: - pkg/endpoints/discovery - pkg/endpoints/filters - pkg/endpoints/handlers + - pkg/endpoints/handlers/fieldmanager + - pkg/endpoints/handlers/fieldmanager/internal - pkg/endpoints/handlers/negotiation - pkg/endpoints/handlers/responsewriters - pkg/endpoints/metrics @@ -605,7 +706,6 @@ imports: - pkg/server/options - pkg/server/resourceconfig - pkg/server/routes - - pkg/server/routes/data/swagger - pkg/server/storage - pkg/storage - pkg/storage/cacher @@ -619,11 +719,8 @@ imports: - pkg/storage/value - pkg/util/dryrun - pkg/util/feature - - pkg/util/flag - pkg/util/flushwriter - - pkg/util/logs - pkg/util/openapi - - pkg/util/trace - pkg/util/webhook - pkg/util/wsstream - plugin/pkg/audit/buffered @@ -635,7 +732,7 @@ imports: - plugin/pkg/authenticator/token/webhook - plugin/pkg/authorizer/webhook - name: k8s.io/client-go - version: b40b2a5939e43f7ffe0028ad67586b7ce50bb675 + version: 6ee68ca5fd8355d024d02f9db0b3b667e8357a0f subpackages: - discovery - discovery/fake @@ -643,7 +740,6 @@ imports: - dynamic/fake - informers - informers/admissionregistration - - informers/admissionregistration/v1alpha1 - informers/admissionregistration/v1beta1 - informers/apps - informers/apps/v1 @@ -662,6 +758,7 @@ imports: - informers/certificates - informers/certificates/v1beta1 - informers/coordination + - informers/coordination/v1 - informers/coordination/v1beta1 - informers/core - informers/core/v1 @@ -672,6 +769,10 @@ imports: - informers/internalinterfaces - informers/networking - informers/networking/v1 + - informers/networking/v1beta1 + - informers/node + - 
informers/node/v1alpha1 + - informers/node/v1beta1 - informers/policy - informers/policy/v1beta1 - informers/rbac @@ -679,6 +780,7 @@ imports: - informers/rbac/v1alpha1 - informers/rbac/v1beta1 - informers/scheduling + - informers/scheduling/v1 - informers/scheduling/v1alpha1 - informers/scheduling/v1beta1 - informers/settings @@ -690,8 +792,6 @@ imports: - kubernetes - kubernetes/fake - kubernetes/scheme - - kubernetes/typed/admissionregistration/v1alpha1 - - kubernetes/typed/admissionregistration/v1alpha1/fake - kubernetes/typed/admissionregistration/v1beta1 - kubernetes/typed/admissionregistration/v1beta1/fake - kubernetes/typed/apps/v1 @@ -724,6 +824,8 @@ imports: - kubernetes/typed/batch/v2alpha1/fake - kubernetes/typed/certificates/v1beta1 - kubernetes/typed/certificates/v1beta1/fake + - kubernetes/typed/coordination/v1 + - kubernetes/typed/coordination/v1/fake - kubernetes/typed/coordination/v1beta1 - kubernetes/typed/coordination/v1beta1/fake - kubernetes/typed/core/v1 @@ -734,6 +836,12 @@ imports: - kubernetes/typed/extensions/v1beta1/fake - kubernetes/typed/networking/v1 - kubernetes/typed/networking/v1/fake + - kubernetes/typed/networking/v1beta1 + - kubernetes/typed/networking/v1beta1/fake + - kubernetes/typed/node/v1alpha1 + - kubernetes/typed/node/v1alpha1/fake + - kubernetes/typed/node/v1beta1 + - kubernetes/typed/node/v1beta1/fake - kubernetes/typed/policy/v1beta1 - kubernetes/typed/policy/v1beta1/fake - kubernetes/typed/rbac/v1 @@ -742,6 +850,8 @@ imports: - kubernetes/typed/rbac/v1alpha1/fake - kubernetes/typed/rbac/v1beta1 - kubernetes/typed/rbac/v1beta1/fake + - kubernetes/typed/scheduling/v1 + - kubernetes/typed/scheduling/v1/fake - kubernetes/typed/scheduling/v1alpha1 - kubernetes/typed/scheduling/v1alpha1/fake - kubernetes/typed/scheduling/v1beta1 @@ -754,7 +864,6 @@ imports: - kubernetes/typed/storage/v1alpha1/fake - kubernetes/typed/storage/v1beta1 - kubernetes/typed/storage/v1beta1/fake - - listers/admissionregistration/v1alpha1 - 
listers/admissionregistration/v1beta1 - listers/apps/v1 - listers/apps/v1beta1 @@ -767,15 +876,20 @@ imports: - listers/batch/v1beta1 - listers/batch/v2alpha1 - listers/certificates/v1beta1 + - listers/coordination/v1 - listers/coordination/v1beta1 - listers/core/v1 - listers/events/v1beta1 - listers/extensions/v1beta1 - listers/networking/v1 + - listers/networking/v1beta1 + - listers/node/v1alpha1 + - listers/node/v1beta1 - listers/policy/v1beta1 - listers/rbac/v1 - listers/rbac/v1alpha1 - listers/rbac/v1beta1 + - listers/scheduling/v1 - listers/scheduling/v1alpha1 - listers/scheduling/v1beta1 - listers/settings/v1alpha1 @@ -790,7 +904,16 @@ imports: - rest - rest/watch - restmapper + - scale + - scale/scheme + - scale/scheme/appsint + - scale/scheme/appsv1beta1 + - scale/scheme/appsv1beta2 + - scale/scheme/autoscalingv1 + - scale/scheme/extensionsint + - scale/scheme/extensionsv1beta1 - testing + - third_party/forked/golang/template - tools/auth - tools/cache - tools/clientcmd @@ -802,16 +925,23 @@ imports: - tools/metrics - tools/pager - tools/record + - tools/record/util - tools/reference + - tools/watch - transport - - util/buffer - util/cert - util/connrotation - util/flowcontrol - util/homedir - - util/integer + - util/jsonpath + - util/keyutil - util/retry - util/workqueue +- name: k8s.io/component-base + version: 4a91899592f42b2f5859587cc5a676a5b94d2ee3 + subpackages: + - cli/flag + - logs - name: k8s.io/gengo version: e17681d19d3ac4837a019ece36c2a0ec31ffe985 subpackages: @@ -821,9 +951,9 @@ imports: - parser - types - name: k8s.io/klog - version: 8139d8cb77af419532b33dfa7dd09fbc5f1d344f + version: 8e90cee79f823779174776412c13478955131846 - name: k8s.io/kube-aggregator - version: 3e0149950b0e22a3b8579db52bd50e40d0dac10e + version: da8327669ac57b6e6a06676eeb7de19c9780f76d subpackages: - pkg/apis/apiregistration - pkg/apis/apiregistration/v1 @@ -831,16 +961,22 @@ imports: - pkg/client/clientset_generated/clientset/scheme - 
pkg/client/clientset_generated/clientset/typed/apiregistration/v1 - name: k8s.io/kube-openapi - version: c59034cc13d587f5ef4e85ca0ade0c1866ae8e1d + version: b3a7cee44a305be0a69e1b9ac03018307287e1b0 subpackages: - - pkg/aggregator - pkg/builder - pkg/common - pkg/handler + - pkg/schemaconv - pkg/util - pkg/util/proto +- name: k8s.io/utils + version: c2654d5206da6b7b6ace12841e8f359bb89b443c + subpackages: + - buffer + - integer + - trace - name: sigs.k8s.io/controller-tools - version: 4e23e49e5d401ca6ced86aa30262d0cf2488c504 + version: 72ae52c08b9dd626cfb64ebef0fbf40ce667939b repo: https://github.com/openshift/kubernetes-sigs-controller-tools subpackages: - pkg/crd/generator @@ -849,6 +985,14 @@ imports: - pkg/internal/codegen/parse - pkg/internal/general - pkg/util +- name: sigs.k8s.io/structured-merge-diff + version: e85c7b244fd2cc57bb829d73a061f93a441e63ce + subpackages: + - fieldpath + - merge + - schema + - typed + - value - name: sigs.k8s.io/yaml version: fd68e9863619f6ec2fdd8625fe1f02e7c877e480 testImports: diff --git a/vendor/github.com/openshift/library-go/glide.yaml b/vendor/github.com/openshift/library-go/glide.yaml index 8f5129a9b..920b7ca6e 100644 --- a/vendor/github.com/openshift/library-go/glide.yaml +++ b/vendor/github.com/openshift/library-go/glide.yaml @@ -1,15 +1,17 @@ package: github.com/openshift/library-go import: - package: k8s.io/apimachinery - version: kubernetes-1.13.4 + version: kubernetes-1.14.0 - package: k8s.io/api - version: kubernetes-1.13.4 + version: kubernetes-1.14.0 - package: k8s.io/apiserver - version: kubernetes-1.13.4 + version: kubernetes-1.14.0 +- package: k8s.io/apiextensions-apiserver + version: kubernetes-1.14.0 - package: k8s.io/kube-aggregator - version: kubernetes-1.13.4 + version: kubernetes-1.14.0 - package: k8s.io/client-go - version: kubernetes-1.13.4 + version: kubernetes-1.14.0 - package: github.com/openshift/api version: master - package: github.com/openshift/client-go @@ -17,11 +19,8 @@ import: # crd-schema-gen 
# TODO: we need to this to get nullable patch, but we will replace this with new repo soon. -- package: k8s.io/apiextensions-apiserver - repo: https://github.com/openshift/kubernetes-apiextensions-apiserver - version: origin-4.1-kubernetes-1.13.4 - package: sigs.k8s.io/controller-tools - repo: https://github.com/openshift/kubernetes-sigs-controller-tools + repo: https://github.com/openshift/kubernetes-sigs-controller-tools version: origin-4.1-kubernetes-1.13.4 - package: k8s.io/gengo version: e17681d19d3ac4837a019ece36c2a0ec31ffe985 @@ -31,7 +30,7 @@ import: version: c87b6cf5033d2c6486046f045eeebdc3d910fd38 # sig-master - package: github.com/getsentry/raven-go - version: 32a13797442ccb601b11761d74232773c1402d14 + version: c977f96e109525a5d8fa10a19165341f601f38b0 # sig-master - transitive through raven-go, this matches the kube level - package: github.com/pkg/errors version: v0.8.0 @@ -57,3 +56,14 @@ import: # matches openshift/origin - package: github.com/gonum/graph version: 50b27dea7ebbfb052dfaf91681afc6fde28d8796 + +# devexp: +- package: github.com/fsouza/go-dockerclient + version: da3951ba2e9e02bc0e7642150b3e265aed7e1df3 # matching origin 4.2 level +- package: github.com/docker/distribution + version: 16128bbac47f75050e82f7e91b04df33775e0c23 # level currently used in origin to base the origin patches on. 
See https://github.com/openshift/image-registry/pull/126/commits/eb32acef7827ac2227c3aeaddc444880ed98edb3 leading to https://github.com/openshift/docker-distribution/commits/image-registry-3.11 + +# VCS issues +- package: bitbucket.org/ww/goautoneg + vcs: hg + diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissionregistrationtesting/admissiontesting.go b/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissionregistrationtesting/admissiontesting.go new file mode 100644 index 000000000..ecaf34d8a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissionregistrationtesting/admissiontesting.go @@ -0,0 +1,25 @@ +package admissionregistrationtesting + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/admission" +) + +func AdmissionRegistrationTest(registeredAdmission *admission.Plugins, orderedAdmissionPlugins []string, defaultOffPlugins sets.String) error { + errs := []error{} + registeredPlugins := sets.NewString(registeredAdmission.Registered()...) + orderedAdmissionPluginsSet := sets.NewString(orderedAdmissionPlugins...) 
+ + // make sure that all orderedAdmissionPlugins are registered + if diff := orderedAdmissionPluginsSet.Difference(registeredPlugins); len(diff) > 0 { + errs = append(errs, fmt.Errorf("registered plugins missing admission plugins: %v", diff.List())) + } + if diff := defaultOffPlugins.Difference(orderedAdmissionPluginsSet); len(diff) > 0 { + errs = append(errs, fmt.Errorf("ordered admission plugins missing defaultOff plugins: %v", diff.List())) + } + + return errors.NewAggregate(errs) +} diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissionrestconfig/intiializers.go b/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissionrestconfig/intiializers.go new file mode 100644 index 000000000..5b4dc1036 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissionrestconfig/intiializers.go @@ -0,0 +1,30 @@ +package admissionrestconfig + +import ( + "k8s.io/apiserver/pkg/admission" + restclient "k8s.io/client-go/rest" +) + +func NewInitializer(restClientConfig restclient.Config) admission.PluginInitializer { + return &localInitializer{ + restClientConfig: restClientConfig, + } +} + +// WantsRESTClientConfig gives access to a RESTClientConfig. It's useful for doing unusual things with transports. 
+type WantsRESTClientConfig interface { + SetRESTClientConfig(restclient.Config) + admission.InitializationValidator +} + +type localInitializer struct { + restClientConfig restclient.Config +} + +// Initialize will check the initialization interfaces implemented by each plugin +// and provide the appropriate initialization data +func (i *localInitializer) Initialize(plugin admission.Interface) { + if wants, ok := plugin.(WantsRESTClientConfig); ok { + wants.SetRESTClientConfig(i.restClientConfig) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout/decorator.go b/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout/decorator.go new file mode 100644 index 000000000..77cd2ef72 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout/decorator.go @@ -0,0 +1,20 @@ +package admissiontimeout + +import ( + "time" + + "k8s.io/apiserver/pkg/admission" +) + +// AdmissionTimeout provides a decorator that will fail an admission plugin after a certain amount of time +type AdmissionTimeout struct { + Timeout time.Duration +} + +func (d AdmissionTimeout) WithTimeout(admissionPlugin admission.Interface, name string) admission.Interface { + return pluginHandlerWithTimeout{ + name: name, + admissionPlugin: admissionPlugin, + timeout: d.Timeout, + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout/timeoutadmission.go b/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout/timeoutadmission.go new file mode 100644 index 000000000..65a0219a5 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout/timeoutadmission.go @@ -0,0 +1,67 @@ +package admissiontimeout + +import ( + "fmt" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/admission" +) + +type pluginHandlerWithTimeout struct 
{ + name string + admissionPlugin admission.Interface + timeout time.Duration +} + +var _ admission.ValidationInterface = &pluginHandlerWithTimeout{} +var _ admission.MutationInterface = &pluginHandlerWithTimeout{} + +func (p pluginHandlerWithTimeout) Handles(operation admission.Operation) bool { + return p.admissionPlugin.Handles(operation) +} + +func (p pluginHandlerWithTimeout) Admit(a admission.Attributes, o admission.ObjectInterfaces) error { + mutatingHandler, ok := p.admissionPlugin.(admission.MutationInterface) + if !ok { + return nil + } + + admissionDone := make(chan struct{}) + admissionErr := fmt.Errorf("default to mutation error") + go func() { + defer utilruntime.HandleCrash() + defer close(admissionDone) + admissionErr = mutatingHandler.Admit(a, o) + }() + + select { + case <-admissionDone: + return admissionErr + case <-time.After(p.timeout): + return errors.NewInternalError(fmt.Errorf("admission plugin %q failed to complete mutation in %v", p.name, p.timeout)) + } +} + +func (p pluginHandlerWithTimeout) Validate(a admission.Attributes, o admission.ObjectInterfaces) error { + validatingHandler, ok := p.admissionPlugin.(admission.ValidationInterface) + if !ok { + return nil + } + + admissionDone := make(chan struct{}) + admissionErr := fmt.Errorf("default to validation error") + go func() { + defer utilruntime.HandleCrash() + defer close(admissionDone) + admissionErr = validatingHandler.Validate(a, o) + }() + + select { + case <-admissionDone: + return admissionErr + case <-time.After(p.timeout): + return errors.NewInternalError(fmt.Errorf("admission plugin %q failed to complete validation in %v", p.name, p.timeout)) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout/timeoutadmission_test.go b/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout/timeoutadmission_test.go new file mode 100644 index 000000000..7880c5974 --- /dev/null +++ 
b/vendor/github.com/openshift/library-go/pkg/apiserver/admission/admissiontimeout/timeoutadmission_test.go @@ -0,0 +1,106 @@ +package admissiontimeout + +import ( + "fmt" + "strings" + "testing" + "time" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/admission" +) + +type admitFunc func(a admission.Attributes, o admission.ObjectInterfaces) error + +type dummyAdmit struct { + admitFn admitFunc +} + +func (p dummyAdmit) Handles(operation admission.Operation) bool { + return true +} + +func (p dummyAdmit) Admit(a admission.Attributes, o admission.ObjectInterfaces) error { + return p.admitFn(a, o) +} + +func (p dummyAdmit) Validate(a admission.Attributes, o admission.ObjectInterfaces) error { + return p.admitFn(a, o) +} + +func TestTimeoutAdmission(t *testing.T) { + utilruntime.ReallyCrash = false + + tests := []struct { + name string + + timeout time.Duration + admissionPlugin func() (admit admitFunc, stopCh chan struct{}) + expectedError string + }{ + { + name: "stops on time", + timeout: 50 * time.Millisecond, + admissionPlugin: func() (admitFunc, chan struct{}) { + stopCh := make(chan struct{}) + return func(a admission.Attributes, o admission.ObjectInterfaces) error { + <-stopCh + return nil + }, stopCh + }, + expectedError: `fake-name" failed to complete`, + }, + { + name: "stops on success", + timeout: 500 * time.Millisecond, + admissionPlugin: func() (admitFunc, chan struct{}) { + stopCh := make(chan struct{}) + return func(a admission.Attributes, o admission.ObjectInterfaces) error { + return fmt.Errorf("fake failure to finish") + }, stopCh + }, + expectedError: "fake failure to finish", + }, + { + name: "no crash on panic", + timeout: 500 * time.Millisecond, + admissionPlugin: func() (admitFunc, chan struct{}) { + stopCh := make(chan struct{}) + return func(a admission.Attributes, o admission.ObjectInterfaces) error { + panic("fail!") + }, stopCh + }, + expectedError: "default to ", + }, + } + + for _, test := range tests { + 
t.Run(test.name, func(t *testing.T) { + admitFn, stopCh := test.admissionPlugin() + defer close(stopCh) + + fakePlugin := dummyAdmit{admitFn: admitFn} + decorator := AdmissionTimeout{Timeout: test.timeout} + decoratedPlugin := decorator.WithTimeout(fakePlugin, "fake-name") + + actualErr := decoratedPlugin.(admission.MutationInterface).Admit(nil, nil) + validateErr(t, actualErr, test.expectedError) + + actualErr = decoratedPlugin.(admission.ValidationInterface).Validate(nil, nil) + validateErr(t, actualErr, test.expectedError) + }) + } +} + +func validateErr(t *testing.T, actualErr error, expectedError string) { + t.Helper() + switch { + case actualErr == nil && len(expectedError) != 0: + t.Fatal(expectedError) + case actualErr == nil && len(expectedError) == 0: + case actualErr != nil && len(expectedError) == 0: + t.Fatal(actualErr) + case actualErr != nil && !strings.Contains(actualErr.Error(), expectedError): + t.Fatal(actualErr) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/cachecontrol.go b/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/cachecontrol.go new file mode 100644 index 000000000..611735ccc --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/cachecontrol.go @@ -0,0 +1,35 @@ +package apiserverconfig + +import ( + "net/http" + "strings" +) + +// cacheExcludedPaths is small and simple until the handlers include the cache headers they need +var cacheExcludedPathPrefixes = []string{ + "/swagger-2.0.0.json", + "/swagger-2.0.0.pb-v1", + "/swagger-2.0.0.pb-v1.gz", + "/swagger.json", + "/swaggerapi", + "/openapi/", +} + +// cacheControlFilter sets the Cache-Control header to the specified value. 
+func WithCacheControl(handler http.Handler, value string) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if _, ok := w.Header()["Cache-Control"]; ok { + handler.ServeHTTP(w, req) + return + } + for _, prefix := range cacheExcludedPathPrefixes { + if strings.HasPrefix(req.URL.Path, prefix) { + handler.ServeHTTP(w, req) + return + } + } + + w.Header().Set("Cache-Control", value) + handler.ServeHTTP(w, req) + }) +} diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/longrunning.go b/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/longrunning.go new file mode 100644 index 000000000..5dde34ca7 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/longrunning.go @@ -0,0 +1,26 @@ +package apiserverconfig + +import ( + "net/http" + "regexp" + + "k8s.io/apimachinery/pkg/util/sets" + apirequest "k8s.io/apiserver/pkg/endpoints/request" + genericfilters "k8s.io/apiserver/pkg/server/filters" +) + +// request paths that match this regular expression will be treated as long running +// and not subjected to the default server timeout. 
+const originLongRunningEndpointsRE = "(/|^)(buildconfigs/.*/instantiatebinary|imagestreamimports)$" + +var ( + originLongRunningRequestRE = regexp.MustCompile(originLongRunningEndpointsRE) + kubeLongRunningFunc = genericfilters.BasicLongRunningRequestCheck( + sets.NewString("watch", "proxy"), + sets.NewString("attach", "exec", "proxy", "log", "portforward"), + ) +) + +func IsLongRunningRequest(r *http.Request, requestInfo *apirequest.RequestInfo) bool { + return originLongRunningRequestRE.MatchString(r.URL.Path) || kubeLongRunningFunc(r, requestInfo) +} diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/personal_subjectaccessreview.go b/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/personal_subjectaccessreview.go new file mode 100644 index 000000000..d97946b9b --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/personal_subjectaccessreview.go @@ -0,0 +1,129 @@ +package apiserverconfig + +import ( + "bytes" + "io/ioutil" + "net/http" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apiserver/pkg/endpoints/request" + apirequest "k8s.io/apiserver/pkg/endpoints/request" + + authorizationv1 "github.com/openshift/api/authorization/v1" +) + +type personalSARRequestInfoResolver struct { + // infoFactory is used to determine info for the request + infoFactory apirequest.RequestInfoResolver +} + +func newPersonalSARRequestInfoResolver(infoFactory apirequest.RequestInfoResolver) apirequest.RequestInfoResolver { + return &personalSARRequestInfoResolver{ + infoFactory: infoFactory, + } +} + +func (a *personalSARRequestInfoResolver) NewRequestInfo(req *http.Request) (*request.RequestInfo, error) { + requestInfo, err := a.infoFactory.NewRequestInfo(req) + if err != nil { + return requestInfo, err + } + + // only match SAR and LSAR requests for 
personal review + switch { + case !requestInfo.IsResourceRequest: + return requestInfo, nil + + case len(requestInfo.APIGroup) != 0 && requestInfo.APIGroup != "authorization.openshift.io": + return requestInfo, nil + + case len(requestInfo.Subresource) != 0: + return requestInfo, nil + + case requestInfo.Verb != "create": + return requestInfo, nil + + case requestInfo.Resource != "subjectaccessreviews" && requestInfo.Resource != "localsubjectaccessreviews": + return requestInfo, nil + } + + // at this point we're probably running a SAR or LSAR. Decode the body and check. This is expensive. + isSelfSAR, err := isPersonalAccessReviewFromRequest(req, requestInfo) + if err != nil { + return nil, err + } + if !isSelfSAR { + return requestInfo, nil + } + + // if we do have a self-SAR, rewrite the requestInfo to indicate this is a selfsubjectaccessreviews.authorization.k8s.io request + requestInfo.APIGroup = "authorization.k8s.io" + requestInfo.Resource = "selfsubjectaccessreviews" + + return requestInfo, nil +} + +// isPersonalAccessReviewFromRequest this variant handles the case where we have an httpRequest +func isPersonalAccessReviewFromRequest(req *http.Request, requestInfo *request.RequestInfo) (bool, error) { + // TODO once we're integrated with the api installer, we should have direct access to the deserialized content + // for now, this only happens on subjectaccessreviews with a personal check, pay the double retrieve and decode cost + body, err := ioutil.ReadAll(req.Body) + if err != nil { + return false, err + } + req.Body = ioutil.NopCloser(bytes.NewBuffer(body)) + + defaultGVK := schema.GroupVersionKind{Version: requestInfo.APIVersion, Group: requestInfo.APIGroup} + switch requestInfo.Resource { + case "subjectaccessreviews": + defaultGVK.Kind = "SubjectAccessReview" + case "localsubjectaccessreviews": + defaultGVK.Kind = "LocalSubjectAccessReview" + } + + obj, _, err := sarCodecFactory.UniversalDeserializer().Decode(body, &defaultGVK, nil) + if err != nil { 
+ return false, err + } + switch castObj := obj.(type) { + case *authorizationv1.SubjectAccessReview: + return IsPersonalAccessReviewFromSAR(castObj), nil + + case *authorizationv1.LocalSubjectAccessReview: + return isPersonalAccessReviewFromLocalSAR(castObj), nil + + default: + return false, nil + } +} + +// IsPersonalAccessReviewFromSAR this variant handles the case where we have an SAR +func IsPersonalAccessReviewFromSAR(sar *authorizationv1.SubjectAccessReview) bool { + if len(sar.User) == 0 && len(sar.GroupsSlice) == 0 { + return true + } + + return false +} + +// isPersonalAccessReviewFromLocalSAR this variant handles the case where we have a local SAR +func isPersonalAccessReviewFromLocalSAR(sar *authorizationv1.LocalSubjectAccessReview) bool { + if len(sar.User) == 0 && len(sar.GroupsSlice) == 0 { + return true + } + + return false +} + +var ( + sarScheme = runtime.NewScheme() + sarCodecFactory = serializer.NewCodecFactory(sarScheme) +) + +func init() { + utilruntime.Must(authorizationv1.Install(sarScheme)) + utilruntime.Must(authorizationv1.DeprecatedInstallWithoutGroup(sarScheme)) +} diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/project_request_info_resolver.go b/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/project_request_info_resolver.go new file mode 100644 index 000000000..7682302f8 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/project_request_info_resolver.go @@ -0,0 +1,34 @@ +package apiserverconfig + +import ( + "net/http" + + apirequest "k8s.io/apiserver/pkg/endpoints/request" + + projectv1 "github.com/openshift/api/project/v1" +) + +type projectRequestInfoResolver struct { + // infoFactory is used to determine info for the request + infoFactory apirequest.RequestInfoResolver +} + +func newProjectRequestInfoResolver(infoFactory apirequest.RequestInfoResolver) apirequest.RequestInfoResolver { + return &projectRequestInfoResolver{ + infoFactory: 
infoFactory, + } +} + +func (a *projectRequestInfoResolver) NewRequestInfo(req *http.Request) (*apirequest.RequestInfo, error) { + requestInfo, err := a.infoFactory.NewRequestInfo(req) + if err != nil { + return requestInfo, err + } + + // if the resource is projects, we need to set the namespace to the value of the name. + if (len(requestInfo.APIGroup) == 0 || requestInfo.APIGroup == projectv1.GroupName) && requestInfo.Resource == "projects" && len(requestInfo.Name) > 0 { + requestInfo.Namespace = requestInfo.Name + } + + return requestInfo, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/requestinforesolver.go b/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/requestinforesolver.go new file mode 100644 index 000000000..d14647d55 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/apiserverconfig/requestinforesolver.go @@ -0,0 +1,17 @@ +package apiserverconfig + +import ( + "k8s.io/apimachinery/pkg/util/sets" + apirequest "k8s.io/apiserver/pkg/endpoints/request" +) + +func OpenshiftRequestInfoResolver() apirequest.RequestInfoResolver { + // Default API request info factory + requestInfoFactory := &apirequest.RequestInfoFactory{ + APIPrefixes: sets.NewString("api", "apis"), + GrouplessAPIPrefixes: sets.NewString("api"), + } + personalSARRequestInfoResolver := newPersonalSARRequestInfoResolver(requestInfoFactory) + projectRequestInfoResolver := newProjectRequestInfoResolver(personalSARRequestInfoResolver) + return projectRequestInfoResolver +} diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/httprequest/httprequest.go b/vendor/github.com/openshift/library-go/pkg/apiserver/httprequest/httprequest.go new file mode 100644 index 000000000..91539fb6a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/httprequest/httprequest.go @@ -0,0 +1,129 @@ +package httprequest + +import ( + "net" + "net/http" + "strings" + + "bitbucket.org/ww/goautoneg" +) + +// 
PrefersHTML returns true if the request was made by something that looks like a browser, or can receive HTML +func PrefersHTML(req *http.Request) bool { + accepts := goautoneg.ParseAccept(req.Header.Get("Accept")) + acceptsHTML := false + acceptsJSON := false + for _, accept := range accepts { + if accept.Type == "text" && accept.SubType == "html" { + acceptsHTML = true + } else if accept.Type == "application" && accept.SubType == "json" { + acceptsJSON = true + } + } + + // If HTML is accepted, return true + if acceptsHTML { + return true + } + + // If JSON was specifically requested, return false + // This gives browsers a way to make requests and add an "Accept" header to request JSON + if acceptsJSON { + return false + } + + // In Intranet/Compatibility mode, IE sends an Accept header that does not contain "text/html". + if strings.HasPrefix(req.UserAgent(), "Mozilla") { + return true + } + + return false +} + +// SchemeHost returns the scheme and host used to make this request. +// Suitable for use to compute scheme/host in returned 302 redirect Location. +// Note the returned host is not normalized, and may or may not contain a port. 
+// Returned values are based on the following information: +// +// Host: +// * X-Forwarded-Host/X-Forwarded-Port headers +// * Host field on the request (parsed from Host header) +// * Host in the request's URL (parsed from Request-Line) +// +// Scheme: +// * X-Forwarded-Proto header +// * Existence of TLS information on the request implies https +// * Scheme in the request's URL (parsed from Request-Line) +// * Port (if included in calculated Host value, 443 implies https) +// * Otherwise, defaults to "http" +func SchemeHost(req *http.Request) (string /*scheme*/, string /*host*/) { + forwarded := func(attr string) string { + // Get the X-Forwarded- value + value := req.Header.Get("X-Forwarded-" + attr) + // Take the first comma-separated value, if multiple exist + value = strings.SplitN(value, ",", 2)[0] + // Trim whitespace + return strings.TrimSpace(value) + } + + hasExplicitHost := func(h string) bool { + _, _, err := net.SplitHostPort(h) + return err == nil + } + + forwardedHost := forwarded("Host") + host := "" + hostHadExplicitPort := false + switch { + case len(forwardedHost) > 0: + host = forwardedHost + hostHadExplicitPort = hasExplicitHost(host) + + // If both X-Forwarded-Host and X-Forwarded-Port are sent, use the explicit port info + if forwardedPort := forwarded("Port"); len(forwardedPort) > 0 { + if h, _, err := net.SplitHostPort(forwardedHost); err == nil { + host = net.JoinHostPort(h, forwardedPort) + } else { + host = net.JoinHostPort(forwardedHost, forwardedPort) + } + } + + case len(req.Host) > 0: + host = req.Host + hostHadExplicitPort = hasExplicitHost(host) + + case len(req.URL.Host) > 0: + host = req.URL.Host + hostHadExplicitPort = hasExplicitHost(host) + } + + port := "" + if _, p, err := net.SplitHostPort(host); err == nil { + port = p + } + + forwardedProto := forwarded("Proto") + scheme := "" + switch { + case len(forwardedProto) > 0: + scheme = forwardedProto + case req.TLS != nil: + scheme = "https" + case len(req.URL.Scheme) > 0: + 
scheme = req.URL.Scheme + case port == "443": + scheme = "https" + default: + scheme = "http" + } + + if !hostHadExplicitPort { + if (scheme == "https" && port == "443") || (scheme == "http" && port == "80") { + if hostWithoutPort, _, err := net.SplitHostPort(host); err == nil { + host = hostWithoutPort + } + } + } + + return scheme, host +} diff --git a/vendor/github.com/openshift/library-go/pkg/apiserver/httprequest/httprequest_test.go b/vendor/github.com/openshift/library-go/pkg/apiserver/httprequest/httprequest_test.go new file mode 100644 index 000000000..add344aed --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apiserver/httprequest/httprequest_test.go @@ -0,0 +1,236 @@ +package httprequest + +import ( + "crypto/tls" + "net/http" + "net/url" + "testing" +) + +func TestSchemeHost(t *testing.T) { + + testcases := map[string]struct { + req *http.Request + expectedScheme string + expectedHost string + }{ + "X-Forwarded-Host and X-Forwarded-Port combined": { + req: &http.Request{ + URL: &url.URL{Path: "/"}, + Host: "127.0.0.1", + Header: http.Header{ + "X-Forwarded-Host": []string{"example.com"}, + "X-Forwarded-Port": []string{"443"}, + "X-Forwarded-Proto": []string{"https"}, + }, + }, + expectedScheme: "https", + expectedHost: "example.com", + }, + "X-Forwarded-Port overwrites X-Forwarded-Host port": { + req: &http.Request{ + URL: &url.URL{Path: "/"}, + Host: "127.0.0.1", + Header: http.Header{ + "X-Forwarded-Host": []string{"example.com:1234"}, + "X-Forwarded-Port": []string{"443"}, + "X-Forwarded-Proto": []string{"https"}, + }, + }, + expectedScheme: "https", + expectedHost: "example.com:443", + }, + "X-Forwarded-* multiple attrs": { + req: &http.Request{ + URL: &url.URL{Host: "urlhost", Path: "/"}, + Host: "reqhost", + Header: http.Header{ + "X-Forwarded-Host": []string{"example.com,foo.com"}, + "X-Forwarded-Port": []string{"443,123"}, + "X-Forwarded-Proto": []string{"https,http"}, + }, + }, + expectedScheme: "https", + expectedHost: 
"example.com", + }, + "stripped X-Forwarded-Host and X-Forwarded-Port with non-standard port": { + req: &http.Request{ + URL: &url.URL{Path: "/"}, + Host: "127.0.0.1", + Header: http.Header{ + "X-Forwarded-Host": []string{"example.com"}, + "X-Forwarded-Port": []string{"80"}, + "X-Forwarded-Proto": []string{"https"}, + }, + }, + expectedScheme: "https", + expectedHost: "example.com:80", + }, + "detect scheme from X-Forwarded-Port": { + req: &http.Request{ + URL: &url.URL{Path: "/"}, + Host: "127.0.0.1", + Header: http.Header{ + "X-Forwarded-Host": []string{"example.com"}, + "X-Forwarded-Port": []string{"443"}, + }, + }, + expectedScheme: "https", + expectedHost: "example.com", + }, + + "req host": { + req: &http.Request{ + URL: &url.URL{Host: "urlhost", Path: "/"}, + Host: "example.com", + }, + expectedScheme: "http", + expectedHost: "example.com", + }, + "req host with port": { + req: &http.Request{ + URL: &url.URL{Host: "urlhost", Path: "/"}, + Host: "example.com:80", + }, + expectedScheme: "http", + expectedHost: "example.com:80", + }, + "req host with tls port": { + req: &http.Request{ + URL: &url.URL{Host: "urlhost", Path: "/"}, + Host: "example.com:443", + }, + expectedScheme: "https", + expectedHost: "example.com:443", + }, + + "req tls": { + req: &http.Request{ + URL: &url.URL{Path: "/"}, + Host: "example.com", + TLS: &tls.ConnectionState{}, + }, + expectedScheme: "https", + expectedHost: "example.com", + }, + + "req url": { + req: &http.Request{ + URL: &url.URL{Scheme: "https", Host: "example.com", Path: "/"}, + }, + expectedScheme: "https", + expectedHost: "example.com", + }, + "req url with port": { + req: &http.Request{ + URL: &url.URL{Scheme: "https", Host: "example.com:123", Path: "/"}, + }, + expectedScheme: "https", + expectedHost: "example.com:123", + }, + + // The following scenarios are captured from actual direct requests to pods + "non-tls pod": { + req: &http.Request{ + URL: &url.URL{Path: "/"}, + Host: "172.17.0.2:9080", + }, + expectedScheme: 
"http", + expectedHost: "172.17.0.2:9080", + }, + "tls pod": { + req: &http.Request{ + URL: &url.URL{Path: "/"}, + Host: "172.17.0.2:9443", + TLS: &tls.ConnectionState{ /* request has non-nil TLS connection state */ }, + }, + expectedScheme: "https", + expectedHost: "172.17.0.2:9443", + }, + + // The following scenarios are captured from actual requests to pods via services + "svc -> non-tls pod": { + req: &http.Request{ + URL: &url.URL{Path: "/"}, + Host: "service.default.svc.cluster.local:10080", + }, + expectedScheme: "http", + expectedHost: "service.default.svc.cluster.local:10080", + }, + "svc -> tls pod": { + req: &http.Request{ + URL: &url.URL{Path: "/"}, + Host: "service.default.svc.cluster.local:10443", + TLS: &tls.ConnectionState{ /* request has non-nil TLS connection state */ }, + }, + expectedScheme: "https", + expectedHost: "service.default.svc.cluster.local:10443", + }, + + // The following scenarios are captured from actual requests to pods via services via routes serviced by haproxy + "haproxy non-tls route -> svc -> non-tls pod": { + req: &http.Request{ + URL: &url.URL{Path: "/"}, + Host: "route-namespace.router.default.svc.cluster.local", + Header: http.Header{ + "X-Forwarded-Host": []string{"route-namespace.router.default.svc.cluster.local"}, + "X-Forwarded-Port": []string{"80"}, + "X-Forwarded-Proto": []string{"http"}, + "Forwarded": []string{"for=172.18.2.57;host=route-namespace.router.default.svc.cluster.local;proto=http"}, + "X-Forwarded-For": []string{"172.18.2.57"}, + }, + }, + expectedScheme: "http", + expectedHost: "route-namespace.router.default.svc.cluster.local", + }, + "haproxy edge terminated route -> svc -> non-tls pod": { + req: &http.Request{ + URL: &url.URL{Path: "/"}, + Host: "route-namespace.router.default.svc.cluster.local", + Header: http.Header{ + "X-Forwarded-Host": []string{"route-namespace.router.default.svc.cluster.local"}, + "X-Forwarded-Port": []string{"443"}, + "X-Forwarded-Proto": []string{"https"}, + "Forwarded": 
[]string{"for=172.18.2.57;host=route-namespace.router.default.svc.cluster.local;proto=https"}, + "X-Forwarded-For": []string{"172.18.2.57"}, + }, + }, + expectedScheme: "https", + expectedHost: "route-namespace.router.default.svc.cluster.local", + }, + "haproxy edge terminated route -> svc -> non-tls pod with the explicit port": { + req: &http.Request{ + URL: &url.URL{Path: "/"}, + Host: "route-namespace.router.default.svc.cluster.local:443", + Header: http.Header{ + "X-Forwarded-Host": []string{"route-namespace.router.default.svc.cluster.local:443"}, + "X-Forwarded-Port": []string{"443"}, + "X-Forwarded-Proto": []string{"https"}, + "Forwarded": []string{"for=172.18.2.57;host=route-namespace.router.default.svc.cluster.local:443;proto=https"}, + "X-Forwarded-For": []string{"172.18.2.57"}, + }, + }, + expectedScheme: "https", + expectedHost: "route-namespace.router.default.svc.cluster.local:443", + }, + "haproxy passthrough route -> svc -> tls pod": { + req: &http.Request{ + URL: &url.URL{Path: "/"}, + Host: "route-namespace.router.default.svc.cluster.local", + TLS: &tls.ConnectionState{ /* request has non-nil TLS connection state */ }, + }, + expectedScheme: "https", + expectedHost: "route-namespace.router.default.svc.cluster.local", + }, + } + + for k, tc := range testcases { + scheme, host := SchemeHost(tc.req) + if scheme != tc.expectedScheme { + t.Errorf("%s: expected scheme %q, got %q", k, tc.expectedScheme, scheme) + } + if host != tc.expectedHost { + t.Errorf("%s: expected host %q, got %q", k, tc.expectedHost, host) + } + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/apps/OWNERS b/vendor/github.com/openshift/library-go/pkg/apps/OWNERS new file mode 100644 index 000000000..f0b0af2b5 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apps/OWNERS @@ -0,0 +1,10 @@ +reviewers: + - smarterclayton + - mfojtik + - soltysh + - tnozicka +approvers: + - smarterclayton + - mfojtik + - soltysh + - tnozicka diff --git 
a/vendor/github.com/openshift/library-go/pkg/apps/appsserialization/scheme.go b/vendor/github.com/openshift/library-go/pkg/apps/appsserialization/scheme.go new file mode 100644 index 000000000..91789b1c4 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apps/appsserialization/scheme.go @@ -0,0 +1,30 @@ +package appsserialization + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + + appsv1 "github.com/openshift/api/apps/v1" +) + +var ( + // for decoding, we want to be tolerant of groupified and non-groupified + annotationDecodingScheme = runtime.NewScheme() + annotationDecoder runtime.Decoder + + // for encoding, we want to be strict on groupified + annotationEncodingScheme = runtime.NewScheme() + annotationEncoder runtime.Encoder +) + +func init() { + utilruntime.Must(appsv1.Install(annotationDecodingScheme)) + utilruntime.Must(appsv1.DeprecatedInstallWithoutGroup(annotationDecodingScheme)) + annotationDecoderCodecFactory := serializer.NewCodecFactory(annotationDecodingScheme) + annotationDecoder = annotationDecoderCodecFactory.UniversalDecoder(appsv1.GroupVersion) + + utilruntime.Must(appsv1.Install(annotationEncodingScheme)) + annotationEncoderCodecFactory := serializer.NewCodecFactory(annotationEncodingScheme) + annotationEncoder = annotationEncoderCodecFactory.LegacyCodec(appsv1.GroupVersion) +} diff --git a/vendor/github.com/openshift/library-go/pkg/apps/appsserialization/scheme_test.go b/vendor/github.com/openshift/library-go/pkg/apps/appsserialization/scheme_test.go new file mode 100644 index 000000000..8354d2684 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apps/appsserialization/scheme_test.go @@ -0,0 +1,47 @@ +package appsserialization + +import ( + "strings" + "testing" + + "k8s.io/apimachinery/pkg/runtime" + + "github.com/davecgh/go-spew/spew" + + appsv1 "github.com/openshift/api/apps/v1" +) + +const legacyDC = `{ + 
"apiVersion": "v1", + "kind": "DeploymentConfig", + "metadata": { + "name": "sinatra-app-example-a" + } +} +` + +func TestLegacyDecoding(t *testing.T) { + result, err := runtime.Decode(annotationDecoder, []byte(legacyDC)) + if err != nil { + t.Fatal(err) + } + if result.(*appsv1.DeploymentConfig).Name != "sinatra-app-example-a" { + t.Fatal(spew.Sdump(result)) + } + + groupfiedBytes, err := runtime.Encode(annotationEncoder, result) + if err != nil { + t.Fatal(err) + } + if !strings.Contains(string(groupfiedBytes), "apps.openshift.io/v1") { + t.Fatal(string(groupfiedBytes)) + } + + result2, err := runtime.Decode(annotationDecoder, groupfiedBytes) + if err != nil { + t.Fatal(err) + } + if result2.(*appsv1.DeploymentConfig).Name != "sinatra-app-example-a" { + t.Fatal(spew.Sdump(result2)) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/apps/appsserialization/serialize.go b/vendor/github.com/openshift/library-go/pkg/apps/appsserialization/serialize.go new file mode 100644 index 000000000..0433b77e4 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apps/appsserialization/serialize.go @@ -0,0 +1,31 @@ +package appsserialization + +import ( + "fmt" + + appsv1 "github.com/openshift/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// DecodeDeploymentConfig decodes a DeploymentConfig from controller using annotation codec. +// An error is returned if the controller doesn't contain an encoded config or decoding fail. 
+func DecodeDeploymentConfig(controller metav1.ObjectMetaAccessor) (*appsv1.DeploymentConfig, error) { + encodedConfig, exists := controller.GetObjectMeta().GetAnnotations()[appsv1.DeploymentEncodedConfigAnnotation] + if !exists { + return nil, fmt.Errorf("object %s does not have encoded deployment config annotation", controller.GetObjectMeta().GetName()) + } + config, err := runtime.Decode(annotationDecoder, []byte(encodedConfig)) + if err != nil { + return nil, err + } + externalConfig, ok := config.(*appsv1.DeploymentConfig) + if !ok { + return nil, fmt.Errorf("object %+v is not v1.DeploymentConfig", config) + } + return externalConfig, nil +} + +func EncodeDeploymentConfig(config *appsv1.DeploymentConfig) ([]byte, error) { + return runtime.Encode(annotationEncoder, config) +} diff --git a/vendor/github.com/openshift/library-go/pkg/apps/appsutil/const.go b/vendor/github.com/openshift/library-go/pkg/apps/appsutil/const.go new file mode 100644 index 000000000..ccb9150bc --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apps/appsutil/const.go @@ -0,0 +1,60 @@ +package appsutil + +const ( + // FailedRcCreateReason is added in a deployment config when it cannot create a new replication + // controller. + FailedRcCreateReason = "ReplicationControllerCreateError" + // NewReplicationControllerReason is added in a deployment config when it creates a new replication + // controller. + NewReplicationControllerReason = "NewReplicationControllerCreated" + // NewRcAvailableReason is added in a deployment config when its newest replication controller is made + // available ie. the number of new pods that have passed readiness checks and run for at least + // minReadySeconds is at least the minimum available pods that need to run for the deployment config. 
+ NewRcAvailableReason = "NewReplicationControllerAvailable" + // TimedOutReason is added in a deployment config when its newest replication controller fails to show + // any progress within the given deadline (progressDeadlineSeconds). + TimedOutReason = "ProgressDeadlineExceeded" + // PausedConfigReason is added in a deployment config when it is paused. Lack of progress shouldn't be + // estimated once a deployment config is paused. + PausedConfigReason = "DeploymentConfigPaused" + // CancelledRolloutReason is added in a deployment config when its newest rollout was + // interrupted by cancellation. + CancelledRolloutReason = "RolloutCancelled" + + // DeploymentConfigLabel is the name of a label used to correlate a deployment with the + DeploymentConfigLabel = "deploymentconfig" + + // DeploymentLabel is the name of a label used to correlate a deployment with the Pod created + DeploymentLabel = "deployment" + + // MaxDeploymentDurationSeconds represents the maximum duration that a deployment is allowed to run. + // This is set as the default value for ActiveDeadlineSeconds for the deployer pod. + // Currently set to 6 hours. + MaxDeploymentDurationSeconds int64 = 21600 + + // DefaultRecreateTimeoutSeconds is the default TimeoutSeconds for RecreateDeploymentStrategyParams. 
+ // Used by strategies: + DefaultRecreateTimeoutSeconds int64 = 10 * 60 + DefaultRollingTimeoutSeconds int64 = 10 * 60 + + // PreHookPodSuffix is the suffix added to all pre hook pods + PreHookPodSuffix = "hook-pre" + // MidHookPodSuffix is the suffix added to all mid hook pods + MidHookPodSuffix = "hook-mid" + // PostHookPodSuffix is the suffix added to all post hook pods + PostHookPodSuffix = "hook-post" + + // Used only internally by utils: + + // DeploymentStatusReasonAnnotation represents the reason for deployment being in a given state + // Used for specifying the reason for cancellation or failure of a deployment + DeploymentIgnorePodAnnotation = "deploy.openshift.io/deployer-pod.ignore" + DeploymentReplicasAnnotation = "openshift.io/deployment.replicas" + + DeploymentFailedUnrelatedDeploymentExists = "unrelated pod with the same name as this deployment is already running" + DeploymentFailedUnableToCreateDeployerPod = "unable to create deployer pod" + DeploymentFailedDeployerPodNoLongerExists = "deployer pod no longer exists" + + deploymentCancelledByUser = "cancelled by the user" + deploymentCancelledNewerDeploymentExists = "newer deployment was found running" +) diff --git a/vendor/github.com/openshift/library-go/pkg/apps/appsutil/rc_scale_client.go b/vendor/github.com/openshift/library-go/pkg/apps/appsutil/rc_scale_client.go new file mode 100644 index 000000000..8c0cf1ce0 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apps/appsutil/rc_scale_client.go @@ -0,0 +1,34 @@ +package appsutil + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + scaleclient "k8s.io/client-go/scale" + "k8s.io/client-go/scale/scheme/autoscalingv1" +) + +// rcMapper pins preferred version to v1 and scale kind to autoscaling/v1 Scale +// this avoids putting complete server discovery (including extension APIs) in the critical path for deployments +type rcMapper struct{} + +func (rcMapper) 
ResourceFor(gvr schema.GroupVersionResource) (schema.GroupVersionResource, error) { + if gvr.Group == "" && gvr.Resource == "replicationcontrollers" { + return schema.GroupVersionResource{Group: "", Version: "v1", Resource: "replicationcontrollers"}, nil + } + return schema.GroupVersionResource{}, fmt.Errorf("unknown replication controller resource: %#v", gvr) +} + +func (rcMapper) ScaleForResource(gvr schema.GroupVersionResource) (schema.GroupVersionKind, error) { + rcGvr := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "replicationcontrollers"} + if gvr == rcGvr { + return autoscalingv1.SchemeGroupVersion.WithKind("Scale"), nil + } + return schema.GroupVersionKind{}, fmt.Errorf("unknown replication controller resource: %#v", gvr) +} + +func NewReplicationControllerScaleClient(client kubernetes.Interface) scaleclient.ScalesGetter { + return scaleclient.New(client.CoreV1().RESTClient(), rcMapper{}, dynamic.LegacyAPIPathResolverFunc, rcMapper{}) +} diff --git a/vendor/github.com/openshift/library-go/pkg/apps/appsutil/util.go b/vendor/github.com/openshift/library-go/pkg/apps/appsutil/util.go new file mode 100644 index 000000000..7da474c59 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apps/appsutil/util.go @@ -0,0 +1,629 @@ +package appsutil + +import ( + "context" + "fmt" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + intstrutil "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/watch" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + watchtools "k8s.io/client-go/tools/watch" + + appsv1 "github.com/openshift/api/apps/v1" + "github.com/openshift/library-go/pkg/apps/appsserialization" + 
"github.com/openshift/library-go/pkg/build/naming" +) + +// DeployerPodNameForDeployment returns the name of a pod for a given deployment +func DeployerPodNameForDeployment(deployment string) string { + return naming.GetPodName(deployment, "deploy") +} + +// WaitForRunningDeployerPod waits a given period of time until the deployer pod +// for given replication controller is not running. +func WaitForRunningDeployerPod(podClient corev1client.PodsGetter, rc *corev1.ReplicationController, timeout time.Duration) error { + podName := DeployerPodNameForDeployment(rc.Name) + canGetLogs := func(p *corev1.Pod) bool { + return corev1.PodSucceeded == p.Status.Phase || corev1.PodFailed == p.Status.Phase || corev1.PodRunning == p.Status.Phase + } + + fieldSelector := fields.OneTermEqualSelector("metadata.name", podName).String() + lw := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + options.FieldSelector = fieldSelector + return podClient.Pods(rc.Namespace).List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.FieldSelector = fieldSelector + return podClient.Pods(rc.Namespace).Watch(options) + }, + } + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + _, err := watchtools.UntilWithSync(ctx, lw, &corev1.Pod{}, nil, func(e watch.Event) (bool, error) { + switch e.Type { + case watch.Added, watch.Modified: + newPod, ok := e.Object.(*corev1.Pod) + if !ok { + return true, fmt.Errorf("unknown event object %#v", e.Object) + } + + return canGetLogs(newPod), nil + + case watch.Deleted: + return true, fmt.Errorf("pod got deleted %#v", e.Object) + + case watch.Error: + return true, fmt.Errorf("encountered error while watching for pod: %v", e.Object) + + default: + return true, fmt.Errorf("unexpected event type: %T", e.Type) + } + }) + return err +} + +func newControllerRef(config *appsv1.DeploymentConfig) *metav1.OwnerReference { + 
deploymentConfigControllerRefKind := appsv1.GroupVersion.WithKind("DeploymentConfig") + blockOwnerDeletion := true + isController := true + return &metav1.OwnerReference{ + APIVersion: deploymentConfigControllerRefKind.GroupVersion().String(), + Kind: deploymentConfigControllerRefKind.Kind, + Name: config.Name, + UID: config.UID, + BlockOwnerDeletion: &blockOwnerDeletion, + Controller: &isController, + } +} + +// MakeDeployment creates a deployment represented as a ReplicationController and based on the given DeploymentConfig. +// The controller replica count will be zero. +func MakeDeployment(config *appsv1.DeploymentConfig) (*v1.ReplicationController, error) { + // EncodeDeploymentConfig encodes config as a string using codec. + encodedConfig, err := appsserialization.EncodeDeploymentConfig(config) + if err != nil { + return nil, err + } + + deploymentName := LatestDeploymentNameForConfig(config) + podSpec := config.Spec.Template.Spec.DeepCopy() + + // Fix trailing and leading whitespace in the image field + // This is needed to sanitize old deployment configs where spaces were permitted but + // kubernetes 3.7 (#47491) tightened the validation of container image fields. + for i := range podSpec.Containers { + podSpec.Containers[i].Image = strings.TrimSpace(podSpec.Containers[i].Image) + } + + controllerLabels := make(labels.Set) + for k, v := range config.Labels { + controllerLabels[k] = v + } + // Correlate the deployment with the config. + // TODO: Using the annotation constant for now since the value is correct + // but we could consider adding a new constant to the public types. + controllerLabels[appsv1.DeploymentConfigAnnotation] = config.Name + + // Ensure that pods created by this deployment controller can be safely associated back + // to the controller, and that multiple deployment controllers for the same config don't + // manipulate each others' pods. 
+ selector := map[string]string{} + for k, v := range config.Spec.Selector { + selector[k] = v + } + selector[DeploymentConfigLabel] = config.Name + selector[DeploymentLabel] = deploymentName + + podLabels := make(labels.Set) + for k, v := range config.Spec.Template.Labels { + podLabels[k] = v + } + podLabels[DeploymentConfigLabel] = config.Name + podLabels[DeploymentLabel] = deploymentName + + podAnnotations := make(labels.Set) + for k, v := range config.Spec.Template.Annotations { + podAnnotations[k] = v + } + podAnnotations[appsv1.DeploymentAnnotation] = deploymentName + podAnnotations[appsv1.DeploymentConfigAnnotation] = config.Name + podAnnotations[appsv1.DeploymentVersionAnnotation] = strconv.FormatInt(config.Status.LatestVersion, 10) + + controllerRef := newControllerRef(config) + zero := int32(0) + deployment := &v1.ReplicationController{ + ObjectMeta: metav1.ObjectMeta{ + Name: deploymentName, + Namespace: config.Namespace, + Annotations: map[string]string{ + appsv1.DeploymentConfigAnnotation: config.Name, + appsv1.DeploymentEncodedConfigAnnotation: string(encodedConfig), + appsv1.DeploymentStatusAnnotation: string(appsv1.DeploymentStatusNew), + appsv1.DeploymentVersionAnnotation: strconv.FormatInt(config.Status.LatestVersion, 10), + // This is the target replica count for the new deployment. 
+ appsv1.DesiredReplicasAnnotation: strconv.Itoa(int(config.Spec.Replicas)), + DeploymentReplicasAnnotation: strconv.Itoa(0), + }, + Labels: controllerLabels, + OwnerReferences: []metav1.OwnerReference{*controllerRef}, + }, + Spec: v1.ReplicationControllerSpec{ + // The deployment should be inactive initially + Replicas: &zero, + Selector: selector, + MinReadySeconds: config.Spec.MinReadySeconds, + Template: &v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: podLabels, + Annotations: podAnnotations, + }, + Spec: *podSpec, + }, + }, + } + if config.Status.Details != nil && len(config.Status.Details.Message) > 0 { + deployment.Annotations[appsv1.DeploymentStatusReasonAnnotation] = config.Status.Details.Message + } + if value, ok := config.Annotations[DeploymentIgnorePodAnnotation]; ok { + deployment.Annotations[DeploymentIgnorePodAnnotation] = value + } + + return deployment, nil +} + +// SetDeploymentCondition updates the deployment to include the provided condition. If the condition that +// we are about to add already exists and has the same status and reason then we are not going to update. +func SetDeploymentCondition(status *appsv1.DeploymentConfigStatus, condition appsv1.DeploymentCondition) { + currentCond := GetDeploymentCondition(*status, condition.Type) + if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason { + return + } + // Preserve lastTransitionTime if we are not switching between statuses of a condition. + if currentCond != nil && currentCond.Status == condition.Status { + condition.LastTransitionTime = currentCond.LastTransitionTime + } + + newConditions := filterOutCondition(status.Conditions, condition.Type) + status.Conditions = append(newConditions, condition) +} + +// RemoveDeploymentCondition removes the deployment condition with the provided type. 
+func RemoveDeploymentCondition(status *appsv1.DeploymentConfigStatus, condType appsv1.DeploymentConditionType) { + status.Conditions = filterOutCondition(status.Conditions, condType) +} + +// filterOutCondition returns a new slice of deployment conditions without conditions with the provided type. +func filterOutCondition(conditions []appsv1.DeploymentCondition, condType appsv1.DeploymentConditionType) []appsv1.DeploymentCondition { + var newConditions []appsv1.DeploymentCondition + for _, c := range conditions { + if c.Type == condType { + continue + } + newConditions = append(newConditions, c) + } + return newConditions +} + +// IsOwnedByConfig checks whether the provided replication controller is part of a +// deployment configuration. +// TODO: Switch to use owner references once we got those working. +func IsOwnedByConfig(obj metav1.Object) bool { + _, ok := obj.GetAnnotations()[appsv1.DeploymentConfigAnnotation] + return ok +} + +// DeploymentsForCleanup determines which deployments for a configuration are relevant for the +// revision history limit quota +func DeploymentsForCleanup(configuration *appsv1.DeploymentConfig, deployments []*v1.ReplicationController) []v1.ReplicationController { + // if the past deployment quota has been exceeded, we need to prune the oldest deployments + // until we are not exceeding the quota any longer, so we sort oldest first + sort.Sort(sort.Reverse(ByLatestVersionDesc(deployments))) + + relevantDeployments := []v1.ReplicationController{} + activeDeployment := ActiveDeployment(deployments) + if activeDeployment == nil { + // if cleanup policy is set but no successful deployments have happened, there will be + // no active deployment. 
We can consider all of the deployments in this case except for + // the latest one + for i := range deployments { + deployment := deployments[i] + if deploymentVersionFor(deployment) != configuration.Status.LatestVersion { + relevantDeployments = append(relevantDeployments, *deployment) + } + } + } else { + // if there is an active deployment, we need to filter out any deployments that we don't + // care about, namely the active deployment and any newer deployments + for i := range deployments { + deployment := deployments[i] + if deployment != activeDeployment && deploymentVersionFor(deployment) < deploymentVersionFor(activeDeployment) { + relevantDeployments = append(relevantDeployments, *deployment) + } + } + } + + return relevantDeployments +} + +// LabelForDeployment builds a string identifier for a Deployment. +func LabelForDeployment(deployment *v1.ReplicationController) string { + return fmt.Sprintf("%s/%s", deployment.Namespace, deployment.Name) +} + +// LabelForDeploymentConfig builds a string identifier for a DeploymentConfig. +func LabelForDeploymentConfig(config runtime.Object) string { + accessor, _ := meta.Accessor(config) + return fmt.Sprintf("%s/%s", accessor.GetNamespace(), accessor.GetName()) +} + +// LatestDeploymentNameForConfig returns a stable identifier for deployment config +func LatestDeploymentNameForConfig(config *appsv1.DeploymentConfig) string { + return LatestDeploymentNameForConfigAndVersion(config.Name, config.Status.LatestVersion) +} + +// DeploymentNameForConfigVersion returns the name of the version-th deployment +// for the config that has the provided name +func DeploymentNameForConfigVersion(name string, version int64) string { + return fmt.Sprintf("%s-%d", name, version) +} + +// LatestDeploymentNameForConfigAndVersion returns a stable identifier for config based on its version. 
+func LatestDeploymentNameForConfigAndVersion(name string, version int64) string { + return fmt.Sprintf("%s-%d", name, version) +} + +func DeployerPodNameFor(obj runtime.Object) string { + return AnnotationFor(obj, appsv1.DeploymentPodAnnotation) +} + +func DeploymentConfigNameFor(obj runtime.Object) string { + return AnnotationFor(obj, appsv1.DeploymentConfigAnnotation) +} + +func DeploymentStatusReasonFor(obj runtime.Object) string { + return AnnotationFor(obj, appsv1.DeploymentStatusReasonAnnotation) +} + +func DeleteStatusReasons(rc *v1.ReplicationController) { + delete(rc.Annotations, appsv1.DeploymentStatusReasonAnnotation) + delete(rc.Annotations, appsv1.DeploymentCancelledAnnotation) +} + +func SetCancelledByUserReason(rc *v1.ReplicationController) { + rc.Annotations[appsv1.DeploymentCancelledAnnotation] = "true" + rc.Annotations[appsv1.DeploymentStatusReasonAnnotation] = deploymentCancelledByUser +} + +func SetCancelledByNewerDeployment(rc *v1.ReplicationController) { + rc.Annotations[appsv1.DeploymentCancelledAnnotation] = "true" + rc.Annotations[appsv1.DeploymentStatusReasonAnnotation] = deploymentCancelledNewerDeploymentExists +} + +// HasSynced checks if the provided deployment config has been noticed by the deployment +// config controller. +func HasSynced(dc *appsv1.DeploymentConfig, generation int64) bool { + return dc.Status.ObservedGeneration >= generation +} + +// HasChangeTrigger returns whether the provided deployment configuration has +// a config change trigger or not +func HasChangeTrigger(config *appsv1.DeploymentConfig) bool { + for _, trigger := range config.Spec.Triggers { + if trigger.Type == appsv1.DeploymentTriggerOnConfigChange { + return true + } + } + return false +} + +// HasTrigger returns whether the provided deployment configuration has any trigger +// defined or not. 
+func HasTrigger(config *appsv1.DeploymentConfig) bool { + return HasChangeTrigger(config) || HasImageChangeTrigger(config) +} + +// HasLastTriggeredImage returns whether all image change triggers in provided deployment +// configuration has the lastTriggerImage field set (iow. all images were updated for +// them). Returns false if deployment configuration has no image change trigger defined. +func HasLastTriggeredImage(config *appsv1.DeploymentConfig) bool { + hasImageTrigger := false + for _, trigger := range config.Spec.Triggers { + if trigger.Type == appsv1.DeploymentTriggerOnImageChange { + hasImageTrigger = true + if len(trigger.ImageChangeParams.LastTriggeredImage) == 0 { + return false + } + } + } + return hasImageTrigger +} + +// IsInitialDeployment returns whether the deployment configuration is the first version +// of this configuration. +func IsInitialDeployment(config *appsv1.DeploymentConfig) bool { + return config.Status.LatestVersion == 0 +} + +// IsRollingConfig returns true if the strategy type is a rolling update. +func IsRollingConfig(config *appsv1.DeploymentConfig) bool { + return config.Spec.Strategy.Type == appsv1.DeploymentStrategyTypeRolling +} + +// ResolveFenceposts is copy from k8s deployment_utils to avoid unnecessary imports +func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) { + surge, err := intstrutil.GetValueFromIntOrPercent(maxSurge, int(desired), true) + if err != nil { + return 0, 0, err + } + unavailable, err := intstrutil.GetValueFromIntOrPercent(maxUnavailable, int(desired), false) + if err != nil { + return 0, 0, err + } + + if surge == 0 && unavailable == 0 { + // Validation should never allow the user to explicitly use zero values for both maxSurge + // maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero. 
+ // If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the + // theory that surge might not work due to quota. + unavailable = 1 + } + + return int32(surge), int32(unavailable), nil +} + +// MaxUnavailable returns the maximum unavailable pods a rolling deployment config can take. +func MaxUnavailable(config *appsv1.DeploymentConfig) int32 { + if !IsRollingConfig(config) { + return int32(0) + } + // Error caught by validation + _, maxUnavailable, _ := ResolveFenceposts(config.Spec.Strategy.RollingParams.MaxSurge, config.Spec.Strategy.RollingParams.MaxUnavailable, config.Spec.Replicas) + return maxUnavailable +} + +// MaxSurge returns the maximum surge pods a rolling deployment config can take. +func MaxSurge(config appsv1.DeploymentConfig) int32 { + if !IsRollingConfig(&config) { + return int32(0) + } + // Error caught by validation + maxSurge, _, _ := ResolveFenceposts(config.Spec.Strategy.RollingParams.MaxSurge, config.Spec.Strategy.RollingParams.MaxUnavailable, config.Spec.Replicas) + return maxSurge +} + +// AnnotationFor returns the annotation with key for obj. +func AnnotationFor(obj runtime.Object, key string) string { + objectMeta, err := meta.Accessor(obj) + if err != nil { + return "" + } + if objectMeta == nil || reflect.ValueOf(objectMeta).IsNil() { + return "" + } + return objectMeta.GetAnnotations()[key] +} + +// ActiveDeployment returns the latest complete deployment, or nil if there is +// no such deployment. The active deployment is not always the same as the +// latest deployment. 
+func ActiveDeployment(input []*v1.ReplicationController) *v1.ReplicationController { + var activeDeployment *v1.ReplicationController + var lastCompleteDeploymentVersion int64 = 0 + for i := range input { + deployment := input[i] + deploymentVersion := DeploymentVersionFor(deployment) + if IsCompleteDeployment(deployment) && deploymentVersion > lastCompleteDeploymentVersion { + activeDeployment = deployment + lastCompleteDeploymentVersion = deploymentVersion + } + } + return activeDeployment +} + +// ConfigSelector returns a label Selector which can be used to find all +// deployments for a DeploymentConfig. +// +// TODO: Using the annotation constant for now since the value is correct +// but we could consider adding a new constant to the public types. +func ConfigSelector(name string) labels.Selector { + return labels.SelectorFromValidatedSet(labels.Set{appsv1.DeploymentConfigAnnotation: name}) +} + +// IsCompleteDeployment returns true if the passed deployment is in state complete. +func IsCompleteDeployment(deployment runtime.Object) bool { + return DeploymentStatusFor(deployment) == appsv1.DeploymentStatusComplete +} + +// IsFailedDeployment returns true if the passed deployment failed. +func IsFailedDeployment(deployment runtime.Object) bool { + return DeploymentStatusFor(deployment) == appsv1.DeploymentStatusFailed +} + +// IsTerminatedDeployment returns true if the passed deployment has terminated (either +// complete or failed). +func IsTerminatedDeployment(deployment runtime.Object) bool { + return IsCompleteDeployment(deployment) || IsFailedDeployment(deployment) +} + +func IsDeploymentCancelled(deployment runtime.Object) bool { + value := AnnotationFor(deployment, appsv1.DeploymentCancelledAnnotation) + return strings.EqualFold(value, "true") +} + +// DeployerPodSelector returns a label Selector which can be used to find all +// deployer pods associated with a deployment with name. 
+func DeployerPodSelector(name string) labels.Selector { + return labels.SelectorFromValidatedSet(labels.Set{appsv1.DeployerPodForDeploymentLabel: name}) +} + +func DeploymentStatusFor(deployment runtime.Object) appsv1.DeploymentStatus { + return appsv1.DeploymentStatus(AnnotationFor(deployment, appsv1.DeploymentStatusAnnotation)) +} + +func SetDeploymentLatestVersionAnnotation(rc *v1.ReplicationController, version string) { + if rc.Annotations == nil { + rc.Annotations = map[string]string{} + } + rc.Annotations[appsv1.DeploymentVersionAnnotation] = version +} + +func DeploymentVersionFor(obj runtime.Object) int64 { + v, err := strconv.ParseInt(AnnotationFor(obj, appsv1.DeploymentVersionAnnotation), 10, 64) + if err != nil { + return -1 + } + return v +} + +func DeploymentNameFor(obj runtime.Object) string { + return AnnotationFor(obj, appsv1.DeploymentAnnotation) +} + +func deploymentVersionFor(obj runtime.Object) int64 { + v, err := strconv.ParseInt(AnnotationFor(obj, appsv1.DeploymentVersionAnnotation), 10, 64) + if err != nil { + return -1 + } + return v +} + +// LatestDeploymentInfo returns info about the latest deployment for a config, +// or nil if there is no latest deployment. The latest deployment is not +// always the same as the active deployment. +func LatestDeploymentInfo(config *appsv1.DeploymentConfig, deployments []*v1.ReplicationController) (bool, *v1.ReplicationController) { + if config.Status.LatestVersion == 0 || len(deployments) == 0 { + return false, nil + } + sort.Sort(ByLatestVersionDesc(deployments)) + candidate := deployments[0] + return deploymentVersionFor(candidate) == config.Status.LatestVersion, candidate +} + +// GetDeploymentCondition returns the condition with the provided type. 
+func GetDeploymentCondition(status appsv1.DeploymentConfigStatus, condType appsv1.DeploymentConditionType) *appsv1.DeploymentCondition { + for i := range status.Conditions { + c := status.Conditions[i] + if c.Type == condType { + return &c + } + } + return nil +} + +// GetReplicaCountForDeployments returns the sum of all replicas for the +// given deployments. +func GetReplicaCountForDeployments(deployments []*v1.ReplicationController) int32 { + totalReplicaCount := int32(0) + for _, deployment := range deployments { + count := deployment.Spec.Replicas + if count == nil { + continue + } + totalReplicaCount += *count + } + return totalReplicaCount +} + +// GetStatusReplicaCountForDeployments returns the sum of the replicas reported in the +// status of the given deployments. +func GetStatusReplicaCountForDeployments(deployments []*v1.ReplicationController) int32 { + totalReplicaCount := int32(0) + for _, deployment := range deployments { + totalReplicaCount += deployment.Status.Replicas + } + return totalReplicaCount +} + +// GetReadyReplicaCountForReplicationControllers returns the number of ready pods corresponding to +// the given replication controller. +func GetReadyReplicaCountForReplicationControllers(replicationControllers []*v1.ReplicationController) int32 { + totalReadyReplicas := int32(0) + for _, rc := range replicationControllers { + if rc != nil { + totalReadyReplicas += rc.Status.ReadyReplicas + } + } + return totalReadyReplicas +} + +// GetAvailableReplicaCountForReplicationControllers returns the number of available pods corresponding to +// the given replication controller. 
+func GetAvailableReplicaCountForReplicationControllers(replicationControllers []*v1.ReplicationController) int32 { + totalAvailableReplicas := int32(0) + for _, rc := range replicationControllers { + if rc != nil { + totalAvailableReplicas += rc.Status.AvailableReplicas + } + } + return totalAvailableReplicas +} + +// HasImageChangeTrigger returns whether the provided deployment configuration has +// an image change trigger or not. +func HasImageChangeTrigger(config *appsv1.DeploymentConfig) bool { + for _, trigger := range config.Spec.Triggers { + if trigger.Type == appsv1.DeploymentTriggerOnImageChange { + return true + } + } + return false +} + +// CanTransitionPhase returns whether it is allowed to go from the current to the next phase. +func CanTransitionPhase(current, next appsv1.DeploymentStatus) bool { + switch current { + case appsv1.DeploymentStatusNew: + switch next { + case appsv1.DeploymentStatusPending, + appsv1.DeploymentStatusRunning, + appsv1.DeploymentStatusFailed, + appsv1.DeploymentStatusComplete: + return true + } + case appsv1.DeploymentStatusPending: + switch next { + case appsv1.DeploymentStatusRunning, + appsv1.DeploymentStatusFailed, + appsv1.DeploymentStatusComplete: + return true + } + case appsv1.DeploymentStatusRunning: + switch next { + case appsv1.DeploymentStatusFailed, appsv1.DeploymentStatusComplete: + return true + } + } + return false +} + +type ByLatestVersionAsc []*v1.ReplicationController + +func (d ByLatestVersionAsc) Len() int { return len(d) } +func (d ByLatestVersionAsc) Swap(i, j int) { d[i], d[j] = d[j], d[i] } +func (d ByLatestVersionAsc) Less(i, j int) bool { + return DeploymentVersionFor(d[i]) < DeploymentVersionFor(d[j]) +} + +// ByLatestVersionDesc sorts deployments by LatestVersion descending. 
+type ByLatestVersionDesc []*v1.ReplicationController + +func (d ByLatestVersionDesc) Len() int { return len(d) } +func (d ByLatestVersionDesc) Swap(i, j int) { d[i], d[j] = d[j], d[i] } +func (d ByLatestVersionDesc) Less(i, j int) bool { + return DeploymentVersionFor(d[j]) < DeploymentVersionFor(d[i]) +} diff --git a/vendor/github.com/openshift/library-go/pkg/apps/appsutil/util_test.go b/vendor/github.com/openshift/library-go/pkg/apps/appsutil/util_test.go new file mode 100644 index 000000000..f4298e1c7 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/apps/appsutil/util_test.go @@ -0,0 +1,425 @@ +package appsutil + +import ( + "reflect" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + appsv1 "github.com/openshift/api/apps/v1" +) + +func TestPodName(t *testing.T) { + deployment := &corev1.ReplicationController{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testName", + }, + } + expected := "testName-deploy" + actual := DeployerPodNameForDeployment(deployment.Name) + if expected != actual { + t.Errorf("Unexpected pod name for deployment. 
Expected: %s Got: %s", expected, actual) + } +} + +func TestCanTransitionPhase(t *testing.T) { + tests := []struct { + name string + current, next appsv1.DeploymentStatus + expected bool + }{ + { + name: "New->New", + current: appsv1.DeploymentStatusNew, + next: appsv1.DeploymentStatusNew, + expected: false, + }, + { + name: "New->Pending", + current: appsv1.DeploymentStatusNew, + next: appsv1.DeploymentStatusPending, + expected: true, + }, + { + name: "New->Running", + current: appsv1.DeploymentStatusNew, + next: appsv1.DeploymentStatusRunning, + expected: true, + }, + { + name: "New->Complete", + current: appsv1.DeploymentStatusNew, + next: appsv1.DeploymentStatusComplete, + expected: true, + }, + { + name: "New->Failed", + current: appsv1.DeploymentStatusNew, + next: appsv1.DeploymentStatusFailed, + expected: true, + }, + { + name: "Pending->New", + current: appsv1.DeploymentStatusPending, + next: appsv1.DeploymentStatusNew, + expected: false, + }, + { + name: "Pending->Pending", + current: appsv1.DeploymentStatusPending, + next: appsv1.DeploymentStatusPending, + expected: false, + }, + { + name: "Pending->Running", + current: appsv1.DeploymentStatusPending, + next: appsv1.DeploymentStatusRunning, + expected: true, + }, + { + name: "Pending->Failed", + current: appsv1.DeploymentStatusPending, + next: appsv1.DeploymentStatusFailed, + expected: true, + }, + { + name: "Pending->Complete", + current: appsv1.DeploymentStatusPending, + next: appsv1.DeploymentStatusComplete, + expected: true, + }, + { + name: "Running->New", + current: appsv1.DeploymentStatusRunning, + next: appsv1.DeploymentStatusNew, + expected: false, + }, + { + name: "Running->Pending", + current: appsv1.DeploymentStatusRunning, + next: appsv1.DeploymentStatusPending, + expected: false, + }, + { + name: "Running->Running", + current: appsv1.DeploymentStatusRunning, + next: appsv1.DeploymentStatusRunning, + expected: false, + }, + { + name: "Running->Failed", + current: 
appsv1.DeploymentStatusRunning, + next: appsv1.DeploymentStatusFailed, + expected: true, + }, + { + name: "Running->Complete", + current: appsv1.DeploymentStatusRunning, + next: appsv1.DeploymentStatusComplete, + expected: true, + }, + { + name: "Complete->New", + current: appsv1.DeploymentStatusComplete, + next: appsv1.DeploymentStatusNew, + expected: false, + }, + { + name: "Complete->Pending", + current: appsv1.DeploymentStatusComplete, + next: appsv1.DeploymentStatusPending, + expected: false, + }, + { + name: "Complete->Running", + current: appsv1.DeploymentStatusComplete, + next: appsv1.DeploymentStatusRunning, + expected: false, + }, + { + name: "Complete->Failed", + current: appsv1.DeploymentStatusComplete, + next: appsv1.DeploymentStatusFailed, + expected: false, + }, + { + name: "Complete->Complete", + current: appsv1.DeploymentStatusComplete, + next: appsv1.DeploymentStatusComplete, + expected: false, + }, + { + name: "Failed->New", + current: appsv1.DeploymentStatusFailed, + next: appsv1.DeploymentStatusNew, + expected: false, + }, + { + name: "Failed->Pending", + current: appsv1.DeploymentStatusFailed, + next: appsv1.DeploymentStatusPending, + expected: false, + }, + { + name: "Failed->Running", + current: appsv1.DeploymentStatusFailed, + next: appsv1.DeploymentStatusRunning, + expected: false, + }, + { + name: "Failed->Complete", + current: appsv1.DeploymentStatusFailed, + next: appsv1.DeploymentStatusComplete, + expected: false, + }, + { + name: "Failed->Failed", + current: appsv1.DeploymentStatusFailed, + next: appsv1.DeploymentStatusFailed, + expected: false, + }, + } + + for _, test := range tests { + got := CanTransitionPhase(test.current, test.next) + if got != test.expected { + t.Errorf("%s: expected %t, got %t", test.name, test.expected, got) + } + } +} + +var ( + now = metav1.Now() + later = metav1.Time{Time: now.Add(time.Minute)} + earlier = metav1.Time{Time: now.Add(-time.Minute)} + + condProgressing = func() appsv1.DeploymentCondition { + 
return appsv1.DeploymentCondition{ + Type: appsv1.DeploymentProgressing, + Status: corev1.ConditionTrue, + LastTransitionTime: now, + } + } + + condProgressingDifferentTime = func() appsv1.DeploymentCondition { + return appsv1.DeploymentCondition{ + Type: appsv1.DeploymentProgressing, + Status: corev1.ConditionTrue, + LastTransitionTime: later, + } + } + + condProgressingDifferentReason = func() appsv1.DeploymentCondition { + return appsv1.DeploymentCondition{ + Type: appsv1.DeploymentProgressing, + Status: corev1.ConditionTrue, + LastTransitionTime: later, + Reason: NewReplicationControllerReason, + } + } + + condNotProgressing = func() appsv1.DeploymentCondition { + return appsv1.DeploymentCondition{ + Type: appsv1.DeploymentProgressing, + Status: corev1.ConditionFalse, + LastUpdateTime: earlier, + LastTransitionTime: earlier, + } + } + + condAvailable = func() appsv1.DeploymentCondition { + return appsv1.DeploymentCondition{ + Type: appsv1.DeploymentAvailable, + Status: corev1.ConditionTrue, + } + } +) + +func TestGetCondition(t *testing.T) { + exampleStatus := func() appsv1.DeploymentConfigStatus { + return appsv1.DeploymentConfigStatus{ + Conditions: []appsv1.DeploymentCondition{condProgressing(), condAvailable()}, + } + } + + tests := []struct { + name string + + status appsv1.DeploymentConfigStatus + condType appsv1.DeploymentConditionType + condStatus corev1.ConditionStatus + + expected bool + }{ + { + name: "condition exists", + + status: exampleStatus(), + condType: appsv1.DeploymentAvailable, + + expected: true, + }, + { + name: "condition does not exist", + + status: exampleStatus(), + condType: appsv1.DeploymentReplicaFailure, + + expected: false, + }, + } + + for _, test := range tests { + cond := GetDeploymentCondition(test.status, test.condType) + exists := cond != nil + if exists != test.expected { + t.Errorf("%s: expected condition to exist: %t, got: %t", test.name, test.expected, exists) + } + } +} + +func TestSetCondition(t *testing.T) { + tests 
:= []struct { + name string + + status *appsv1.DeploymentConfigStatus + cond appsv1.DeploymentCondition + + expectedStatus *appsv1.DeploymentConfigStatus + }{ + { + name: "set for the first time", + + status: &appsv1.DeploymentConfigStatus{}, + cond: condAvailable(), + + expectedStatus: &appsv1.DeploymentConfigStatus{ + Conditions: []appsv1.DeploymentCondition{ + condAvailable(), + }, + }, + }, + { + name: "simple set", + + status: &appsv1.DeploymentConfigStatus{ + Conditions: []appsv1.DeploymentCondition{ + condProgressing(), + }, + }, + cond: condAvailable(), + + expectedStatus: &appsv1.DeploymentConfigStatus{ + Conditions: []appsv1.DeploymentCondition{ + condProgressing(), condAvailable(), + }, + }, + }, + { + name: "replace if status changes", + + status: &appsv1.DeploymentConfigStatus{ + Conditions: []appsv1.DeploymentCondition{ + condNotProgressing(), + }, + }, + cond: condProgressing(), + + expectedStatus: &appsv1.DeploymentConfigStatus{Conditions: []appsv1.DeploymentCondition{condProgressing()}}, + }, + { + name: "replace if reason changes", + + status: &appsv1.DeploymentConfigStatus{ + Conditions: []appsv1.DeploymentCondition{ + condProgressing(), + }, + }, + cond: condProgressingDifferentReason(), + + expectedStatus: &appsv1.DeploymentConfigStatus{ + Conditions: []appsv1.DeploymentCondition{ + { + Type: appsv1.DeploymentProgressing, + Status: corev1.ConditionTrue, + // Note that LastTransitionTime stays the same. + LastTransitionTime: now, + // Only the reason changes. 
+ Reason: NewReplicationControllerReason, + }, + }, + }, + }, + { + name: "don't replace if status and reason don't change", + + status: &appsv1.DeploymentConfigStatus{ + Conditions: []appsv1.DeploymentCondition{ + condProgressing(), + }, + }, + cond: condProgressingDifferentTime(), + + expectedStatus: &appsv1.DeploymentConfigStatus{Conditions: []appsv1.DeploymentCondition{condProgressing()}}, + }, + } + + for _, test := range tests { + t.Logf("running test %q", test.name) + SetDeploymentCondition(test.status, test.cond) + if !reflect.DeepEqual(test.status, test.expectedStatus) { + t.Errorf("expected status: %v, got: %v", test.expectedStatus, test.status) + } + } +} + +func TestRemoveCondition(t *testing.T) { + exampleStatus := func() *appsv1.DeploymentConfigStatus { + return &appsv1.DeploymentConfigStatus{ + Conditions: []appsv1.DeploymentCondition{condProgressing(), condAvailable()}, + } + } + + tests := []struct { + name string + + status *appsv1.DeploymentConfigStatus + condType appsv1.DeploymentConditionType + + expectedStatus *appsv1.DeploymentConfigStatus + }{ + { + name: "remove from empty status", + + status: &appsv1.DeploymentConfigStatus{}, + condType: appsv1.DeploymentProgressing, + + expectedStatus: &appsv1.DeploymentConfigStatus{}, + }, + { + name: "simple remove", + + status: &appsv1.DeploymentConfigStatus{Conditions: []appsv1.DeploymentCondition{condProgressing()}}, + condType: appsv1.DeploymentProgressing, + + expectedStatus: &appsv1.DeploymentConfigStatus{}, + }, + { + name: "doesn't remove anything", + + status: exampleStatus(), + condType: appsv1.DeploymentReplicaFailure, + + expectedStatus: exampleStatus(), + }, + } + + for _, test := range tests { + RemoveDeploymentCondition(test.status, test.condType) + if !reflect.DeepEqual(test.status, test.expectedStatus) { + t.Errorf("%s: expected status: %v, got: %v", test.name, test.expectedStatus, test.status) + } + } +} diff --git 
a/vendor/github.com/openshift/library-go/pkg/assets/create/create_test.go b/vendor/github.com/openshift/library-go/pkg/assets/create/create_test.go index e9ce7e964..dad537533 100644 --- a/vendor/github.com/openshift/library-go/pkg/assets/create/create_test.go +++ b/vendor/github.com/openshift/library-go/pkg/assets/create/create_test.go @@ -149,6 +149,25 @@ func TestCreate(t *testing.T) { testConfigMap.SetName("aggregator-client-ca") testConfigMap.SetNamespace("openshift-kube-apiserver") + testOperatorConfig := &unstructured.Unstructured{} + testOperatorConfig.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "kubeapiserver.operator.openshift.io", + Version: "v1alpha1", + Kind: "KubeAPIServerOperatorConfig", + }) + testOperatorConfig.SetName("instance") + + testOperatorConfigWithStatus := &unstructured.Unstructured{} + testOperatorConfigWithStatus.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "kubeapiserver.operator.openshift.io", + Version: "v1alpha1", + Kind: "KubeAPIServerOperatorConfig", + }) + testOperatorConfigWithStatus.SetName("instance") + testOperatorConfigStatusVal := make(map[string]interface{}) + testOperatorConfigStatusVal["initializedValue"] = "something before" + unstructured.SetNestedField(testOperatorConfigWithStatus.Object, testOperatorConfigStatusVal, "status") + tests := []struct { name string discovery []*restmapper.APIGroupResources @@ -165,7 +184,7 @@ func TestCreate(t *testing.T) { { name: "fail to create kube apiserver operator config", discovery: resourcesWithoutKubeAPIServer, - expectFailedCount: 1, + expectFailedCount: 2, expectError: true, expectReload: true, }, @@ -174,6 +193,38 @@ func TestCreate(t *testing.T) { discovery: resources, existingObjects: []runtime.Object{testConfigMap}, }, + { + name: "create all resources", + discovery: resources, + existingObjects: []runtime.Object{testOperatorConfig}, + evalActions: func(t *testing.T, actions []ktesting.Action) { + if got, exp := len(actions), 8; got != exp { + 
t.Errorf("expected %d actions, found %d", exp, got) + return + } + + ups, ok := actions[6].(ktesting.UpdateAction) + if !ok { + t.Errorf("expecting Update action for actions[5], got %T", actions[5]) + return + } + if got, exp := ups.GetSubresource(), "status"; got != exp { + t.Errorf("ecpecting the subresource to be %q, got %q", exp, got) + return + } + }, + }, + { + name: "create all resources", + discovery: resources, + existingObjects: []runtime.Object{testOperatorConfigWithStatus}, + evalActions: func(t *testing.T, actions []ktesting.Action) { + if got, exp := len(actions), 7; got != exp { + t.Errorf("expected %d actions, found %d", exp, got) + return + } + }, + }, } fakeScheme := runtime.NewScheme() @@ -230,7 +281,7 @@ func TestLoad(t *testing.T) { { name: "read all manifests", assetDir: "testdata", - expectedManifestCount: 5, + expectedManifestCount: 6, }, { name: "handle missing dir", diff --git a/vendor/github.com/openshift/library-go/pkg/assets/create/creater.go b/vendor/github.com/openshift/library-go/pkg/assets/create/creater.go index cec47ed26..cd5f6a749 100644 --- a/vendor/github.com/openshift/library-go/pkg/assets/create/creater.go +++ b/vendor/github.com/openshift/library-go/pkg/assets/create/creater.go @@ -163,36 +163,61 @@ func create(ctx context.Context, manifests map[string]*unstructured.Unstructured continue } + var resource dynamic.ResourceInterface if mappings.Scope.Name() == meta.RESTScopeNameRoot { - _, err = client.Resource(mappings.Resource).Create(manifests[path], metav1.CreateOptions{}) + resource = client.Resource(mappings.Resource) } else { - _, err = client.Resource(mappings.Resource).Namespace(manifests[path].GetNamespace()).Create(manifests[path], metav1.CreateOptions{}) + resource = client.Resource(mappings.Resource).Namespace(manifests[path].GetNamespace()) } - resourceString := mappings.Resource.Resource + "." + mappings.Resource.Version + "." 
+ mappings.Resource.Group + "/" + manifests[path].GetName() + " -n " + manifests[path].GetNamespace() + incluster, err := resource.Create(manifests[path], metav1.CreateOptions{}) + + if err == nil && options.Verbose { + fmt.Fprintf(options.StdErr, "Created %q %s\n", path, resourceString) + } + // Resource already exists means we already succeeded // This should never happen as we remove already created items from the manifest list, unless the resource existed beforehand. if kerrors.IsAlreadyExists(err) { if options.Verbose { fmt.Fprintf(options.StdErr, "Skipped %q %s as it already exists\n", path, resourceString) } - delete(manifests, path) - continue + incluster, err = resource.Get(manifests[path].GetName(), metav1.GetOptions{}) + if err != nil { + if options.Verbose { + fmt.Fprintf(options.StdErr, "Failed to get already existing %q %s: %v\n", path, resourceString, err) + } + errs[path] = fmt.Errorf("failed to get %s: %v", resourceString, err) + continue + } } if err != nil { if options.Verbose { fmt.Fprintf(options.StdErr, "Failed to create %q %s: %v\n", path, resourceString, err) } - errs[path] = fmt.Errorf("failed to create: %v", err) + errs[path] = fmt.Errorf("failed to create %s: %v", resourceString, err) continue } - if options.Verbose { - fmt.Fprintf(options.StdErr, "Created %q %s\n", path, resourceString) + if _, ok := manifests[path].Object["status"]; ok { + _, found := incluster.Object["status"] + if !found { + incluster.Object["status"] = manifests[path].Object["status"] + incluster, err = resource.UpdateStatus(incluster, metav1.UpdateOptions{}) + if err != nil && !kerrors.IsNotFound(err) { + if options.Verbose { + fmt.Fprintf(options.StdErr, "Failed to update status for the %q %s: %v\n", path, resourceString, err) + } + errs[path] = fmt.Errorf("failed to update status for %s: %v", resourceString, err) + continue + } + if err == nil && options.Verbose { + fmt.Fprintf(options.StdErr, "Updated status for %q %s\n", path, resourceString) + } + } } - // 
Creation succeeded lets remove the manifest from the list to avoid creating it second time delete(manifests, path) } diff --git a/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/operator-config-empty-status.yaml b/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/operator-config-empty-status.yaml new file mode 100644 index 000000000..81133ceaa --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/operator-config-empty-status.yaml @@ -0,0 +1,7 @@ +apiVersion: kubeapiserver.operator.openshift.io/v1alpha1 +kind: KubeAPIServerOperatorConfig +metadata: + name: instance-empty-status +spec: + managementState: Managed +status: diff --git a/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/operator-config.yaml b/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/operator-config.yaml index fafd307b3..a946007c1 100644 --- a/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/operator-config.yaml +++ b/vendor/github.com/openshift/library-go/pkg/assets/create/testdata/operator-config.yaml @@ -4,3 +4,5 @@ metadata: name: instance spec: managementState: Managed +status: + initializedValue: something diff --git a/vendor/github.com/openshift/library-go/pkg/authentication/bootstrapauthenticator/bootstrap.go b/vendor/github.com/openshift/library-go/pkg/authentication/bootstrapauthenticator/bootstrap.go new file mode 100644 index 000000000..a62c3a3ad --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/authentication/bootstrapauthenticator/bootstrap.go @@ -0,0 +1,153 @@ +package bootstrapauthenticator + +import ( + "context" + "crypto/sha512" + "encoding/base64" + "fmt" + "time" + + "golang.org/x/crypto/bcrypt" + "k8s.io/klog" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/authentication/authenticator" + "k8s.io/apiserver/pkg/authentication/user" + 
"k8s.io/client-go/kubernetes/typed/core/v1" +) + +const ( + // BootstrapUser is the magic bootstrap OAuth user that can perform any action + BootstrapUser = "kube:admin" + // support basic auth which does not allow : in username + bootstrapUserBasicAuth = "kubeadmin" + // force the use of a secure password length + // expected format is 5char-5char-5char-5char + minPasswordLen = 23 +) + +var ( + // make it obvious that we refuse to honor short passwords + errPasswordTooShort = fmt.Errorf("%s password must be at least %d characters long", bootstrapUserBasicAuth, minPasswordLen) + + // we refuse to honor a secret that is too new when compared to kube-system + // since kube-system always exists and cannot be deleted + // and creation timestamp is controlled by the api, we can use this to + // detect if the secret was recreated after the initial bootstrapping + errSecretRecreated = fmt.Errorf("%s secret cannot be recreated", bootstrapUserBasicAuth) +) + +func New(getter BootstrapUserDataGetter) authenticator.Password { + return &bootstrapPassword{ + getter: getter, + names: sets.NewString(BootstrapUser, bootstrapUserBasicAuth), + } +} + +type bootstrapPassword struct { + getter BootstrapUserDataGetter + names sets.String +} + +func (b *bootstrapPassword) AuthenticatePassword(ctx context.Context, username, password string) (*authenticator.Response, bool, error) { + if !b.names.Has(username) { + return nil, false, nil + } + + data, ok, err := b.getter.Get() + if err != nil || !ok { + return nil, ok, err + } + + // check length after we know that the secret is functional since + // we do not want to complain when the bootstrap user is disabled + if len(password) < minPasswordLen { + return nil, false, errPasswordTooShort + } + + if err := bcrypt.CompareHashAndPassword(data.PasswordHash, []byte(password)); err != nil { + if err == bcrypt.ErrMismatchedHashAndPassword { + klog.V(4).Infof("%s password mismatch", bootstrapUserBasicAuth) + return nil, false, nil + } + return 
nil, false, err + } + + // do not set other fields, see identitymapper.userToInfo func + return &authenticator.Response{ + User: &user.DefaultInfo{ + Name: BootstrapUser, + UID: data.UID, // uid ties this authentication to the current state of the secret + }, + }, true, nil +} + +type BootstrapUserData struct { + PasswordHash []byte + UID string +} + +type BootstrapUserDataGetter interface { + Get() (data *BootstrapUserData, ok bool, err error) + // TODO add a method like: + // IsPermanentlyDisabled() bool + // and use it to gate the wiring of components related to the bootstrap user. + // when the oauth server is running embedded in the kube api server, this method would always + // return false because the control plane would not be functional at the time of the check. + // when running as an external process, we can assume a functional control plane to perform the check. +} + +func NewBootstrapUserDataGetter(secrets v1.SecretsGetter, namespaces v1.NamespacesGetter) BootstrapUserDataGetter { + return &bootstrapUserDataGetter{ + secrets: secrets.Secrets(metav1.NamespaceSystem), + namespaces: namespaces.Namespaces(), + } +} + +type bootstrapUserDataGetter struct { + secrets v1.SecretInterface + namespaces v1.NamespaceInterface +} + +func (b *bootstrapUserDataGetter) Get() (*BootstrapUserData, bool, error) { + secret, err := b.secrets.Get(bootstrapUserBasicAuth, metav1.GetOptions{}) + if errors.IsNotFound(err) { + klog.V(4).Infof("%s secret does not exist", bootstrapUserBasicAuth) + return nil, false, nil + } + if err != nil { + return nil, false, err + } + if secret.DeletionTimestamp != nil { + klog.V(4).Infof("%s secret is being deleted", bootstrapUserBasicAuth) + return nil, false, nil + } + namespace, err := b.namespaces.Get(metav1.NamespaceSystem, metav1.GetOptions{}) + if err != nil { + return nil, false, err + } + if secret.CreationTimestamp.After(namespace.CreationTimestamp.Add(time.Hour)) { + return nil, false, errSecretRecreated + } + + hashedPassword := 
secret.Data[bootstrapUserBasicAuth] + + // make sure the value is a valid bcrypt hash + if _, err := bcrypt.Cost(hashedPassword); err != nil { + return nil, false, err + } + + exactSecret := string(secret.UID) + secret.ResourceVersion + both := append([]byte(exactSecret), hashedPassword...) + + // use a hash to avoid leaking any derivative of the password + // this makes it easy for us to tell if the secret changed + uidBytes := sha512.Sum512(both) + + return &BootstrapUserData{ + PasswordHash: hashedPassword, + UID: base64.RawURLEncoding.EncodeToString(uidBytes[:]), + }, true, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/authorization/authorizationutil/subject.go b/vendor/github.com/openshift/library-go/pkg/authorization/authorizationutil/subject.go new file mode 100644 index 000000000..74c179e68 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/authorization/authorizationutil/subject.go @@ -0,0 +1,56 @@ +package authorizationutil + +import ( + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apiserver/pkg/authentication/serviceaccount" +) + +func BuildRBACSubjects(users, groups []string) []rbacv1.Subject { + subjects := []rbacv1.Subject{} + + for _, user := range users { + saNamespace, saName, err := serviceaccount.SplitUsername(user) + if err == nil { + subjects = append(subjects, rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: saNamespace, Name: saName}) + } else { + subjects = append(subjects, rbacv1.Subject{Kind: rbacv1.UserKind, APIGroup: rbacv1.GroupName, Name: user}) + } + } + + for _, group := range groups { + subjects = append(subjects, rbacv1.Subject{Kind: rbacv1.GroupKind, APIGroup: rbacv1.GroupName, Name: group}) + } + + return subjects +} + +func RBACSubjectsToUsersAndGroups(subjects []rbacv1.Subject, defaultNamespace string) (users []string, groups []string) { + for _, subject := range subjects { + + switch { + case subject.APIGroup == rbacv1.GroupName && subject.Kind == rbacv1.GroupKind: + groups = append(groups, 
subject.Name) + case subject.APIGroup == rbacv1.GroupName && subject.Kind == rbacv1.UserKind: + users = append(users, subject.Name) + case subject.APIGroup == "" && subject.Kind == rbacv1.ServiceAccountKind: + // default the namespace to namespace we're working in if + // it's available. This allows rolebindings that reference + // SAs in the local namespace to avoid having to qualify + // them. + ns := defaultNamespace + if len(subject.Namespace) > 0 { + ns = subject.Namespace + } + if len(ns) > 0 { + name := serviceaccount.MakeUsername(ns, subject.Name) + users = append(users, name) + } else { + // maybe error? this fails safe at any rate + } + default: + // maybe error? This fails safe at any rate + } + } + + return users, groups +} diff --git a/vendor/github.com/openshift/library-go/pkg/authorization/authorizationutil/util.go b/vendor/github.com/openshift/library-go/pkg/authorization/authorizationutil/util.go new file mode 100644 index 000000000..0953c41e0 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/authorization/authorizationutil/util.go @@ -0,0 +1,48 @@ +package authorizationutil + +import ( + "errors" + + authorizationv1 "k8s.io/api/authorization/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apiserver/pkg/authentication/user" + authorizationclient "k8s.io/client-go/kubernetes/typed/authorization/v1" +) + +// AddUserToSAR adds the requisite user information to a SubjectAccessReview. +// It returns the modified SubjectAccessReview. +func AddUserToSAR(user user.Info, sar *authorizationv1.SubjectAccessReview) *authorizationv1.SubjectAccessReview { + sar.Spec.User = user.GetName() + // reminiscent of the bad old days of C. 
Copies copy the min number of elements of both source and dest + sar.Spec.Groups = make([]string, len(user.GetGroups())) + copy(sar.Spec.Groups, user.GetGroups()) + sar.Spec.Extra = map[string]authorizationv1.ExtraValue{} + + for k, v := range user.GetExtra() { + sar.Spec.Extra[k] = authorizationv1.ExtraValue(v) + } + + return sar +} + +// Authorize verifies that a given user is permitted to carry out a given +// action. If this cannot be determined, or if the user is not permitted, an +// error is returned. +func Authorize(sarClient authorizationclient.SubjectAccessReviewInterface, user user.Info, resourceAttributes *authorizationv1.ResourceAttributes) error { + sar := AddUserToSAR(user, &authorizationv1.SubjectAccessReview{ + Spec: authorizationv1.SubjectAccessReviewSpec{ + ResourceAttributes: resourceAttributes, + }, + }) + + resp, err := sarClient.Create(sar) + if err == nil && resp != nil && resp.Status.Allowed { + return nil + } + + if err == nil { + err = errors.New(resp.Status.Reason) + } + return kerrors.NewForbidden(schema.GroupResource{Group: resourceAttributes.Group, Resource: resourceAttributes.Resource}, resourceAttributes.Name, err) +} diff --git a/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/clusterrole_describers.go b/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/clusterrole_describers.go new file mode 100644 index 000000000..e9b7518f3 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/clusterrole_describers.go @@ -0,0 +1,86 @@ +package scopemetadata + +import ( + "fmt" + "strings" +) + +// role:: +type ClusterRoleEvaluator struct{} + +var clusterRoleEvaluatorInstance = ClusterRoleEvaluator{} + +func (ClusterRoleEvaluator) Handles(scope string) bool { + return ClusterRoleEvaluatorHandles(scope) +} + +func (e ClusterRoleEvaluator) Validate(scope string) error { + _, _, _, err := ClusterRoleEvaluatorParseScope(scope) + return err +} + +func (e 
ClusterRoleEvaluator) Describe(scope string) (string, string, error) { + roleName, scopeNamespace, escalating, err := ClusterRoleEvaluatorParseScope(scope) + if err != nil { + return "", "", err + } + + // Anything you can do [in project "foo" | server-wide] that is also allowed by the "admin" role[, except access escalating resources like secrets] + + scopePhrase := "" + if scopeNamespace == scopesAllNamespaces { + scopePhrase = "server-wide" + } else { + scopePhrase = fmt.Sprintf("in project %q", scopeNamespace) + } + + warning := "" + escalatingPhrase := "" + if escalating { + warning = fmt.Sprintf("Includes access to escalating resources like secrets") + } else { + escalatingPhrase = ", except access escalating resources like secrets" + } + + description := fmt.Sprintf("Anything you can do %s that is also allowed by the %q role%s", scopePhrase, roleName, escalatingPhrase) + + return description, warning, nil +} + +func ClusterRoleEvaluatorHandles(scope string) bool { + return strings.HasPrefix(scope, clusterRoleIndicator) +} + +// ClusterRoleEvaluatorParseScope parses the requested scope, determining the requested role name, namespace, and if +// access to escalating objects is required. 
It will return an error if it doesn't parse cleanly +func ClusterRoleEvaluatorParseScope(scope string) (string /*role name*/, string /*namespace*/, bool /*escalating*/, error) { + if !ClusterRoleEvaluatorHandles(scope) { + return "", "", false, fmt.Errorf("bad format for scope %v", scope) + } + return parseClusterRoleScope(scope) +} + +func parseClusterRoleScope(scope string) (string /*role name*/, string /*namespace*/, bool /*escalating*/, error) { + if !strings.HasPrefix(scope, clusterRoleIndicator) { + return "", "", false, fmt.Errorf("bad format for scope %v", scope) + } + escalating := false + if strings.HasSuffix(scope, ":!") { + escalating = true + // clip that last segment before parsing the rest + scope = scope[:strings.LastIndex(scope, ":")] + } + + tokens := strings.SplitN(scope, ":", 2) + if len(tokens) != 2 { + return "", "", false, fmt.Errorf("bad format for scope %v", scope) + } + + // namespaces can't have colons, but roles can. pick last. + lastColonIndex := strings.LastIndex(tokens[1], ":") + if lastColonIndex <= 0 || lastColonIndex == (len(tokens[1])-1) { + return "", "", false, fmt.Errorf("bad format for scope %v", scope) + } + + return tokens[1][0:lastColonIndex], tokens[1][lastColonIndex+1:], escalating, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/describers.go b/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/describers.go new file mode 100644 index 000000000..65280256c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/describers.go @@ -0,0 +1,17 @@ +package scopemetadata + +// ScopeDescriber takes a scope and returns metadata about it +type ScopeDescriber interface { + // Handles returns true if this evaluator can evaluate this scope + Handles(scope string) bool + // Validate returns an error if the scope is malformed + Validate(scope string) error + // Describe returns a description, warning (typically used to warn about escalation 
dangers), or an error if the scope is malformed + Describe(scope string) (description string, warning string, err error) +} + +// ScopeDescribers map prefixes to a function that handles that prefix +var ScopeDescribers = []ScopeDescriber{ + UserEvaluator{}, + ClusterRoleEvaluator{}, +} diff --git a/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/user_describers.go b/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/user_describers.go new file mode 100644 index 000000000..586a7d787 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/user_describers.go @@ -0,0 +1,68 @@ +package scopemetadata + +import ( + "fmt" +) + +// these must agree with the scope authorizer, but it's an API we cannot realistically change +const ( + scopesAllNamespaces = "*" + + userIndicator = "user:" + clusterRoleIndicator = "role:" + + UserInfo = userIndicator + "info" + UserAccessCheck = userIndicator + "check-access" + + // UserListScopedProjects gives explicit permission to see the projects that this token can see. + UserListScopedProjects = userIndicator + "list-scoped-projects" + + // UserListAllProjects gives explicit permission to see the projects a user can see. This is often used to prime secondary ACL systems + // unrelated to openshift and to display projects for selection in a secondary UI. 
+ UserListAllProjects = userIndicator + "list-projects" + + // UserFull includes all permissions of the user + userFull = userIndicator + "full" +) + +// user: +type UserEvaluator struct{} + +func (UserEvaluator) Handles(scope string) bool { + return UserEvaluatorHandles(scope) +} + +func (e UserEvaluator) Validate(scope string) error { + if e.Handles(scope) { + return nil + } + + return fmt.Errorf("unrecognized scope: %v", scope) +} + +var defaultSupportedScopesMap = map[string]string{ + UserInfo: "Read-only access to your user information (including username, identities, and group membership)", + UserAccessCheck: `Read-only access to view your privileges (for example, "can I create builds?")`, + UserListScopedProjects: `Read-only access to list your projects viewable with this token and view their metadata (display name, description, etc.)`, + UserListAllProjects: `Read-only access to list your projects and view their metadata (display name, description, etc.)`, + userFull: `Full read/write access with all of your permissions`, +} + +func (UserEvaluator) Describe(scope string) (string, string, error) { + switch scope { + case UserInfo, UserAccessCheck, UserListScopedProjects, UserListAllProjects: + return defaultSupportedScopesMap[scope], "", nil + case userFull: + return defaultSupportedScopesMap[scope], `Includes any access you have to escalating resources like secrets`, nil + default: + return "", "", fmt.Errorf("unrecognized scope: %v", scope) + } +} + +func UserEvaluatorHandles(scope string) bool { + switch scope { + case userFull, UserInfo, UserAccessCheck, UserListScopedProjects, UserListAllProjects: + return true + } + return false +} diff --git a/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/validation.go b/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/validation.go new file mode 100644 index 000000000..59a7009b9 --- /dev/null +++ 
b/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/validation.go @@ -0,0 +1,152 @@ +package scopemetadata + +import ( + "fmt" + + kutilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/validation/field" + + oauthv1 "github.com/openshift/api/oauth/v1" +) + +func ValidateScopes(scopes []string, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if len(scopes) == 0 { + allErrs = append(allErrs, field.Required(fldPath, "may not be empty")) + } + + for i, scope := range scopes { + illegalCharacter := false + // https://tools.ietf.org/html/rfc6749#section-3.3 (full list of allowed chars is %x21 / %x23-5B / %x5D-7E) + // for those without an ascii table, that's `!`, `#-[`, `]-~` inclusive. + for _, ch := range scope { + switch { + case ch == '!': + case ch >= '#' && ch <= '[': + case ch >= ']' && ch <= '~': + default: + allErrs = append(allErrs, field.Invalid(fldPath.Index(i), scope, fmt.Sprintf("%v not allowed", ch))) + illegalCharacter = true + } + } + if illegalCharacter { + continue + } + + found := false + for _, evaluator := range ScopeDescribers { + if !evaluator.Handles(scope) { + continue + } + + found = true + if err := evaluator.Validate(scope); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Index(i), scope, err.Error())) + break + } + } + + if !found { + allErrs = append(allErrs, field.Invalid(fldPath.Index(i), scope, "no scope handler found")) + } + } + + return allErrs +} + +func ValidateScopeRestrictions(client *oauthv1.OAuthClient, scopes ...string) error { + if len(scopes) == 0 { + return fmt.Errorf("%s may not request unscoped tokens", client.Name) + } + + if len(client.ScopeRestrictions) == 0 { + return nil + } + + errs := []error{} + for _, scope := range scopes { + if err := validateScopeRestrictions(client, scope); err != nil { + errs = append(errs, err) + } + } + + return kutilerrors.NewAggregate(errs) +} + +func validateScopeRestrictions(client 
*oauthv1.OAuthClient, scope string) error { + errs := []error{} + + for _, restriction := range client.ScopeRestrictions { + if len(restriction.ExactValues) > 0 { + if err := validateLiteralScopeRestrictions(scope, restriction.ExactValues); err != nil { + errs = append(errs, err) + continue + } + return nil + } + + if restriction.ClusterRole != nil { + if !ClusterRoleEvaluatorHandles(scope) { + continue + } + if err := validateClusterRoleScopeRestrictions(scope, *restriction.ClusterRole); err != nil { + errs = append(errs, err) + continue + } + return nil + } + } + + // if we got here, then nothing matched. If we already have errors, do nothing, otherwise add one to make it report failed. + if len(errs) == 0 { + errs = append(errs, fmt.Errorf("%v did not match any scope restriction", scope)) + } + + return kutilerrors.NewAggregate(errs) +} + +func validateLiteralScopeRestrictions(scope string, literals []string) error { + for _, literal := range literals { + if literal == scope { + return nil + } + } + + return fmt.Errorf("%v not found in %v", scope, literals) +} + +func validateClusterRoleScopeRestrictions(scope string, restriction oauthv1.ClusterRoleScopeRestriction) error { + role, namespace, escalating, err := ClusterRoleEvaluatorParseScope(scope) + if err != nil { + return err + } + + foundName := false + for _, restrictedRoleName := range restriction.RoleNames { + if restrictedRoleName == "*" || restrictedRoleName == role { + foundName = true + break + } + } + if !foundName { + return fmt.Errorf("%v does not use an approved name", scope) + } + + foundNamespace := false + for _, restrictedNamespace := range restriction.Namespaces { + if restrictedNamespace == "*" || restrictedNamespace == namespace { + foundNamespace = true + break + } + } + if !foundNamespace { + return fmt.Errorf("%v does not use an approved namespace", scope) + } + + if escalating && !restriction.AllowEscalation { + return fmt.Errorf("%v is not allowed to escalate", scope) + } + + return 
nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/validation_test.go b/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/validation_test.go new file mode 100644 index 000000000..ddb3ff774 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/authorization/scopemetadata/validation_test.go @@ -0,0 +1,141 @@ +package scopemetadata + +import ( + "strings" + "testing" + + oauthv1 "github.com/openshift/api/oauth/v1" +) + +func TestValidateScopeRestrictions(t *testing.T) { + testCases := []struct { + name string + scopes []string + client *oauthv1.OAuthClient + + expectedErrors []string + }{ + { + name: "unrestricted allows any", + scopes: []string{"one"}, + client: &oauthv1.OAuthClient{}, + }, + { + name: "unrestricted allows empty", + scopes: []string{""}, + client: &oauthv1.OAuthClient{}, + }, + { + name: "missing scopes check precedes unrestricted", + scopes: []string{}, + client: &oauthv1.OAuthClient{}, + expectedErrors: []string{"may not request unscoped tokens"}, + }, + { + name: "simple literal", + scopes: []string{"one"}, + client: &oauthv1.OAuthClient{ + ScopeRestrictions: []oauthv1.ScopeRestriction{{ExactValues: []string{"two", "one"}}}, + }, + }, + { + name: "simple must match", + scopes: []string{"missing"}, + client: &oauthv1.OAuthClient{ + ScopeRestrictions: []oauthv1.ScopeRestriction{{ExactValues: []string{"two", "one"}}}, + }, + expectedErrors: []string{`missing not found in [two one]`}, + }, + { + name: "cluster role name must match", + scopes: []string{clusterRoleIndicator + "three:alfa"}, + client: &oauthv1.OAuthClient{ + ScopeRestrictions: []oauthv1.ScopeRestriction{{ClusterRole: &oauthv1.ClusterRoleScopeRestriction{ + RoleNames: []string{"one", "two"}, + Namespaces: []string{"alfa", "bravo"}, + AllowEscalation: false, + }}}, + }, + expectedErrors: []string{`role:three:alfa does not use an approved name`}, + }, + { + name: "cluster role namespace must match", + scopes: 
[]string{clusterRoleIndicator + "two:charlie"}, + client: &oauthv1.OAuthClient{ + ScopeRestrictions: []oauthv1.ScopeRestriction{{ClusterRole: &oauthv1.ClusterRoleScopeRestriction{ + RoleNames: []string{"one", "two"}, + Namespaces: []string{"alfa", "bravo"}, + AllowEscalation: false, + }}}, + }, + expectedErrors: []string{`role:two:charlie does not use an approved namespace`}, + }, + { + name: "cluster role escalation must match", + scopes: []string{clusterRoleIndicator + "two:bravo:!"}, + client: &oauthv1.OAuthClient{ + ScopeRestrictions: []oauthv1.ScopeRestriction{{ClusterRole: &oauthv1.ClusterRoleScopeRestriction{ + RoleNames: []string{"one", "two"}, + Namespaces: []string{"alfa", "bravo"}, + AllowEscalation: false, + }}}, + }, + expectedErrors: []string{`role:two:bravo:! is not allowed to escalate`}, + }, + { + name: "cluster role matches", + scopes: []string{clusterRoleIndicator + "two:bravo:!"}, + client: &oauthv1.OAuthClient{ + ScopeRestrictions: []oauthv1.ScopeRestriction{{ClusterRole: &oauthv1.ClusterRoleScopeRestriction{ + RoleNames: []string{"one", "two"}, + Namespaces: []string{"alfa", "bravo"}, + AllowEscalation: true, + }}}, + }, + }, + { + name: "cluster role matches 2", + scopes: []string{clusterRoleIndicator + "two:bravo"}, + client: &oauthv1.OAuthClient{ + ScopeRestrictions: []oauthv1.ScopeRestriction{{ClusterRole: &oauthv1.ClusterRoleScopeRestriction{ + RoleNames: []string{"one", "two"}, + Namespaces: []string{"alfa", "bravo"}, + AllowEscalation: false, + }}}, + }, + }, + { + name: "cluster role star matches", + scopes: []string{clusterRoleIndicator + "two:bravo"}, + client: &oauthv1.OAuthClient{ + ScopeRestrictions: []oauthv1.ScopeRestriction{{ClusterRole: &oauthv1.ClusterRoleScopeRestriction{ + RoleNames: []string{"one", "two", "*"}, + Namespaces: []string{"alfa", "bravo", "*"}, + AllowEscalation: true, + }}}, + }, + }, + } + + for _, tc := range testCases { + err := ValidateScopeRestrictions(tc.client, tc.scopes...) 
+ if err != nil && len(tc.expectedErrors) == 0 { + t.Errorf("%s: unexpected error: %v", tc.name, err) + continue + } + if err == nil && len(tc.expectedErrors) > 0 { + t.Errorf("%s: missing error: %v", tc.name, tc.expectedErrors) + continue + } + if err == nil && len(tc.expectedErrors) == 0 { + continue + } + + for _, expectedErr := range tc.expectedErrors { + if !strings.Contains(err.Error(), expectedErr) { + t.Errorf("%s: error %v missing %v", tc.name, err, expectedErr) + } + } + } + +} diff --git a/vendor/github.com/openshift/library-go/pkg/build/buildutil/buildutil.go b/vendor/github.com/openshift/library-go/pkg/build/buildutil/buildutil.go new file mode 100644 index 000000000..0ebc3d9a9 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/build/buildutil/buildutil.go @@ -0,0 +1,85 @@ +package buildutil + +import ( + corev1 "k8s.io/api/core/v1" + + buildv1 "github.com/openshift/api/build/v1" +) + +// GetInputReference returns the From ObjectReference associated with the +// BuildStrategy. 
+func GetInputReference(strategy buildv1.BuildStrategy) *corev1.ObjectReference { + switch { + case strategy.SourceStrategy != nil: + return &strategy.SourceStrategy.From + case strategy.DockerStrategy != nil: + return strategy.DockerStrategy.From + case strategy.CustomStrategy != nil: + return &strategy.CustomStrategy.From + default: + return nil + } +} + +// GetBuildEnv gets the build strategy environment +func GetBuildEnv(build *buildv1.Build) []corev1.EnvVar { + switch { + case build.Spec.Strategy.SourceStrategy != nil: + return build.Spec.Strategy.SourceStrategy.Env + case build.Spec.Strategy.DockerStrategy != nil: + return build.Spec.Strategy.DockerStrategy.Env + case build.Spec.Strategy.CustomStrategy != nil: + return build.Spec.Strategy.CustomStrategy.Env + case build.Spec.Strategy.JenkinsPipelineStrategy != nil: + return build.Spec.Strategy.JenkinsPipelineStrategy.Env + default: + return nil + } +} + +// SetBuildEnv replaces the current build environment +func SetBuildEnv(build *buildv1.Build, env []corev1.EnvVar) { + var oldEnv *[]corev1.EnvVar + + switch { + case build.Spec.Strategy.SourceStrategy != nil: + oldEnv = &build.Spec.Strategy.SourceStrategy.Env + case build.Spec.Strategy.DockerStrategy != nil: + oldEnv = &build.Spec.Strategy.DockerStrategy.Env + case build.Spec.Strategy.CustomStrategy != nil: + oldEnv = &build.Spec.Strategy.CustomStrategy.Env + case build.Spec.Strategy.JenkinsPipelineStrategy != nil: + oldEnv = &build.Spec.Strategy.JenkinsPipelineStrategy.Env + default: + return + } + *oldEnv = env +} + +// FindTriggerPolicy retrieves the BuildTrigger(s) of a given type from a build configuration. +// Returns nil if no matches are found. 
+func FindTriggerPolicy(triggerType buildv1.BuildTriggerType, config *buildv1.BuildConfig) (buildTriggers []buildv1.BuildTriggerPolicy) { + for _, specTrigger := range config.Spec.Triggers { + if specTrigger.Type == triggerType { + buildTriggers = append(buildTriggers, specTrigger) + } + } + return buildTriggers +} + +// ConfigNameForBuild returns the name of the build config from a +// build name. +func ConfigNameForBuild(build *buildv1.Build) string { + if build == nil { + return "" + } + if build.Annotations != nil { + if _, exists := build.Annotations[buildv1.BuildConfigAnnotation]; exists { + return build.Annotations[buildv1.BuildConfigAnnotation] + } + } + if _, exists := build.Labels[buildv1.BuildConfigLabel]; exists { + return build.Labels[buildv1.BuildConfigLabel] + } + return build.Labels[buildv1.BuildConfigLabelDeprecated] +} diff --git a/vendor/github.com/openshift/library-go/pkg/build/buildutil/buildutil_test.go b/vendor/github.com/openshift/library-go/pkg/build/buildutil/buildutil_test.go new file mode 100644 index 000000000..33134c919 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/build/buildutil/buildutil_test.go @@ -0,0 +1 @@ +package buildutil diff --git a/vendor/github.com/openshift/library-go/pkg/build/envresolve/env.go b/vendor/github.com/openshift/library-go/pkg/build/envresolve/env.go new file mode 100644 index 000000000..07663c34d --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/build/envresolve/env.go @@ -0,0 +1,115 @@ +package envresolve + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" +) + +// ResourceStore defines a new resource store data structure +type ResourceStore struct { + SecretStore map[string]*corev1.Secret + ConfigMapStore map[string]*corev1.ConfigMap +} + +// NewResourceStore returns a pointer to a new resource store data structure +func NewResourceStore() *ResourceStore { + 
return &ResourceStore{ + SecretStore: make(map[string]*corev1.Secret), + ConfigMapStore: make(map[string]*corev1.ConfigMap), + } +} + +// getSecretRefValue returns the value of a secret in the supplied namespace +func getSecretRefValue(client kubernetes.Interface, namespace string, store *ResourceStore, secretSelector *corev1.SecretKeySelector) (string, error) { + secret, ok := store.SecretStore[secretSelector.Name] + if !ok { + var err error + secret, err = client.CoreV1().Secrets(namespace).Get(secretSelector.Name, metav1.GetOptions{}) + if err != nil { + return "", err + } + store.SecretStore[secretSelector.Name] = secret + } + if data, ok := secret.Data[secretSelector.Key]; ok { + return string(data), nil + } + return "", fmt.Errorf("key %s not found in secret %s", secretSelector.Key, secretSelector.Name) + +} + +// getConfigMapRefValue returns the value of a configmap in the supplied namespace +func getConfigMapRefValue(client kubernetes.Interface, namespace string, store *ResourceStore, configMapSelector *corev1.ConfigMapKeySelector) (string, error) { + configMap, ok := store.ConfigMapStore[configMapSelector.Name] + if !ok { + var err error + configMap, err = client.CoreV1().ConfigMaps(namespace).Get(configMapSelector.Name, metav1.GetOptions{}) + if err != nil { + return "", err + } + store.ConfigMapStore[configMapSelector.Name] = configMap + } + if data, ok := configMap.Data[configMapSelector.Key]; ok { + return string(data), nil + } + return "", fmt.Errorf("key %s not found in config map %s", configMapSelector.Key, configMapSelector.Name) +} + +// getFieldRef returns the value of the supplied path in the given object +func getFieldRef(obj runtime.Object, from *corev1.EnvVarSource) (string, error) { + return ExtractFieldPathAsString(obj, from.FieldRef.FieldPath) +} + +// getResourceFieldRef returns the value of a resource in the given container +func getResourceFieldRef(from *corev1.EnvVarSource, c *corev1.Container) (string, error) { + return 
ExtractContainerResourceValue(from.ResourceFieldRef, c) +} + +// GenEnvVarRefValue returns the value referenced by the supplied EnvVarSource given the other supplied information +func GetEnvVarRefValue(kc kubernetes.Interface, ns string, store *ResourceStore, from *corev1.EnvVarSource, obj runtime.Object, c *corev1.Container) (string, error) { + if from.SecretKeyRef != nil { + return getSecretRefValue(kc, ns, store, from.SecretKeyRef) + } + + if from.ConfigMapKeyRef != nil { + return getConfigMapRefValue(kc, ns, store, from.ConfigMapKeyRef) + } + + if from.FieldRef != nil { + return getFieldRef(obj, from) + } + + if from.ResourceFieldRef != nil { + return getResourceFieldRef(from, c) + } + + return "", fmt.Errorf("invalid valueFrom") +} + +// GenEnvVarRefString returns a text description of the supplied EnvVarSource +func GetEnvVarRefString(from *corev1.EnvVarSource) string { + if from.ConfigMapKeyRef != nil { + return fmt.Sprintf("configmap %s, key %s", from.ConfigMapKeyRef.Name, from.ConfigMapKeyRef.Key) + } + + if from.SecretKeyRef != nil { + return fmt.Sprintf("secret %s, key %s", from.SecretKeyRef.Name, from.SecretKeyRef.Key) + } + + if from.FieldRef != nil { + return fmt.Sprintf("field path %s", from.FieldRef.FieldPath) + } + + if from.ResourceFieldRef != nil { + containerPrefix := "" + if from.ResourceFieldRef.ContainerName != "" { + containerPrefix = fmt.Sprintf("%s/", from.ResourceFieldRef.ContainerName) + } + return fmt.Sprintf("resource field %s%s", containerPrefix, from.ResourceFieldRef.Resource) + } + + return "invalid valueFrom" +} diff --git a/vendor/github.com/openshift/library-go/pkg/build/envresolve/kube_copy.go b/vendor/github.com/openshift/library-go/pkg/build/envresolve/kube_copy.go new file mode 100644 index 000000000..1d01fa424 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/build/envresolve/kube_copy.go @@ -0,0 +1,150 @@ +package envresolve + +import ( + "fmt" + "math" + "strconv" + "strings" + + "k8s.io/api/core/v1" + 
"k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" +) + +// these are all from fieldpath.go + +// FormatMap formats map[string]string to a string. +func FormatMap(m map[string]string) (fmtStr string) { + // output with keys in sorted order to provide stable output + keys := sets.NewString() + for key := range m { + keys.Insert(key) + } + for _, key := range keys.List() { + fmtStr += fmt.Sprintf("%v=%q\n", key, m[key]) + } + fmtStr = strings.TrimSuffix(fmtStr, "\n") + + return +} + +// ExtractFieldPathAsString extracts the field from the given object +// and returns it as a string. The object must be a pointer to an +// API type. +func ExtractFieldPathAsString(obj interface{}, fieldPath string) (string, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return "", nil + } + + if path, subscript, ok := SplitMaybeSubscriptedPath(fieldPath); ok { + switch path { + case "metadata.annotations": + if errs := validation.IsQualifiedName(strings.ToLower(subscript)); len(errs) != 0 { + return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";")) + } + return accessor.GetAnnotations()[subscript], nil + case "metadata.labels": + if errs := validation.IsQualifiedName(subscript); len(errs) != 0 { + return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";")) + } + return accessor.GetLabels()[subscript], nil + default: + return "", fmt.Errorf("fieldPath %q does not support subscript", fieldPath) + } + } + + switch fieldPath { + case "metadata.annotations": + return FormatMap(accessor.GetAnnotations()), nil + case "metadata.labels": + return FormatMap(accessor.GetLabels()), nil + case "metadata.name": + return accessor.GetName(), nil + case "metadata.namespace": + return accessor.GetNamespace(), nil + case "metadata.uid": + return string(accessor.GetUID()), nil + } + + return "", 
fmt.Errorf("unsupported fieldPath: %v", fieldPath) +} + +// SplitMaybeSubscriptedPath checks whether the specified fieldPath is +// subscripted, and +// - if yes, this function splits the fieldPath into path and subscript, and +// returns (path, subscript, true). +// - if no, this function returns (fieldPath, "", false). +// +// Example inputs and outputs: +// - "metadata.annotations['myKey']" --> ("metadata.annotations", "myKey", true) +// - "metadata.annotations['a[b]c']" --> ("metadata.annotations", "a[b]c", true) +// - "metadata.labels['']" --> ("metadata.labels", "", true) +// - "metadata.labels" --> ("metadata.labels", "", false) +func SplitMaybeSubscriptedPath(fieldPath string) (string, string, bool) { + if !strings.HasSuffix(fieldPath, "']") { + return fieldPath, "", false + } + s := strings.TrimSuffix(fieldPath, "']") + parts := strings.SplitN(s, "['", 2) + if len(parts) < 2 { + return fieldPath, "", false + } + if len(parts[0]) == 0 { + return fieldPath, "", false + } + return parts[0], parts[1], true +} + +// these are from api/v1/helpers.go + +// ExtractContainerResourceValue extracts the value of a resource +// in an already known container +func ExtractContainerResourceValue(fs *v1.ResourceFieldSelector, container *v1.Container) (string, error) { + divisor := resource.Quantity{} + if divisor.Cmp(fs.Divisor) == 0 { + divisor = resource.MustParse("1") + } else { + divisor = fs.Divisor + } + + switch fs.Resource { + case "limits.cpu": + return convertResourceCPUToString(container.Resources.Limits.Cpu(), divisor) + case "limits.memory": + return convertResourceMemoryToString(container.Resources.Limits.Memory(), divisor) + case "limits.ephemeral-storage": + return convertResourceEphemeralStorageToString(container.Resources.Limits.StorageEphemeral(), divisor) + case "requests.cpu": + return convertResourceCPUToString(container.Resources.Requests.Cpu(), divisor) + case "requests.memory": + return 
convertResourceMemoryToString(container.Resources.Requests.Memory(), divisor) + case "requests.ephemeral-storage": + return convertResourceEphemeralStorageToString(container.Resources.Requests.StorageEphemeral(), divisor) + } + + return "", fmt.Errorf("Unsupported container resource : %v", fs.Resource) +} + +// convertResourceCPUToString converts cpu value to the format of divisor and returns +// ceiling of the value. +func convertResourceCPUToString(cpu *resource.Quantity, divisor resource.Quantity) (string, error) { + c := int64(math.Ceil(float64(cpu.MilliValue()) / float64(divisor.MilliValue()))) + return strconv.FormatInt(c, 10), nil +} + +// convertResourceMemoryToString converts memory value to the format of divisor and returns +// ceiling of the value. +func convertResourceMemoryToString(memory *resource.Quantity, divisor resource.Quantity) (string, error) { + m := int64(math.Ceil(float64(memory.Value()) / float64(divisor.Value()))) + return strconv.FormatInt(m, 10), nil +} + +// convertResourceEphemeralStorageToString converts ephemeral storage value to the format of divisor and returns +// ceiling of the value. +func convertResourceEphemeralStorageToString(ephemeralStorage *resource.Quantity, divisor resource.Quantity) (string, error) { + m := int64(math.Ceil(float64(ephemeralStorage.Value()) / float64(divisor.Value()))) + return strconv.FormatInt(m, 10), nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/build/naming/namer.go b/vendor/github.com/openshift/library-go/pkg/build/naming/namer.go new file mode 100644 index 000000000..1e06745b3 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/build/naming/namer.go @@ -0,0 +1,73 @@ +package naming + +import ( + "fmt" + "hash/fnv" + + kvalidation "k8s.io/apimachinery/pkg/util/validation" +) + +// GetName returns a name given a base ("deployment-5") and a suffix ("deploy") +// It will first attempt to join them with a dash. 
If the resulting name is longer +// than maxLength: if the suffix is too long, it will truncate the base name and add +// an 8-character hash of the [base]-[suffix] string. If the suffix is not too long, +// it will truncate the base, add the hash of the base and return [base]-[hash]-[suffix] +func GetName(base, suffix string, maxLength int) string { + if maxLength <= 0 { + return "" + } + name := fmt.Sprintf("%s-%s", base, suffix) + if len(name) <= maxLength { + return name + } + + baseLength := maxLength - 10 /*length of -hash-*/ - len(suffix) + + // if the suffix is too long, ignore it + if baseLength < 0 { + prefix := base[0:min(len(base), max(0, maxLength-9))] + // Calculate hash on initial base-suffix string + shortName := fmt.Sprintf("%s-%s", prefix, hash(name)) + return shortName[:min(maxLength, len(shortName))] + } + + prefix := base[0:baseLength] + // Calculate hash on initial base-suffix string + return fmt.Sprintf("%s-%s-%s", prefix, hash(base), suffix) +} + +// GetPodName calls GetName with the length restriction for pods +func GetPodName(base, suffix string) string { + return GetName(base, suffix, kvalidation.DNS1123SubdomainMaxLength) +} + +// GetConfigMapName calls GetName with the length restriction for ConfigMaps +func GetConfigMapName(base, suffix string) string { + return GetName(base, suffix, kvalidation.DNS1123SubdomainMaxLength) +} + +// max returns the greater of its 2 inputs +func max(a, b int) int { + if b > a { + return b + } + return a +} + +// min returns the lesser of its 2 inputs +func min(a, b int) int { + if b < a { + return b + } + return a +} + +// hash calculates the hexadecimal representation (8-chars) +// of the hash of the passed in string using the FNV-a algorithm +func hash(s string) string { + hash := fnv.New32a() + hash.Write([]byte(s)) + intHash := hash.Sum32() + result := fmt.Sprintf("%08x", intHash) + return result +} diff --git a/vendor/github.com/openshift/library-go/pkg/build/naming/namer_test.go 
b/vendor/github.com/openshift/library-go/pkg/build/naming/namer_test.go new file mode 100644 index 000000000..33db1a801 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/build/naming/namer_test.go @@ -0,0 +1,101 @@ +package naming + +import ( + "math/rand" + "testing" + + kvalidation "k8s.io/apimachinery/pkg/util/validation" +) + +func TestGetName(t *testing.T) { + for i := 0; i < 10; i++ { + shortName := randSeq(rand.Intn(kvalidation.DNS1123SubdomainMaxLength-1) + 1) + longName := randSeq(kvalidation.DNS1123SubdomainMaxLength + rand.Intn(100)) + + tests := []struct { + base, suffix, expected string + }{ + { + base: shortName, + suffix: "deploy", + expected: shortName + "-deploy", + }, + { + base: longName, + suffix: "deploy", + expected: longName[:kvalidation.DNS1123SubdomainMaxLength-16] + "-" + hash(longName) + "-deploy", + }, + { + base: shortName, + suffix: longName, + expected: shortName + "-" + hash(shortName+"-"+longName), + }, + { + base: "", + suffix: shortName, + expected: "-" + shortName, + }, + { + base: "", + suffix: longName, + expected: "-" + hash("-"+longName), + }, + { + base: shortName, + suffix: "", + expected: shortName + "-", + }, + { + base: longName, + suffix: "", + expected: longName[:kvalidation.DNS1123SubdomainMaxLength-10] + "-" + hash(longName) + "-", + }, + } + + for _, test := range tests { + result := GetName(test.base, test.suffix, kvalidation.DNS1123SubdomainMaxLength) + if result != test.expected { + t.Errorf("Got unexpected result. 
Expected: %s Got: %s", test.expected, result) + } + } + } +} + +func TestGetNameIsDifferent(t *testing.T) { + shortName := randSeq(32) + deployerName := GetName(shortName, "deploy", kvalidation.DNS1123SubdomainMaxLength) + builderName := GetName(shortName, "build", kvalidation.DNS1123SubdomainMaxLength) + if deployerName == builderName { + t.Errorf("Expecting names to be different: %s\n", deployerName) + } + longName := randSeq(kvalidation.DNS1123SubdomainMaxLength + 10) + deployerName = GetName(longName, "deploy", kvalidation.DNS1123SubdomainMaxLength) + builderName = GetName(longName, "build", kvalidation.DNS1123SubdomainMaxLength) + if deployerName == builderName { + t.Errorf("Expecting names to be different: %s\n", deployerName) + } +} + +func TestGetNameReturnShortNames(t *testing.T) { + base := randSeq(32) + for maxLength := 0; maxLength < len(base)+2; maxLength++ { + for suffixLen := 0; suffixLen <= maxLength+1; suffixLen++ { + suffix := randSeq(suffixLen) + got := GetName(base, suffix, maxLength) + if len(got) > maxLength { + t.Fatalf("len(GetName(%[1]q, %[2]q, %[3]d)) = len(%[4]q) = %[5]d; want %[3]d", base, suffix, maxLength, got, len(got)) + } + } + } +} + +// From k8s.io/kubernetes/pkg/api/generator.go +var letters = []rune("abcdefghijklmnopqrstuvwxyz0123456789-") + +func randSeq(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = letters[rand.Intn(len(letters))] + } + return string(b) +} diff --git a/vendor/github.com/openshift/library-go/pkg/certs/pem.go b/vendor/github.com/openshift/library-go/pkg/certs/pem.go new file mode 100644 index 000000000..c3f7ff306 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/certs/pem.go @@ -0,0 +1,57 @@ +package certs + +import ( + "bytes" + "encoding/pem" + "io/ioutil" + "os" + "path/filepath" +) + +const ( + // StringSourceEncryptedBlockType is the PEM block type used to store an encrypted string + StringSourceEncryptedBlockType = "ENCRYPTED STRING" + // StringSourceKeyBlockType is the 
PEM block type used to store an encrypting key + StringSourceKeyBlockType = "ENCRYPTING KEY" +) + +func BlockFromFile(path string, blockType string) (*pem.Block, bool, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return nil, false, err + } + block, ok := BlockFromBytes(data, blockType) + return block, ok, nil +} + +func BlockFromBytes(data []byte, blockType string) (*pem.Block, bool) { + for { + block, remaining := pem.Decode(data) + if block == nil { + return nil, false + } + if block.Type == blockType { + return block, true + } + data = remaining + } +} + +func BlockToFile(path string, block *pem.Block, mode os.FileMode) error { + b, err := BlockToBytes(block) + if err != nil { + return err + } + if err := os.MkdirAll(filepath.Dir(path), os.FileMode(0755)); err != nil { + return err + } + return ioutil.WriteFile(path, b, mode) +} + +func BlockToBytes(block *pem.Block) ([]byte, error) { + b := bytes.Buffer{} + if err := pem.Encode(&b, block); err != nil { + return nil, err + } + return b.Bytes(), nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/helpers/readresource.go b/vendor/github.com/openshift/library-go/pkg/config/helpers/readresource.go new file mode 100644 index 000000000..0c68ee27c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/config/helpers/readresource.go @@ -0,0 +1,167 @@ +package helpers + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "reflect" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + kyaml "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/klog" + "sigs.k8s.io/yaml" +) + +// InstallFunc is the "normal" function for installing scheme +type InstallFunc func(scheme *runtime.Scheme) error + +// ReadYAMLToInternal reads content of a reader and returns the runtime.Object that matches it. It chooses the match from +// the scheme installation that you provide. It converts to internal for you. 
+func ReadYAMLToInternal(reader io.Reader, schemeFns ...InstallFunc) (runtime.Object, error) { + if reader == nil || reflect.ValueOf(reader).IsNil() { + return nil, nil + } + data, err := ioutil.ReadAll(reader) + if err != nil { + return nil, err + } + jsonData, err := kyaml.ToJSON(data) + if err != nil { + // maybe we were already json + jsonData = data + } + + scheme := runtime.NewScheme() + for _, schemeFn := range schemeFns { + err := schemeFn(scheme) + if err != nil { + return nil, err + } + } + codec := serializer.NewCodecFactory(scheme).LegacyCodec(scheme.PrioritizedVersionsAllGroups()...) + + obj, err := runtime.Decode(codec, jsonData) + if err != nil { + return nil, captureSurroundingJSONForError("error reading config: ", jsonData, err) + } + // make sure there are no extra fields in jsonData + if err := strictDecodeCheck(jsonData, obj, scheme); err != nil { + return nil, err + } + + return obj, nil +} + +// ReadYAML reads content of a reader and returns the runtime.Object that matches it. It chooses the match from +// the scheme installation that you provide. It does not convert and it does not default. 
+func ReadYAML(reader io.Reader, schemeFns ...InstallFunc) (runtime.Object, error) { + if reader == nil || reflect.ValueOf(reader).IsNil() { + return nil, nil + } + data, err := ioutil.ReadAll(reader) + if err != nil { + return nil, err + } + jsonData, err := kyaml.ToJSON(data) + if err != nil { + // maybe we were already json + jsonData = data + } + + scheme := runtime.NewScheme() + for _, schemeFn := range schemeFns { + err := schemeFn(scheme) + if err != nil { + return nil, err + } + } + codec := serializer.NewCodecFactory(scheme).UniversalDeserializer() + + obj, err := runtime.Decode(codec, jsonData) + if err != nil { + return nil, captureSurroundingJSONForError("error reading config: ", jsonData, err) + } + // make sure there are no extra fields in jsonData + if err := strictDecodeCheck(jsonData, obj, scheme); err != nil { + return nil, err + } + + return obj, nil +} + +// TODO: we ultimately want a better decoder for JSON that allows us exact line numbers and better +// surrounding text description. This should be removed / replaced when that happens. 
+func captureSurroundingJSONForError(prefix string, data []byte, err error) error { + if syntaxErr, ok := err.(*json.SyntaxError); err != nil && ok { + offset := syntaxErr.Offset + begin := offset - 20 + if begin < 0 { + begin = 0 + } + end := offset + 20 + if end > int64(len(data)) { + end = int64(len(data)) + } + return fmt.Errorf("%s%v (found near '%s')", prefix, err, string(data[begin:end])) + } + if err != nil { + return fmt.Errorf("%s%v", prefix, err) + } + return err +} + +// strictDecodeCheck fails decodes when jsonData contains fields not included in the external version of obj +func strictDecodeCheck(jsonData []byte, obj runtime.Object, scheme *runtime.Scheme) error { + out, err := getExternalZeroValue(obj, scheme) // we need the external version of obj as that has the correct JSON struct tags + if err != nil { + klog.Errorf("Encountered config error %v in object %T, raw JSON:\n%s", err, obj, string(jsonData)) // TODO just return the error and die + // never error for now, we need to determine a safe way to make this check fatal + return nil + } + d := json.NewDecoder(bytes.NewReader(jsonData)) + d.DisallowUnknownFields() + // note that we only care about the error, out is discarded + if err := d.Decode(out); err != nil { + klog.Errorf("Encountered config error %v in object %T, raw JSON:\n%s", err, obj, string(jsonData)) // TODO just return the error and die + } + // never error for now, we need to determine a safe way to make this check fatal + return nil +} + +// getExternalZeroValue returns the zero value of the external version of obj +func getExternalZeroValue(obj runtime.Object, scheme *runtime.Scheme) (runtime.Object, error) { + gvks, _, err := scheme.ObjectKinds(obj) + if err != nil { + return nil, err + } + if len(gvks) == 0 { // should never happen + return nil, fmt.Errorf("no gvks found for %#v", obj) + } + return scheme.New(gvks[0]) +} + +// WriteYAML serializes a yaml file based on the scheme functions provided +func WriteYAML(obj 
runtime.Object, schemeFns ...InstallFunc) ([]byte, error) { + scheme := runtime.NewScheme() + for _, schemeFn := range schemeFns { + err := schemeFn(scheme) + if err != nil { + return nil, err + } + } + codec := serializer.NewCodecFactory(scheme).LegacyCodec(scheme.PrioritizedVersionsAllGroups()...) + + json, err := runtime.Encode(codec, obj) + if err != nil { + return nil, err + } + + content, err := yaml.JSONToYAML(json) + if err != nil { + return nil, err + } + return content, err +} diff --git a/vendor/github.com/openshift/library-go/pkg/config/leaderelection/leaderelection.go b/vendor/github.com/openshift/library-go/pkg/config/leaderelection/leaderelection.go index f3399e867..7bdcc3056 100644 --- a/vendor/github.com/openshift/library-go/pkg/config/leaderelection/leaderelection.go +++ b/vendor/github.com/openshift/library-go/pkg/config/leaderelection/leaderelection.go @@ -47,6 +47,7 @@ func ToConfigMapLeaderElection(clientConfig *rest.Config, config configv1.Leader config.Namespace, config.Name, kubeClient.CoreV1(), + kubeClient.CoordinationV1(), resourcelock.ResourceLockConfig{ Identity: identity, EventRecorder: eventRecorder, diff --git a/vendor/github.com/openshift/library-go/pkg/config/serving/options.go b/vendor/github.com/openshift/library-go/pkg/config/serving/options.go index efb446ba4..bb710454d 100644 --- a/vendor/github.com/openshift/library-go/pkg/config/serving/options.go +++ b/vendor/github.com/openshift/library-go/pkg/config/serving/options.go @@ -6,7 +6,7 @@ import ( "strconv" genericapiserveroptions "k8s.io/apiserver/pkg/server/options" - utilflag "k8s.io/apiserver/pkg/util/flag" + utilflag "k8s.io/component-base/cli/flag" configv1 "github.com/openshift/api/config/v1" ) diff --git a/vendor/github.com/openshift/library-go/pkg/config/serving/server.go b/vendor/github.com/openshift/library-go/pkg/config/serving/server.go index af74b74d5..3869d5c2e 100644 --- a/vendor/github.com/openshift/library-go/pkg/config/serving/server.go +++ 
b/vendor/github.com/openshift/library-go/pkg/config/serving/server.go @@ -1,17 +1,23 @@ package serving import ( + "context" + "time" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/wait" genericapiserver "k8s.io/apiserver/pkg/server" genericapiserveroptions "k8s.io/apiserver/pkg/server/options" + "k8s.io/klog" configv1 "github.com/openshift/api/config/v1" operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1" ) -func ToServerConfig(servingInfo configv1.HTTPServingInfo, authenticationConfig operatorv1alpha1.DelegatedAuthentication, authorizationConfig operatorv1alpha1.DelegatedAuthorization, kubeConfigFile string) (*genericapiserver.Config, error) { +func ToServerConfig(ctx context.Context, servingInfo configv1.HTTPServingInfo, authenticationConfig operatorv1alpha1.DelegatedAuthentication, authorizationConfig operatorv1alpha1.DelegatedAuthorization, + kubeConfigFile string) (*genericapiserver.Config, error) { scheme := runtime.NewScheme() metav1.AddToGroupVersion(scheme, metav1.SchemeGroupVersion) config := genericapiserver.NewConfig(serializer.NewCodecFactory(scheme)) @@ -20,23 +26,51 @@ func ToServerConfig(servingInfo configv1.HTTPServingInfo, authenticationConfig o if err != nil { return nil, err } + if err := servingOptions.ApplyTo(&config.SecureServing, &config.LoopbackClientConfig); err != nil { return nil, err } + var lastApplyErr error + + pollCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + if !authenticationConfig.Disabled { authenticationOptions := genericapiserveroptions.NewDelegatingAuthenticationOptions() authenticationOptions.RemoteKubeConfigFile = kubeConfigFile - if err := authenticationOptions.ApplyTo(&config.Authentication, config.SecureServing, config.OpenAPIConfig); err != nil { - return nil, err + + // In some cases the API server can return connection refused when getting the 
"extension-apiserver-authentication" + // config map. + err := wait.PollImmediateUntil(1*time.Second, func() (done bool, err error) { + lastApplyErr = authenticationOptions.ApplyTo(&config.Authentication, config.SecureServing, config.OpenAPIConfig) + if lastApplyErr != nil { + klog.V(4).Infof("Error initializing delegating authentication (will retry): %v", err) + return false, nil + } + return true, nil + }, pollCtx.Done()) + if err != nil { + return nil, lastApplyErr } } if !authorizationConfig.Disabled { authorizationOptions := genericapiserveroptions.NewDelegatingAuthorizationOptions() authorizationOptions.RemoteKubeConfigFile = kubeConfigFile - if err := authorizationOptions.ApplyTo(&config.Authorization); err != nil { - return nil, err + + // In some cases the API server can return connection refused when getting the "extension-apiserver-authentication" + // config map. + err := wait.PollImmediateUntil(1*time.Second, func() (done bool, err error) { + lastApplyErr = authorizationOptions.ApplyTo(&config.Authorization) + if lastApplyErr != nil { + klog.V(4).Infof("Error initializing delegating authorization (will retry): %v", err) + return false, nil + } + return true, nil + }, pollCtx.Done()) + if err != nil { + return nil, lastApplyErr } } diff --git a/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/builder.go b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/builder.go index afaad37f6..01ed53da6 100644 --- a/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/builder.go +++ b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/builder.go @@ -199,7 +199,7 @@ func (b *ControllerBuilder) Run(config *unstructured.Unstructured, ctx context.C if b.kubeAPIServerConfigFile != nil { kubeConfig = *b.kubeAPIServerConfigFile } - serverConfig, err := serving.ToServerConfig(*b.servingInfo, *b.authenticationConfig, *b.authorizationConfig, kubeConfig) + serverConfig, err := serving.ToServerConfig(ctx, 
*b.servingInfo, *b.authenticationConfig, *b.authorizationConfig, kubeConfig) if err != nil { return err } diff --git a/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/cmd.go b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/cmd.go index 099b42a87..ff4f89792 100644 --- a/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/cmd.go +++ b/vendor/github.com/openshift/library-go/pkg/controller/controllercmd/cmd.go @@ -11,15 +11,17 @@ import ( "time" "github.com/spf13/cobra" - "k8s.io/klog" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/version" - "k8s.io/apiserver/pkg/util/logs" + "k8s.io/apiserver/pkg/server" + "k8s.io/component-base/logs" + "k8s.io/klog" operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1" + "github.com/openshift/library-go/pkg/config/configdefaults" "github.com/openshift/library-go/pkg/crypto" "github.com/openshift/library-go/pkg/serviceability" @@ -51,22 +53,42 @@ func NewControllerCommandConfig(componentName string, version version.Info, star // NewCommand returns a new command that a caller must set the Use and Descriptions on. It wires default log, profiling, // leader election and other "normal" behaviors. +// Deprecated: Use the NewCommandWithContext instead, this is here to be less disturbing for existing usages. func (c *ControllerCommandConfig) NewCommand() *cobra.Command { + return c.NewCommandWithContext(context.TODO()) + +} + +// NewCommandWithContext returns a new command that a caller must set the Use and Descriptions on. It wires default log, profiling, +// leader election and other "normal" behaviors. +// The context passed will be passed down to controller loops and observers and cancelled on SIGTERM and SIGINT signals. 
+func (c *ControllerCommandConfig) NewCommandWithContext(ctx context.Context) *cobra.Command { cmd := &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { // boiler plate for the "normal" command rand.Seed(time.Now().UTC().UnixNano()) logs.InitLogs() + + // handle SIGTERM and SIGINT by cancelling the context. + shutdownCtx, cancel := context.WithCancel(ctx) + shutdownHandler := server.SetupSignalHandler() + go func() { + defer cancel() + <-shutdownHandler + klog.Infof("Received SIGTERM or SIGINT signal, shutting down controller.") + }() + defer logs.FlushLogs() defer serviceability.BehaviorOnPanic(os.Getenv("OPENSHIFT_ON_PANIC"), c.version)() defer serviceability.Profile(os.Getenv("OPENSHIFT_PROFILE")).Stop() + serviceability.StartProfiler() if err := c.basicFlags.Validate(); err != nil { klog.Fatal(err) } - if err := c.StartController(context.Background()); err != nil { + if err := c.StartController(shutdownCtx); err != nil { klog.Fatal(err) } }, diff --git a/vendor/github.com/openshift/library-go/pkg/controller/metrics/workqueue_metrics.go b/vendor/github.com/openshift/library-go/pkg/controller/metrics/workqueue_metrics.go index e14f854e6..e8d7edea9 100644 --- a/vendor/github.com/openshift/library-go/pkg/controller/metrics/workqueue_metrics.go +++ b/vendor/github.com/openshift/library-go/pkg/controller/metrics/workqueue_metrics.go @@ -2,6 +2,7 @@ package metrics import ( "k8s.io/client-go/util/workqueue" + "k8s.io/klog" "github.com/prometheus/client_golang/prometheus" ) @@ -13,78 +14,195 @@ func init() { workqueue.SetProvider(prometheusMetricsProvider{}) } +// Package prometheus sets the workqueue DefaultMetricsFactory to produce +// prometheus metrics. To use this package, you just have to import it. + +// Metrics subsystem and keys used by the workqueue. 
+const ( + WorkQueueSubsystem = "workqueue" + DepthKey = "depth" + AddsKey = "adds_total" + QueueLatencyKey = "queue_duration_seconds" + WorkDurationKey = "work_duration_seconds" + UnfinishedWorkKey = "unfinished_work_seconds" + LongestRunningProcessorKey = "longest_running_processor_seconds" + RetriesKey = "retries_total" +) + +func init() { + workqueue.SetProvider(prometheusMetricsProvider{}) +} + type prometheusMetricsProvider struct{} func (prometheusMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric { depth := prometheus.NewGauge(prometheus.GaugeOpts{ - Subsystem: name, - Name: "depth", - Help: "Current depth of workqueue: " + name, + Subsystem: WorkQueueSubsystem, + Name: DepthKey, + Help: "Current depth of workqueue", + ConstLabels: prometheus.Labels{"name": name}, }) prometheus.Register(depth) return depth } func (prometheusMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric { + adds := prometheus.NewCounter(prometheus.CounterOpts{ + Subsystem: WorkQueueSubsystem, + Name: AddsKey, + Help: "Total number of adds handled by workqueue", + ConstLabels: prometheus.Labels{"name": name}, + }) + prometheus.Register(adds) + return adds +} + +func (prometheusMetricsProvider) NewLatencyMetric(name string) workqueue.HistogramMetric { + latency := prometheus.NewHistogram(prometheus.HistogramOpts{ + Subsystem: WorkQueueSubsystem, + Name: QueueLatencyKey, + Help: "How long in seconds an item stays in workqueue before being requested.", + ConstLabels: prometheus.Labels{"name": name}, + Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), + }) + prometheus.Register(latency) + return latency +} + +func (prometheusMetricsProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric { + workDuration := prometheus.NewHistogram(prometheus.HistogramOpts{ + Subsystem: WorkQueueSubsystem, + Name: WorkDurationKey, + Help: "How long in seconds processing an item from workqueue takes.", + ConstLabels: prometheus.Labels{"name": name}, + 
Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), + }) + prometheus.Register(workDuration) + return workDuration +} + +func (prometheusMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric { + unfinished := prometheus.NewGauge(prometheus.GaugeOpts{ + Subsystem: WorkQueueSubsystem, + Name: UnfinishedWorkKey, + Help: "How many seconds of work has done that " + + "is in progress and hasn't been observed by work_duration. Large " + + "values indicate stuck threads. One can deduce the number of stuck " + + "threads by observing the rate at which this increases.", + ConstLabels: prometheus.Labels{"name": name}, + }) + prometheus.Register(unfinished) + return unfinished +} + +func (prometheusMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric { + unfinished := prometheus.NewGauge(prometheus.GaugeOpts{ + Subsystem: WorkQueueSubsystem, + Name: LongestRunningProcessorKey, + Help: "How many seconds has the longest running " + + "processor for workqueue been running.", + ConstLabels: prometheus.Labels{"name": name}, + }) + prometheus.Register(unfinished) + return unfinished +} + +func (prometheusMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric { + retries := prometheus.NewCounter(prometheus.CounterOpts{ + Subsystem: WorkQueueSubsystem, + Name: RetriesKey, + Help: "Total number of retries handled by workqueue", + ConstLabels: prometheus.Labels{"name": name}, + }) + prometheus.Register(retries) + return retries +} + +// TODO(danielqsj): Remove the following metrics, they are deprecated +func (prometheusMetricsProvider) NewDeprecatedDepthMetric(name string) workqueue.GaugeMetric { + depth := prometheus.NewGauge(prometheus.GaugeOpts{ + Subsystem: name, + Name: "depth", + Help: "(Deprecated) Current depth of workqueue: " + name, + }) + if err := prometheus.Register(depth); err != nil { + klog.Errorf("failed to register depth metric %v: %v", name, err) + } + return depth 
+} + +func (prometheusMetricsProvider) NewDeprecatedAddsMetric(name string) workqueue.CounterMetric { adds := prometheus.NewCounter(prometheus.CounterOpts{ Subsystem: name, Name: "adds", - Help: "Total number of adds handled by workqueue: " + name, + Help: "(Deprecated) Total number of adds handled by workqueue: " + name, }) - prometheus.Register(adds) + if err := prometheus.Register(adds); err != nil { + klog.Errorf("failed to register adds metric %v: %v", name, err) + } return adds } -func (prometheusMetricsProvider) NewLatencyMetric(name string) workqueue.SummaryMetric { +func (prometheusMetricsProvider) NewDeprecatedLatencyMetric(name string) workqueue.SummaryMetric { latency := prometheus.NewSummary(prometheus.SummaryOpts{ Subsystem: name, Name: "queue_latency", - Help: "How long an item stays in workqueue" + name + " before being requested.", + Help: "(Deprecated) How long an item stays in workqueue" + name + " before being requested.", }) - prometheus.Register(latency) + if err := prometheus.Register(latency); err != nil { + klog.Errorf("failed to register latency metric %v: %v", name, err) + } return latency } -func (prometheusMetricsProvider) NewWorkDurationMetric(name string) workqueue.SummaryMetric { +func (prometheusMetricsProvider) NewDeprecatedWorkDurationMetric(name string) workqueue.SummaryMetric { workDuration := prometheus.NewSummary(prometheus.SummaryOpts{ Subsystem: name, Name: "work_duration", - Help: "How long processing an item from workqueue" + name + " takes.", + Help: "(Deprecated) How long processing an item from workqueue" + name + " takes.", }) - prometheus.Register(workDuration) + if err := prometheus.Register(workDuration); err != nil { + klog.Errorf("failed to register work_duration metric %v: %v", name, err) + } return workDuration } -func (prometheusMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric { +func (prometheusMetricsProvider) NewDeprecatedUnfinishedWorkSecondsMetric(name string) 
workqueue.SettableGaugeMetric { unfinished := prometheus.NewGauge(prometheus.GaugeOpts{ Subsystem: name, Name: "unfinished_work_seconds", - Help: "How many seconds of work " + name + " has done that " + + Help: "(Deprecated) How many seconds of work " + name + " has done that " + "is in progress and hasn't been observed by work_duration. Large " + "values indicate stuck threads. One can deduce the number of stuck " + "threads by observing the rate at which this increases.", }) - prometheus.Register(unfinished) + if err := prometheus.Register(unfinished); err != nil { + klog.Errorf("failed to register unfinished_work_seconds metric %v: %v", name, err) + } return unfinished } -func (prometheusMetricsProvider) NewLongestRunningProcessorMicrosecondsMetric(name string) workqueue.SettableGaugeMetric { +func (prometheusMetricsProvider) NewDeprecatedLongestRunningProcessorMicrosecondsMetric(name string) workqueue.SettableGaugeMetric { unfinished := prometheus.NewGauge(prometheus.GaugeOpts{ Subsystem: name, Name: "longest_running_processor_microseconds", - Help: "How many microseconds has the longest running " + + Help: "(Deprecated) How many microseconds has the longest running " + "processor for " + name + " been running.", }) - prometheus.Register(unfinished) + if err := prometheus.Register(unfinished); err != nil { + klog.Errorf("failed to register longest_running_processor_microseconds metric %v: %v", name, err) + } return unfinished } -func (prometheusMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric { +func (prometheusMetricsProvider) NewDeprecatedRetriesMetric(name string) workqueue.CounterMetric { retries := prometheus.NewCounter(prometheus.CounterOpts{ Subsystem: name, Name: "retries", - Help: "Total number of retries handled by workqueue: " + name, + Help: "(Deprecated) Total number of retries handled by workqueue: " + name, }) - prometheus.Register(retries) + if err := prometheus.Register(retries); err != nil { + klog.Errorf("failed to 
register retries metric %v: %v", name, err) + } return retries } diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go index 7919321f8..5a593d2b1 100644 --- a/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go +++ b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go @@ -31,10 +31,19 @@ import ( "k8s.io/client-go/util/cert" ) +// TLS versions that are known to golang. Go 1.12 adds TLS 1.3 support with a build flag. var versions = map[string]uint16{ "VersionTLS10": tls.VersionTLS10, "VersionTLS11": tls.VersionTLS11, "VersionTLS12": tls.VersionTLS12, + "VersionTLS13": tls.VersionTLS13, +} + +// TLS versions that are enabled. +var supportedVersions = map[string]uint16{ + "VersionTLS10": tls.VersionTLS10, + "VersionTLS11": tls.VersionTLS11, + "VersionTLS12": tls.VersionTLS12, } // TLSVersionToNameOrDie given a tls version as an int, return its readable name @@ -71,9 +80,21 @@ func TLSVersionOrDie(versionName string) uint16 { } return version } + +// TLS versions that are known to golang, but may not necessarily be enabled. +func GolangTLSVersions() []string { + supported := []string{} + for k := range versions { + supported = append(supported, k) + } + sort.Strings(supported) + return supported +} + +// Returns the build enabled TLS versions. 
func ValidTLSVersions() []string { validVersions := []string{} - for k := range versions { + for k := range supportedVersions { validVersions = append(validVersions, k) } sort.Strings(validVersions) diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/crypto_test.go b/vendor/github.com/openshift/library-go/pkg/crypto/crypto_test.go index 98f9eb8f9..8ccd02d58 100644 --- a/vendor/github.com/openshift/library-go/pkg/crypto/crypto_test.go +++ b/vendor/github.com/openshift/library-go/pkg/crypto/crypto_test.go @@ -51,6 +51,13 @@ func TestConstantMaps(t *testing.T) { t.Errorf("versions map has %s not in tls package", k) } } + + for k := range supportedVersions { + if _, ok := discoveredVersions[k]; !ok { + t.Errorf("supported versions map has %s not in tls package", k) + } + } + } func TestCrypto(t *testing.T) { diff --git a/vendor/github.com/openshift/library-go/pkg/image/dockerv1client/client.go b/vendor/github.com/openshift/library-go/pkg/image/dockerv1client/client.go new file mode 100644 index 000000000..7133063d8 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/dockerv1client/client.go @@ -0,0 +1,999 @@ +package dockerv1client + +import ( + "crypto/tls" + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/http/cookiejar" + "net/url" + "path" + "strings" + "time" + + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/manifest/schema2" + + godockerclient "github.com/fsouza/go-dockerclient" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + knet "k8s.io/apimachinery/pkg/util/net" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/transport" + "k8s.io/klog" + + "github.com/openshift/api/image/docker10" + "github.com/openshift/library-go/pkg/image/reference" +) + +var ( + ImageScheme = runtime.NewScheme() +) + +func init() { + utilruntime.Must(ImageScheme.AddConversionFuncs( + // Convert 
godockerclient client object to internal object + func(in *godockerclient.Image, out *docker10.DockerImage, s conversion.Scope) error { + if err := s.Convert(&in.Config, &out.Config, conversion.AllowDifferentFieldTypeNames); err != nil { + return err + } + if err := s.Convert(&in.ContainerConfig, &out.ContainerConfig, conversion.AllowDifferentFieldTypeNames); err != nil { + return err + } + out.ID = in.ID + out.Parent = in.Parent + out.Comment = in.Comment + out.Created = metav1.NewTime(in.Created) + out.Container = in.Container + out.DockerVersion = in.DockerVersion + out.Author = in.Author + out.Architecture = in.Architecture + out.Size = in.Size + return nil + }, + func(in *docker10.DockerImage, out *godockerclient.Image, s conversion.Scope) error { + if err := s.Convert(&in.Config, &out.Config, conversion.AllowDifferentFieldTypeNames); err != nil { + return err + } + if err := s.Convert(&in.ContainerConfig, &out.ContainerConfig, conversion.AllowDifferentFieldTypeNames); err != nil { + return err + } + out.ID = in.ID + out.Parent = in.Parent + out.Comment = in.Comment + out.Created = in.Created.Time + out.Container = in.Container + out.DockerVersion = in.DockerVersion + out.Author = in.Author + out.Architecture = in.Architecture + out.Size = in.Size + return nil + }, + )) +} + +type Image struct { + Image godockerclient.Image + + // Does this registry support pull by ID + PullByID bool +} + +// Client includes methods for accessing a Docker registry by name. +type Client interface { + // Connect to a Docker registry by name. Pass "" for the Docker Hub + Connect(registry string, allowInsecure bool) (Connection, error) +} + +// Connection allows you to retrieve data from a Docker V1/V2 registry. +type Connection interface { + // ImageTags will return a map of the tags for the image by namespace and name. + // If namespace is not specified, will default to "library" for Docker hub. 
+ ImageTags(namespace, name string) (map[string]string, error) + // ImageByID will return the requested image by namespace, name, and ID. + // If namespace is not specified, will default to "library" for Docker hub. + ImageByID(namespace, name, id string) (*Image, error) + // ImageByTag will return the requested image by namespace, name, and tag + // (if not specified, "latest"). + // If namespace is not specified, will default to "library" for Docker hub. + ImageByTag(namespace, name, tag string) (*Image, error) + // ImageManifest will return the raw image manifest and digest by namespace, + // name, and tag. + ImageManifest(namespace, name, tag string) (string, []byte, error) +} + +// client implements the Client interface +type client struct { + dialTimeout time.Duration + connections map[string]*connection + allowV2 bool +} + +// NewClient returns a client object which allows public access to +// a Docker registry. enableV2 allows a client to prefer V1 registry +// API connections. +// TODO: accept a godockerclient auth config +func NewClient(dialTimeout time.Duration, allowV2 bool) Client { + return &client{ + dialTimeout: dialTimeout, + connections: make(map[string]*connection), + allowV2: allowV2, + } +} + +// Connect accepts the name of a registry in the common form Docker provides and will +// create a connection to the registry. Callers may provide a host, a host:port, or +// a fully qualified URL. 
When not providing a URL, the default scheme will be "https" +func (c *client) Connect(name string, allowInsecure bool) (Connection, error) { + target, err := normalizeRegistryName(name) + if err != nil { + return nil, err + } + prefix := target.String() + if conn, ok := c.connections[prefix]; ok && conn.allowInsecure == allowInsecure { + return conn, nil + } + conn := newConnection(*target, c.dialTimeout, allowInsecure, c.allowV2) + c.connections[prefix] = conn + return conn, nil +} + +// normalizeDockerHubHost returns the canonical DockerHub registry URL for a given host +// segment and godockerclient API version. +func normalizeDockerHubHost(host string, v2 bool) string { + switch host { + case reference.DockerDefaultRegistry, "www." + reference.DockerDefaultRegistry, reference.DockerDefaultV1Registry, reference.DockerDefaultV2Registry: + if v2 { + return reference.DockerDefaultV2Registry + } + return reference.DockerDefaultV1Registry + } + return host +} + +// normalizeRegistryName standardizes the registry URL so that it is consistent +// across different versions of the same name (for reuse of auth). 
+func normalizeRegistryName(name string) (*url.URL, error) { + prefix := name + if len(prefix) == 0 { + prefix = reference.DockerDefaultV1Registry + } + hadPrefix := false + switch { + case strings.HasPrefix(prefix, "http://"), strings.HasPrefix(prefix, "https://"): + hadPrefix = true + default: + prefix = "https://" + prefix + } + + target, err := url.Parse(prefix) + if err != nil { + return nil, fmt.Errorf("the registry name cannot be made into a valid url: %v", err) + } + + if host, port, err := net.SplitHostPort(target.Host); err == nil { + host = normalizeDockerHubHost(host, false) + if hadPrefix { + switch { + case port == "443" && target.Scheme == "https": + target.Host = host + case port == "80" && target.Scheme == "http": + target.Host = host + } + } + } else { + target.Host = normalizeDockerHubHost(target.Host, false) + } + return target, nil +} + +// convertConnectionError turns a registry error into a typed error if appropriate. +func convertConnectionError(registry string, err error) error { + switch { + case strings.Contains(err.Error(), "connection refused"): + return errRegistryNotFound{registry} + default: + return err + } +} + +// connection represents a connection to a particular DockerHub registry, reusing +// tokens and other settings. connections are not thread safe. 
+type connection struct { + client *http.Client + url url.URL + cached map[string]repository + isV2 *bool + token string + + allowInsecure bool +} + +// newConnection creates a new connection +func newConnection(url url.URL, dialTimeout time.Duration, allowInsecure, enableV2 bool) *connection { + var isV2 *bool + if !enableV2 { + v2 := false + isV2 = &v2 + } + + var rt http.RoundTripper + if allowInsecure { + rt = knet.SetTransportDefaults(&http.Transport{ + Dial: (&net.Dialer{ + Timeout: dialTimeout, + KeepAlive: 30 * time.Second, + }).Dial, + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + }) + } else { + rt = knet.SetTransportDefaults(&http.Transport{ + Dial: (&net.Dialer{ + Timeout: dialTimeout, + KeepAlive: 30 * time.Second, + }).Dial, + }) + } + + rt = transport.DebugWrappers(rt) + + jar, _ := cookiejar.New(nil) + client := &http.Client{Jar: jar, Transport: rt} + return &connection{ + url: url, + client: client, + cached: make(map[string]repository), + isV2: isV2, + + allowInsecure: allowInsecure, + } +} + +// ImageTags returns the tags for the named Docker image repository. 
+func (c *connection) ImageTags(namespace, name string) (map[string]string, error) { + if len(namespace) == 0 && reference.IsRegistryDockerHub(c.url.Host) { + namespace = "library" + } + if len(name) == 0 { + return nil, fmt.Errorf("image name must be specified") + } + + repo, err := c.getCachedRepository(fmt.Sprintf("%s/%s", namespace, name)) + if err != nil { + return nil, err + } + + return repo.getTags(c) +} + +// ImageByID returns the specified image within the named Docker image repository +func (c *connection) ImageByID(namespace, name, imageID string) (*Image, error) { + if len(namespace) == 0 && reference.IsRegistryDockerHub(c.url.Host) { + namespace = "library" + } + if len(name) == 0 { + return nil, fmt.Errorf("image name must be specified") + } + + repo, err := c.getCachedRepository(fmt.Sprintf("%s/%s", namespace, name)) + if err != nil { + return nil, err + } + + image, _, err := repo.getImage(c, imageID, "") + return image, err +} + +// ImageByTag returns the specified image within the named Docker image repository +func (c *connection) ImageByTag(namespace, name, tag string) (*Image, error) { + if len(namespace) == 0 && reference.IsRegistryDockerHub(c.url.Host) { + namespace = "library" + } + if len(name) == 0 { + return nil, fmt.Errorf("image name must be specified") + } + searchTag := tag + if len(searchTag) == 0 { + searchTag = "latest" + } + + repo, err := c.getCachedRepository(fmt.Sprintf("%s/%s", namespace, name)) + if err != nil { + return nil, err + } + + image, _, err := repo.getTaggedImage(c, searchTag, tag) + return image, err +} + +// ImageManifest returns raw manifest of the specified image within the named Docker image repository +func (c *connection) ImageManifest(namespace, name, tag string) (string, []byte, error) { + if len(name) == 0 { + return "", nil, fmt.Errorf("image name must be specified") + } + if len(namespace) == 0 && reference.IsRegistryDockerHub(c.url.Host) { + namespace = "library" + } + searchTag := tag + if 
len(searchTag) == 0 { + searchTag = "latest" + } + + repo, err := c.getCachedRepository(fmt.Sprintf("%s/%s", namespace, name)) + if err != nil { + return "", nil, err + } + + image, manifest, err := repo.getTaggedImage(c, searchTag, tag) + if err != nil { + return "", nil, err + } + return image.Image.ID, manifest, err +} + +// getCachedRepository returns a repository interface matching the provided name and +// may cache information about the server on the connection object. +func (c *connection) getCachedRepository(name string) (repository, error) { + if cached, ok := c.cached[name]; ok { + return cached, nil + } + + if c.isV2 == nil { + v2, err := c.checkV2() + if err != nil { + return nil, err + } + c.isV2 = &v2 + } + if *c.isV2 { + base := c.url + base.Host = normalizeDockerHubHost(base.Host, true) + repo := &v2repository{ + name: name, + endpoint: base, + token: c.token, + } + c.cached[name] = repo + return repo, nil + } + + repo, err := c.getRepositoryV1(name) + if err != nil { + return nil, err + } + c.cached[name] = repo + return repo, nil +} + +// checkV2 performs the registry version checking steps as described by +// https://docs.docker.com/registry/spec/api/ +func (c *connection) checkV2() (bool, error) { + base := c.url + base.Host = normalizeDockerHubHost(base.Host, true) + base.Path = path.Join(base.Path, "v2") + "/" + req, err := http.NewRequest("GET", base.String(), nil) + if err != nil { + return false, fmt.Errorf("error creating request: %v", err) + } + resp, err := c.client.Do(req) + if err != nil { + // if we tried https and were rejected, try http + if c.url.Scheme == "https" && c.allowInsecure { + klog.V(4).Infof("Failed to get https, trying http: %v", err) + c.url.Scheme = "http" + return c.checkV2() + } + return false, convertConnectionError(c.url.String(), fmt.Errorf("error checking for V2 registry at %s: %v", base.String(), err)) + } + defer resp.Body.Close() + + switch code := resp.StatusCode; { + case code == http.StatusUnauthorized: + 
// handle auth challenges on individual repositories + case code >= 300 || resp.StatusCode < 200: + return false, nil + } + if len(resp.Header.Get("Docker-Distribution-API-Version")) == 0 { + klog.V(5).Infof("Registry v2 API at %s did not have a Docker-Distribution-API-Version header", base.String()) + return false, nil + } + + klog.V(5).Infof("Found registry v2 API at %s", base.String()) + return true, nil +} + +// parseAuthChallenge splits a header of the form 'type[ =""[,...]]' returned +// by the godockerclient registry +func parseAuthChallenge(header string) (string, map[string]string) { + sections := strings.SplitN(header, " ", 2) + if len(sections) == 1 { + sections = append(sections, "") + } + challenge := sections[1] + keys := make(map[string]string) + for _, s := range strings.Split(challenge, ",") { + pair := strings.SplitN(strings.TrimSpace(s), "=", 2) + if len(pair) == 1 { + keys[pair[0]] = "" + continue + } + keys[pair[0]] = strings.Trim(pair[1], "\"") + } + return sections[0], keys +} + +// authenticateV2 attempts to respond to a given WWW-Authenticate challenge header +// by asking for a token from the realm. Currently only supports "Bearer" challenges +// with no credentials provided. 
+// TODO: support credentials or replace with the Docker distribution v2 registry client +func (c *connection) authenticateV2(header string) (string, error) { + mode, keys := parseAuthChallenge(header) + if strings.ToLower(mode) != "bearer" { + return "", fmt.Errorf("unsupported authentication challenge from registry: %s", header) + } + + realm, ok := keys["realm"] + if !ok { + return "", fmt.Errorf("no realm specified by the server, cannot authenticate: %s", header) + } + delete(keys, "realm") + + realmURL, err := url.Parse(realm) + if err != nil { + return "", fmt.Errorf("realm %q was not a valid url: %v", realm, err) + } + query := realmURL.Query() + for k, v := range keys { + query.Set(k, v) + } + realmURL.RawQuery = query.Encode() + req, err := http.NewRequest("GET", realmURL.String(), nil) + if err != nil { + return "", fmt.Errorf("error creating v2 auth request: %v", err) + } + + resp, err := c.client.Do(req) + if err != nil { + return "", convertConnectionError(realmURL.String(), fmt.Errorf("error authorizing to the registry: %v", err)) + } + defer resp.Body.Close() + + switch code := resp.StatusCode; { + case code == http.StatusUnauthorized: + return "", fmt.Errorf("permission denied to access realm %q", realmURL.String()) + case code == http.StatusNotFound: + return "", fmt.Errorf("defined realm %q cannot be found", realm) + case code >= 300 || resp.StatusCode < 200: + return "", fmt.Errorf("error authenticating to the realm %q; server returned %d", realmURL.String(), resp.StatusCode) + } + + token := struct { + Token string `json:"token"` + }{} + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("can't read authorization body from %s: %v", realmURL.String(), err) + } + if err := json.Unmarshal(body, &token); err != nil { + return "", fmt.Errorf("can't decode the server authorization from %s: %v", realmURL.String(), err) + } + return token.Token, nil +} + +// getRepositoryV1 returns a repository implementation for a v1 
registry by asking for +// the appropriate endpoint token. It will try HTTP if HTTPS fails and insecure connections +// are allowed. +func (c *connection) getRepositoryV1(name string) (repository, error) { + klog.V(4).Infof("Getting repository %s from %s", name, c.url.String()) + + base := c.url + base.Path = path.Join(base.Path, fmt.Sprintf("/v1/repositories/%s/images", name)) + req, err := http.NewRequest("GET", base.String(), nil) + if err != nil { + return nil, fmt.Errorf("error creating request: %v", err) + } + req.Header.Add("X-Docker-Token", "true") + resp, err := c.client.Do(req) + if err != nil { + // if we tried https and were rejected, try http + if c.url.Scheme == "https" && c.allowInsecure { + klog.V(4).Infof("Failed to get https, trying http: %v", err) + c.url.Scheme = "http" + return c.getRepositoryV1(name) + } + return nil, convertConnectionError(c.url.String(), fmt.Errorf("error getting X-Docker-Token from %s: %v", name, err)) + } + defer resp.Body.Close() + + // if we were redirected, update the base urls + c.url.Scheme = resp.Request.URL.Scheme + c.url.Host = resp.Request.URL.Host + + switch code := resp.StatusCode; { + case code == http.StatusNotFound: + return nil, errRepositoryNotFound{name} + case code >= 300 || resp.StatusCode < 200: + return nil, fmt.Errorf("error retrieving repository: server returned %d", resp.StatusCode) + } + + // TODO: select a random endpoint + return &v1repository{ + name: name, + endpoint: url.URL{Scheme: c.url.Scheme, Host: resp.Header.Get("X-Docker-Endpoints")}, + token: resp.Header.Get("X-Docker-Token"), + }, nil +} + +// repository is an interface for retrieving image info from a Docker V1 or V2 repository. 
+type repository interface { + getTags(c *connection) (map[string]string, error) + getTaggedImage(c *connection, tag, userTag string) (*Image, []byte, error) + getImage(c *connection, image, userTag string) (*Image, []byte, error) +} + +// v2repository exposes methods for accessing a named Docker V2 repository on a server. +type v2repository struct { + name string + endpoint url.URL + token string + retries int +} + +// v2tags describes the tags/list returned by the Docker V2 registry. +type v2tags struct { + Name string `json:"name"` + Tags []string `json:"tags"` +} + +func (repo *v2repository) getTags(c *connection) (map[string]string, error) { + endpoint := repo.endpoint + endpoint.Path = path.Join(endpoint.Path, fmt.Sprintf("/v2/%s/tags/list", repo.name)) + req, err := http.NewRequest("GET", endpoint.String(), nil) + if err != nil { + return nil, fmt.Errorf("error creating request: %v", err) + } + addAcceptHeader(req) + + if len(repo.token) > 0 { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", repo.token)) + } + resp, err := c.client.Do(req) + if err != nil { + return nil, convertConnectionError(c.url.String(), fmt.Errorf("error getting image tags for %s: %v", repo.name, err)) + } + defer resp.Body.Close() + + switch code := resp.StatusCode; { + case code == http.StatusUnauthorized: + if len(repo.token) != 0 { + // The DockerHub returns JWT tokens that take effect at "now" at second resolution, which means clients can + // be rejected when requests are made near the time boundary. 
+ if repo.retries > 0 { + repo.retries-- + time.Sleep(time.Second / 2) + return repo.getTags(c) + } + delete(c.cached, repo.name) + // godockerclient will not return a NotFound on any repository URL - for backwards compatibility, return NotFound on the + // repo + return nil, errRepositoryNotFound{repo.name} + } + token, err := c.authenticateV2(resp.Header.Get("WWW-Authenticate")) + if err != nil { + return nil, fmt.Errorf("error getting image tags for %s: %v", repo.name, err) + } + repo.retries = 2 + repo.token = token + return repo.getTags(c) + + case code == http.StatusNotFound: + return nil, errRepositoryNotFound{repo.name} + case code >= 300 || resp.StatusCode < 200: + // token might have expired - evict repo from cache so we can get a new one on retry + delete(c.cached, repo.name) + return nil, fmt.Errorf("error retrieving tags: server returned %d", resp.StatusCode) + } + tags := &v2tags{} + if err := json.NewDecoder(resp.Body).Decode(&tags); err != nil { + return nil, fmt.Errorf("error decoding image %s tags: %v", repo.name, err) + } + legacyTags := make(map[string]string) + for _, tag := range tags.Tags { + legacyTags[tag] = tag + } + return legacyTags, nil +} + +func (repo *v2repository) getTaggedImage(c *connection, tag, userTag string) (*Image, []byte, error) { + endpoint := repo.endpoint + endpoint.Path = path.Join(endpoint.Path, fmt.Sprintf("/v2/%s/manifests/%s", repo.name, tag)) + req, err := http.NewRequest("GET", endpoint.String(), nil) + if err != nil { + return nil, nil, fmt.Errorf("error creating request: %v", err) + } + addAcceptHeader(req) + + if len(repo.token) > 0 { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", repo.token)) + } + resp, err := c.client.Do(req) + if err != nil { + return nil, nil, convertConnectionError(c.url.String(), fmt.Errorf("error getting image for %s:%s: %v", repo.name, tag, err)) + } + defer resp.Body.Close() + + switch code := resp.StatusCode; { + case code == http.StatusUnauthorized: + if len(repo.token) 
!= 0 { + // The DockerHub returns JWT tokens that take effect at "now" at second resolution, which means clients can + // be rejected when requests are made near the time boundary. + if repo.retries > 0 { + repo.retries-- + time.Sleep(time.Second / 2) + return repo.getTaggedImage(c, tag, userTag) + } + delete(c.cached, repo.name) + // godockerclient will not return a NotFound on any repository URL - for backwards compatibility, return NotFound on the + // repo + body, _ := ioutil.ReadAll(resp.Body) + klog.V(4).Infof("passed valid auth token, but unable to find tagged image at %q, %d %v: %s", req.URL.String(), resp.StatusCode, resp.Header, body) + return nil, nil, errTagNotFound{len(userTag) == 0, tag, repo.name} + } + token, err := c.authenticateV2(resp.Header.Get("WWW-Authenticate")) + if err != nil { + return nil, nil, fmt.Errorf("error getting image for %s:%s: %v", repo.name, tag, err) + } + repo.retries = 2 + repo.token = token + return repo.getTaggedImage(c, tag, userTag) + case code == http.StatusNotFound: + body, _ := ioutil.ReadAll(resp.Body) + klog.V(4).Infof("unable to find tagged image at %q, %d %v: %s", req.URL.String(), resp.StatusCode, resp.Header, body) + return nil, nil, errTagNotFound{len(userTag) == 0, tag, repo.name} + case code >= 300 || resp.StatusCode < 200: + // token might have expired - evict repo from cache so we can get a new one on retry + delete(c.cached, repo.name) + + return nil, nil, fmt.Errorf("error retrieving tagged image: server returned %d", resp.StatusCode) + } + + digest := resp.Header.Get("Docker-Content-Digest") + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, nil, fmt.Errorf("can't read image body from %s: %v", req.URL, err) + } + dockerImage, err := repo.unmarshalImageManifest(c, body) + if err != nil { + return nil, nil, err + } + image := &Image{ + Image: *dockerImage, + } + if len(digest) > 0 { + image.Image.ID = digest + image.PullByID = true + } + return image, body, nil +} + +func (repo 
*v2repository) getImage(c *connection, image, userTag string) (*Image, []byte, error) { + return repo.getTaggedImage(c, image, userTag) +} + +func (repo *v2repository) getImageConfig(c *connection, dgst string) ([]byte, error) { + endpoint := repo.endpoint + endpoint.Path = path.Join(endpoint.Path, fmt.Sprintf("/v2/%s/blobs/%s", repo.name, dgst)) + req, err := http.NewRequest("GET", endpoint.String(), nil) + if err != nil { + return nil, fmt.Errorf("error creating request: %v", err) + } + + if len(repo.token) > 0 { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", repo.token)) + } + resp, err := c.client.Do(req) + if err != nil { + return nil, convertConnectionError(c.url.String(), fmt.Errorf("error getting image config for %s: %v", repo.name, err)) + } + defer resp.Body.Close() + + switch code := resp.StatusCode; { + case code == http.StatusUnauthorized: + if len(repo.token) != 0 { + // The DockerHub returns JWT tokens that take effect at "now" at second resolution, which means clients can + // be rejected when requests are made near the time boundary. 
+ if repo.retries > 0 { + repo.retries-- + time.Sleep(time.Second / 2) + return repo.getImageConfig(c, dgst) + } + delete(c.cached, repo.name) + // godockerclient will not return a NotFound on any repository URL - for backwards compatibility, return NotFound on the + // repo + body, _ := ioutil.ReadAll(resp.Body) + klog.V(4).Infof("passed valid auth token, but unable to find image config at %q, %d %v: %s", req.URL.String(), resp.StatusCode, resp.Header, body) + return nil, errBlobNotFound{dgst, repo.name} + } + token, err := c.authenticateV2(resp.Header.Get("WWW-Authenticate")) + if err != nil { + return nil, fmt.Errorf("error getting image config for %s:%s: %v", repo.name, dgst, err) + } + repo.retries = 2 + repo.token = token + return repo.getImageConfig(c, dgst) + case code == http.StatusNotFound: + body, _ := ioutil.ReadAll(resp.Body) + klog.V(4).Infof("unable to find image config at %q, %d %v: %s", req.URL.String(), resp.StatusCode, resp.Header, body) + return nil, errBlobNotFound{dgst, repo.name} + case code >= 300 || resp.StatusCode < 200: + // token might have expired - evict repo from cache so we can get a new one on retry + delete(c.cached, repo.name) + + return nil, fmt.Errorf("error retrieving image config: server returned %d", resp.StatusCode) + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("can't read image body from %s: %v", req.URL, err) + } + + return body, nil +} + +func (repo *v2repository) unmarshalImageManifest(c *connection, body []byte) (*godockerclient.Image, error) { + manifest := DockerImageManifest{} + if err := json.Unmarshal(body, &manifest); err != nil { + return nil, err + } + switch manifest.SchemaVersion { + case 1: + if len(manifest.History) == 0 { + return nil, fmt.Errorf("image has no v1Compatibility history and cannot be used") + } + return unmarshalDockerImage([]byte(manifest.History[0].DockerV1Compatibility)) + case 2: + config, err := repo.getImageConfig(c, manifest.Config.Digest) + 
if err != nil { + return nil, err + } + return unmarshalDockerImage(config) + } + return nil, fmt.Errorf("unrecognized Docker image manifest schema %d", manifest.SchemaVersion) +} + +// v1repository exposes methods for accessing a named Docker V1 repository on a server. +type v1repository struct { + name string + endpoint url.URL + token string +} + +func (repo *v1repository) getTags(c *connection) (map[string]string, error) { + endpoint := repo.endpoint + endpoint.Path = path.Join(endpoint.Path, fmt.Sprintf("/v1/repositories/%s/tags", repo.name)) + req, err := http.NewRequest("GET", endpoint.String(), nil) + if err != nil { + return nil, fmt.Errorf("error creating request: %v", err) + } + req.Header.Add("Authorization", "Token "+repo.token) + resp, err := c.client.Do(req) + if err != nil { + return nil, convertConnectionError(c.url.String(), fmt.Errorf("error getting image tags for %s: %v", repo.name, err)) + } + defer resp.Body.Close() + + switch code := resp.StatusCode; { + case code == http.StatusNotFound: + return nil, errRepositoryNotFound{repo.name} + case code >= 300 || resp.StatusCode < 200: + // token might have expired - evict repo from cache so we can get a new one on retry + delete(c.cached, repo.name) + + return nil, fmt.Errorf("error retrieving tags: server returned %d", resp.StatusCode) + } + tags := make(map[string]string) + if err := json.NewDecoder(resp.Body).Decode(&tags); err != nil { + return nil, fmt.Errorf("error decoding image %s tags: %v", repo.name, err) + } + return tags, nil +} + +func (repo *v1repository) getTaggedImage(c *connection, tag, userTag string) (*Image, []byte, error) { + endpoint := repo.endpoint + endpoint.Path = path.Join(endpoint.Path, fmt.Sprintf("/v1/repositories/%s/tags/%s", repo.name, tag)) + req, err := http.NewRequest("GET", endpoint.String(), nil) + if err != nil { + return nil, nil, fmt.Errorf("error creating request: %v", err) + } + req.Header.Add("Authorization", "Token "+repo.token) + resp, err := 
c.client.Do(req) + if err != nil { + return nil, nil, convertConnectionError(c.url.String(), fmt.Errorf("error getting image id for %s:%s: %v", repo.name, tag, err)) + } + defer resp.Body.Close() + + switch code := resp.StatusCode; { + case code == http.StatusNotFound: + // Attempt to lookup tag in tags map, supporting registries that don't allow retrieval + // of tags to ids (Pulp/Crane) + allTags, err := repo.getTags(c) + if err != nil { + return nil, nil, err + } + if image, ok := allTags[tag]; ok { + return repo.getImage(c, image, "") + } + body, _ := ioutil.ReadAll(resp.Body) + klog.V(4).Infof("unable to find v1 tagged image at %q, %d %v: %s", req.URL.String(), resp.StatusCode, resp.Header, body) + return nil, nil, errTagNotFound{len(userTag) == 0, tag, repo.name} + case code >= 300 || resp.StatusCode < 200: + // token might have expired - evict repo from cache so we can get a new one on retry + delete(c.cached, repo.name) + + return nil, nil, fmt.Errorf("error retrieving tag: server returned %d", resp.StatusCode) + } + var imageID string + if err := json.NewDecoder(resp.Body).Decode(&imageID); err != nil { + return nil, nil, fmt.Errorf("error decoding image id: %v", err) + } + return repo.getImage(c, imageID, "") +} + +func (repo *v1repository) getImage(c *connection, image, userTag string) (*Image, []byte, error) { + endpoint := repo.endpoint + endpoint.Path = path.Join(endpoint.Path, fmt.Sprintf("/v1/images/%s/json", image)) + req, err := http.NewRequest("GET", endpoint.String(), nil) + if err != nil { + return nil, nil, fmt.Errorf("error creating request: %v", err) + } + + if len(repo.token) > 0 { + req.Header.Add("Authorization", "Token "+repo.token) + } + resp, err := c.client.Do(req) + if err != nil { + return nil, nil, convertConnectionError(c.url.String(), fmt.Errorf("error getting json for image %q: %v", image, err)) + } + defer resp.Body.Close() + switch code := resp.StatusCode; { + case code == http.StatusNotFound: + return nil, nil, 
NewImageNotFoundError(repo.name, image, userTag) + case code >= 300 || resp.StatusCode < 200: + // token might have expired - evict repo from cache so we can get a new one on retry + delete(c.cached, repo.name) + if body, err := ioutil.ReadAll(resp.Body); err == nil { + klog.V(6).Infof("unable to fetch image %s: %#v\n%s", req.URL, resp, string(body)) + } + return nil, nil, fmt.Errorf("error retrieving image %s: server returned %d", req.URL, resp.StatusCode) + } + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, nil, fmt.Errorf("can't read image body from %s: %v", req.URL, err) + } + dockerImage, err := unmarshalDockerImage(body) + if err != nil { + return nil, nil, err + } + return &Image{Image: *dockerImage}, body, nil +} + +// errBlobNotFound is an error indicating the requested blob does not exist in the repository. +type errBlobNotFound struct { + digest string + repository string +} + +func (e errBlobNotFound) Error() string { + return fmt.Sprintf("blob %s was not found in repository %q", e.digest, e.repository) +} + +// errTagNotFound is an error indicating the requested tag does not exist on the server. May be returned on +// a v2 repository when the repository does not exist (because the v2 registry returns 401 on any repository +// you do not have permission to see, or does not exist) +type errTagNotFound struct { + wasDefault bool + tag string + repository string +} + +func (e errTagNotFound) Error() string { + if e.wasDefault { + return fmt.Sprintf("the default tag %q has not been set on repository %q", e.tag, e.repository) + } + return fmt.Sprintf("tag %q has not been set on repository %q", e.tag, e.repository) +} + +// errRepositoryNotFound indicates the repository is not found - but is only guaranteed to be returned +// for v1 godockerclient registries. 
+type errRepositoryNotFound struct { + repository string +} + +func (e errRepositoryNotFound) Error() string { + return fmt.Sprintf("the repository %q was not found", e.repository) +} + +type errImageNotFound struct { + tag string + image string + repository string +} + +func NewImageNotFoundError(repository, image, tag string) error { + return errImageNotFound{tag, image, repository} +} + +func (e errImageNotFound) Error() string { + if len(e.tag) == 0 { + return fmt.Sprintf("the image %q in repository %q was not found and may have been deleted", e.image, e.repository) + } + return fmt.Sprintf("the image %q in repository %q with tag %q was not found and may have been deleted", e.image, e.repository, e.tag) +} + +type errRegistryNotFound struct { + registry string +} + +func (e errRegistryNotFound) Error() string { + return fmt.Sprintf("the registry %q could not be reached", e.registry) +} + +func IsRegistryNotFound(err error) bool { + _, ok := err.(errRegistryNotFound) + return ok +} + +func IsRepositoryNotFound(err error) bool { + _, ok := err.(errRepositoryNotFound) + return ok +} + +func IsImageNotFound(err error) bool { + _, ok := err.(errImageNotFound) + return ok +} + +func IsTagNotFound(err error) bool { + _, ok := err.(errTagNotFound) + return ok +} + +func IsBlobNotFound(err error) bool { + _, ok := err.(errBlobNotFound) + return ok +} + +func IsNotFound(err error) bool { + return IsRegistryNotFound(err) || IsRepositoryNotFound(err) || IsImageNotFound(err) || IsTagNotFound(err) || IsBlobNotFound(err) +} + +func unmarshalDockerImage(body []byte) (*godockerclient.Image, error) { + var imagePre012 godockerclient.ImagePre012 + if err := json.Unmarshal(body, &imagePre012); err != nil { + return nil, err + } + + return &godockerclient.Image{ + ID: imagePre012.ID, + Parent: imagePre012.Parent, + Comment: imagePre012.Comment, + Created: imagePre012.Created, + Container: imagePre012.Container, + ContainerConfig: imagePre012.ContainerConfig, + DockerVersion: 
imagePre012.DockerVersion, + Author: imagePre012.Author, + Config: imagePre012.Config, + Architecture: imagePre012.Architecture, + Size: imagePre012.Size, + }, nil +} + +func addAcceptHeader(r *http.Request) { + r.Header.Add("Accept", schema1.MediaTypeManifest) + r.Header.Add("Accept", schema2.MediaTypeManifest) +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/dockerv1client/client_test.go b/vendor/github.com/openshift/library-go/pkg/image/dockerv1client/client_test.go new file mode 100644 index 000000000..d0de6b263 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/dockerv1client/client_test.go @@ -0,0 +1,399 @@ +package dockerv1client + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "os" + "strings" + "testing" + "time" +) + +// tests of running registries are done in the integration client test + +func TestHTTPFallback(t *testing.T) { + called := make(chan struct{}, 2) + var uri *url.URL + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + called <- struct{}{} + if strings.HasSuffix(r.URL.Path, "/tags") { + w.WriteHeader(http.StatusNotFound) + return + } + w.Header().Set("X-Docker-Endpoints", uri.Host) + w.WriteHeader(http.StatusOK) + })) + uri, _ = url.Parse(server.URL) + conn, err := NewClient(10*time.Second, true).Connect(uri.Host, true) + if err != nil { + t.Fatal(err) + } + v2 := false + conn.(*connection).isV2 = &v2 + if _, err := conn.ImageTags("foo", "bar"); !IsRepositoryNotFound(err) { + t.Error(err) + } + <-called + <-called +} + +func TestV2Check(t *testing.T) { + called := make(chan struct{}, 2) + var uri *url.URL + server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + called <- struct{}{} + if strings.HasSuffix(r.URL.Path, "/v2/") { + w.Header().Set("Docker-Distribution-API-Version", "registry/2.0") + w.WriteHeader(http.StatusOK) + return + } + if strings.HasSuffix(r.URL.Path, "/tags/list") { + 
w.WriteHeader(http.StatusOK) + fmt.Fprintln(w, `{"tags":["tag1","image1"]}`) + return + } + t.Fatalf("unexpected request: %s %s", r.Method, r.URL.RequestURI()) + })) + uri, _ = url.Parse(server.URL) + conn, err := NewClient(10*time.Second, true).Connect(uri.Host, true) + if err != nil { + t.Fatal(err) + } + tags, err := conn.ImageTags("foo", "bar") + if err != nil { + t.Fatal(err) + } + if tags["tag1"] != "tag1" { + t.Errorf("unexpected tags: %#v", tags) + } + if tags["image1"] != "image1" { + t.Errorf("unexpected tags: %#v", tags) + } + + <-called + <-called +} + +func TestV2CheckNoDistributionHeader(t *testing.T) { + called := make(chan struct{}, 3) + var uri *url.URL + server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + called <- struct{}{} + if strings.HasSuffix(r.URL.Path, "/v2/") { + w.Header().Set("Docker-Distribution-API-Version", "") + w.WriteHeader(http.StatusOK) + return + } + w.Header().Set("X-Docker-Endpoints", uri.Host) + + // Images + if strings.HasSuffix(r.URL.Path, "/images") { + return + } + + // ImageTags + if strings.HasSuffix(r.URL.Path, "/tags") { + fmt.Fprintln(w, `{"tag1":"image1"}`) + return + } + + // get tag->image id + if strings.HasSuffix(r.URL.Path, "latest") { + fmt.Fprintln(w, `"image1"`) + return + } + + // get image json + if strings.HasSuffix(r.URL.Path, "json") { + fmt.Fprintln(w, `{"id":"image1"}`) + return + } + t.Fatalf("unexpected request: %s %s", r.Method, r.URL.RequestURI()) + })) + uri, _ = url.Parse(server.URL) + conn, err := NewClient(10*time.Second, true).Connect(uri.Host, true) + if err != nil { + t.Fatal(err) + } + tags, err := conn.ImageTags("foo", "bar") + if err != nil { + t.Fatal(err) + } + if tags["tag1"] != "image1" { + t.Errorf("unexpected tags: %#v", tags) + } + + <-called + <-called + <-called +} + +func TestInsecureHTTPS(t *testing.T) { + called := make(chan struct{}, 2) + var uri *url.URL + server := httptest.NewTLSServer(http.HandlerFunc(func(w 
http.ResponseWriter, r *http.Request) { + called <- struct{}{} + if strings.HasSuffix(r.URL.Path, "/tags") { + w.WriteHeader(http.StatusNotFound) + return + } + w.Header().Set("X-Docker-Endpoints", uri.Host) + w.WriteHeader(http.StatusOK) + })) + uri, _ = url.Parse(server.URL) + conn, err := NewClient(10*time.Second, true).Connect(uri.Host, true) + if err != nil { + t.Fatal(err) + } + v2 := false + conn.(*connection).isV2 = &v2 + if _, err := conn.ImageTags("foo", "bar"); !IsRepositoryNotFound(err) { + t.Error(err) + } + <-called + <-called +} + +func TestProxy(t *testing.T) { + called := make(chan struct{}, 2) + var uri *url.URL + server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + called <- struct{}{} + if strings.HasSuffix(r.URL.Path, "/tags") { + w.WriteHeader(http.StatusNotFound) + return + } + w.Header().Set("X-Docker-Endpoints", uri.Host) + w.WriteHeader(http.StatusOK) + })) + os.Setenv("HTTP_PROXY", "http.proxy.tld") + os.Setenv("HTTPS_PROXY", "secure.proxy.tld") + os.Setenv("NO_PROXY", "") + uri, _ = url.Parse(server.URL) + conn, err := NewClient(10*time.Second, true).Connect(uri.Host, true) + if err != nil { + t.Fatal(err) + } + v2 := false + conn.(*connection).isV2 = &v2 + if _, err := conn.ImageTags("foo", "bar"); !IsRepositoryNotFound(err) { + t.Error(err) + } + <-called + <-called +} + +func TestTokenExpiration(t *testing.T) { + var uri *url.URL + lastToken := "" + tokenIndex := 0 + validToken := "" + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get("X-Docker-Token") == "true" { + tokenIndex++ + lastToken = fmt.Sprintf("token%d", tokenIndex) + validToken = lastToken + w.Header().Set("X-Docker-Token", lastToken) + w.Header().Set("X-Docker-Endpoints", uri.Host) + return + } + + auth := r.Header.Get("Authorization") + parts := strings.Split(auth, " ") + token := parts[1] + if token != validToken { + w.WriteHeader(http.StatusUnauthorized) + return 
+ } + + w.WriteHeader(http.StatusOK) + + // ImageTags + if strings.HasSuffix(r.URL.Path, "/tags") { + fmt.Fprintln(w, `{"tag1":"image1"}`) + } + + // get tag->image id + if strings.HasSuffix(r.URL.Path, "latest") { + fmt.Fprintln(w, `"image1"`) + } + + // get image json + if strings.HasSuffix(r.URL.Path, "json") { + fmt.Fprintln(w, `{"id":"image1"}`) + } + })) + + uri, _ = url.Parse(server.URL) + conn, err := NewClient(10*time.Second, true).Connect(uri.Host, true) + if err != nil { + t.Fatal(err) + } + v2 := false + conn.(*connection).isV2 = &v2 + if _, err := conn.ImageTags("foo", "bar"); err != nil { + t.Fatal(err) + } + + // expire token, should get an error + validToken = "" + if _, err := conn.ImageTags("foo", "bar"); err == nil { + t.Fatal("expected error") + } + // retry, should get a new token + if _, err := conn.ImageTags("foo", "bar"); err != nil { + t.Fatal(err) + } + + // expire token, should get an error + validToken = "" + if _, err := conn.ImageByTag("foo", "bar", "latest"); err == nil { + t.Fatal("expected error") + } + // retry, should get a new token + if _, err := conn.ImageByTag("foo", "bar", "latest"); err != nil { + t.Fatal(err) + } + + // expire token, should get an error + validToken = "" + if _, err := conn.ImageByID("foo", "bar", "image1"); err == nil { + t.Fatal("expected error") + } + // retry, should get a new token + if _, err := conn.ImageByID("foo", "bar", "image1"); err != nil { + t.Fatal(err) + } +} + +func TestGetTagFallback(t *testing.T) { + var uri *url.URL + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("X-Docker-Endpoints", uri.Host) + + // get all tags + if strings.HasSuffix(r.URL.Path, "/tags") { + fmt.Fprintln(w, `{"tag1":"image1", "test":"image2"}`) + w.WriteHeader(http.StatusOK) + return + } + if strings.HasSuffix(r.URL.Path, "/json") { + fmt.Fprintln(w, `{"ID":"image2"}`) + w.WriteHeader(http.StatusOK) + return + } + w.WriteHeader(http.StatusNotFound) + })) 
+ uri, _ = url.Parse(server.URL) + conn, err := NewClient(10*time.Second, true).Connect(uri.Host, true) + c := conn.(*connection) + if err != nil { + t.Fatal(err) + } + repo := &v1repository{ + name: "testrepo", + endpoint: *uri, + } + // Case when tag is found + img, _, err := repo.getTaggedImage(c, "test", "") + if err != nil { + t.Errorf("unexpected error getting tag: %v", err) + return + } + if img.Image.ID != "image2" { + t.Errorf("unexpected image for tag: %v", img) + } + // Case when tag is not found + img, _, err = repo.getTaggedImage(c, "test2", "") + if err == nil { + t.Errorf("expected error") + } +} + +func TestImageManifest(t *testing.T) { + manifestDigest := "sha256:958608f8ecc1dc62c93b6c610f3a834dae4220c9642e6e8b4e0f2b3ad7cbd238" + + called := make(chan struct{}, 2) + var uri *url.URL + server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + called <- struct{}{} + t.Logf("got %s %s", r.Method, r.URL.Path) + switch r.URL.Path { + case "/v2/": + w.Header().Set("Docker-Distribution-API-Version", "registry/2.0") + w.Write([]byte(`{}`)) + case "/v2/test/image/manifests/latest", "/v2/test/image/manifests/" + manifestDigest: + if r.Method == "HEAD" { + w.Header().Set("Content-Length", fmt.Sprintf("%d", len(SampleImageManifestSchema1))) + w.Header().Set("Docker-Content-Digest", manifestDigest) + w.WriteHeader(http.StatusOK) + return + } + w.Write([]byte(SampleImageManifestSchema1)) + default: + t.Fatalf("unexpected request: %s %s", r.Method, r.URL.RequestURI()) + return + } + })) + uri, _ = url.Parse(server.URL) + conn, err := NewClient(10*time.Second, true).Connect(uri.Host, true) + if err != nil { + t.Fatal(err) + } + _, manifest, err := conn.ImageManifest("test", "image", "latest") + if err != nil { + t.Fatal(err) + } + if len(manifest) == 0 { + t.Errorf("empty manifest") + } + + if string(manifest) != SampleImageManifestSchema1 { + t.Errorf("unexpected manifest: %#v", manifest) + } + + <-called + <-called +} + 
+const SampleImageManifestSchema1 = `{ + "schemaVersion": 1, + "name": "nm/is", + "tag": "latest", + "architecture": "", + "fsLayers": [ + { + "blobSum": "sha256:b2c5513bd934a7efb412c0dd965600b8cb00575b585eaff1cb980b69037fe6cd" + }, + { + "blobSum": "sha256:2dde6f11a89463bf20dba3b47d8b3b6de7cdcc19e50634e95a18dd95c278768d" + } + ], + "history": [ + { + "v1Compatibility": "{\"size\":18407936}" + }, + { + "v1Compatibility": "{\"size\":19387392}" + } + ], + "signatures": [ + { + "header": { + "jwk": { + "crv": "P-256", + "kid": "5HTY:A24B:L6PG:TQ3G:GMAK:QGKZ:ICD4:S7ZJ:P5JX:UTMP:XZLK:ZXVH", + "kty": "EC", + "x": "j5YnDSyrVIt3NquUKvcZIpbfeD8HLZ7BVBFL4WutRBM", + "y": "PBgFAZ3nNakYN3H9enhrdUrQ_HPYzb8oX5rtJxJo1Y8" + }, + "alg": "ES256" + }, + "signature": "1rXiEmWnf9eL7m7Wy3K4l25-Zv2XXl5GgqhM_yjT0ujPmTn0uwfHcCWlweHa9gput3sECj507eQyGpBOF5rD6Q", + "protected": "eyJmb3JtYXRMZW5ndGgiOjQ4NSwiZm9ybWF0VGFpbCI6IkNuMCIsInRpbWUiOiIyMDE2LTA3LTI2VDExOjQ2OjQ2WiJ9" + } + ] +}` diff --git a/vendor/github.com/openshift/library-go/pkg/image/dockerv1client/conversion.go b/vendor/github.com/openshift/library-go/pkg/image/dockerv1client/conversion.go new file mode 100644 index 000000000..c5161d4fe --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/dockerv1client/conversion.go @@ -0,0 +1,26 @@ +package dockerv1client + +import "github.com/openshift/api/image/docker10" + +// Convert_DockerV1CompatibilityImage_to_DockerImageConfig takes a Docker registry digest +// (schema 2.1) and converts it to the external API version of Image. 
+func Convert_DockerV1CompatibilityImage_to_DockerImageConfig(in *DockerV1CompatibilityImage, out *DockerImageConfig) error { + *out = DockerImageConfig{ + ID: in.ID, + Parent: in.Parent, + Comment: in.Comment, + Created: in.Created, + Container: in.Container, + DockerVersion: in.DockerVersion, + Author: in.Author, + Architecture: in.Architecture, + Size: in.Size, + OS: "linux", + ContainerConfig: in.ContainerConfig, + } + if in.Config != nil { + out.Config = &docker10.DockerConfig{} + *out.Config = *in.Config + } + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/dockerv1client/types.go b/vendor/github.com/openshift/library-go/pkg/image/dockerv1client/types.go new file mode 100644 index 000000000..3b85b81e0 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/dockerv1client/types.go @@ -0,0 +1,113 @@ +package dockerv1client + +import ( + "time" + + "github.com/openshift/api/image/docker10" +) + +// TODO: Move these to openshift/api + +// DockerImageManifest represents the Docker v2 image format. 
+type DockerImageManifest struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType,omitempty"` + + // schema1 + Name string `json:"name"` + Tag string `json:"tag"` + Architecture string `json:"architecture"` + FSLayers []DockerFSLayer `json:"fsLayers"` + History []DockerHistory `json:"history"` + + // schema2 + Layers []Descriptor `json:"layers"` + Config Descriptor `json:"config"` +} + +// DockerFSLayer is a container struct for BlobSums defined in an image manifest +type DockerFSLayer struct { + // DockerBlobSum is the tarsum of the referenced filesystem image layer + // TODO make this digest.Digest once docker/distribution is in Godeps + DockerBlobSum string `json:"blobSum"` +} + +// DockerHistory stores unstructured v1 compatibility information +type DockerHistory struct { + // DockerV1Compatibility is the raw v1 compatibility information + DockerV1Compatibility string `json:"v1Compatibility"` +} + +// DockerV1CompatibilityImage represents the structured v1 +// compatibility information. +type DockerV1CompatibilityImage struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + Container string `json:"container,omitempty"` + ContainerConfig docker10.DockerConfig `json:"container_config,omitempty"` + DockerVersion string `json:"docker_version,omitempty"` + Author string `json:"author,omitempty"` + Config *docker10.DockerConfig `json:"config,omitempty"` + Architecture string `json:"architecture,omitempty"` + Size int64 `json:"size,omitempty"` +} + +// DockerV1CompatibilityImageSize represents the structured v1 +// compatibility information for size +type DockerV1CompatibilityImageSize struct { + Size int64 `json:"size,omitempty"` +} + +// Descriptor describes targeted content. Used in conjunction with a blob +// store, a descriptor can be used to fetch, store and target any kind of +// blob. 
The struct also describes the wire protocol format. Fields should +// only be added but never changed. +type Descriptor struct { + // MediaType describe the type of the content. All text based formats are + // encoded as utf-8. + MediaType string `json:"mediaType,omitempty"` + + // Size in bytes of content. + Size int64 `json:"size,omitempty"` + + // Digest uniquely identifies the content. A byte stream can be verified + // against against this digest. + Digest string `json:"digest,omitempty"` +} + +// DockerImageConfig stores the image configuration +type DockerImageConfig struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + Container string `json:"container,omitempty"` + ContainerConfig docker10.DockerConfig `json:"container_config,omitempty"` + DockerVersion string `json:"docker_version,omitempty"` + Author string `json:"author,omitempty"` + Config *docker10.DockerConfig `json:"config,omitempty"` + Architecture string `json:"architecture,omitempty"` + Size int64 `json:"size,omitempty"` + RootFS *DockerConfigRootFS `json:"rootfs,omitempty"` + History []DockerConfigHistory `json:"history,omitempty"` + OS string `json:"os,omitempty"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` +} + +// DockerConfigHistory stores build commands that were used to create an image +type DockerConfigHistory struct { + Created time.Time `json:"created"` + Author string `json:"author,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + Comment string `json:"comment,omitempty"` + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// DockerConfigRootFS describes images root filesystem +type DockerConfigRootFS struct { + Type string `json:"type"` + DiffIDs []string `json:"diff_ids,omitempty"` +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/imageutil/helpers.go 
b/vendor/github.com/openshift/library-go/pkg/image/imageutil/helpers.go new file mode 100644 index 000000000..d35c052f3 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/imageutil/helpers.go @@ -0,0 +1,379 @@ +package imageutil + +import ( + "encoding/json" + "fmt" + "regexp" + "sort" + "strings" + + "github.com/blang/semver" + + "github.com/openshift/api/image/docker10" + imagev1 "github.com/openshift/api/image/v1" + digestinternal "github.com/openshift/library-go/pkg/image/internal/digest" + imagereference "github.com/openshift/library-go/pkg/image/reference" +) + +const ( + // DefaultImageTag is used when an image tag is needed and the configuration does not specify a tag to use. + DefaultImageTag = "latest" +) + +var ParseDigest = digestinternal.ParseDigest + +// SplitImageStreamTag turns the name of an ImageStreamTag into Name and Tag. +// It returns false if the tag was not properly specified in the name. +func SplitImageStreamTag(nameAndTag string) (name string, tag string, ok bool) { + parts := strings.SplitN(nameAndTag, ":", 2) + name = parts[0] + if len(parts) > 1 { + tag = parts[1] + } + if len(tag) == 0 { + tag = DefaultImageTag + } + return name, tag, len(parts) == 2 +} + +// SplitImageStreamImage turns the name of an ImageStreamImage into Name and ID. +// It returns false if the ID was not properly specified in the name. +func SplitImageStreamImage(nameAndID string) (name string, id string, ok bool) { + parts := strings.SplitN(nameAndID, "@", 2) + name = parts[0] + if len(parts) > 1 { + id = parts[1] + } + return name, id, len(parts) == 2 +} + +// JoinImageStreamTag turns a name and tag into the name of an ImageStreamTag +func JoinImageStreamTag(name, tag string) string { + if len(tag) == 0 { + tag = DefaultImageTag + } + return fmt.Sprintf("%s:%s", name, tag) +} + +// JoinImageStreamImage creates a name for image stream image object from an image stream name and an id. 
+func JoinImageStreamImage(name, id string) string { + return fmt.Sprintf("%s@%s", name, id) +} + +// ParseImageStreamTagName splits a string into its name component and tag component, and returns an error +// if the string is not in the right form. +func ParseImageStreamTagName(istag string) (name string, tag string, err error) { + if strings.Contains(istag, "@") { + err = fmt.Errorf("%q is an image stream image, not an image stream tag", istag) + return + } + segments := strings.SplitN(istag, ":", 3) + switch len(segments) { + case 2: + name = segments[0] + tag = segments[1] + if len(name) == 0 || len(tag) == 0 { + err = fmt.Errorf("image stream tag name %q must have a name and a tag", istag) + } + default: + err = fmt.Errorf("expected exactly one : delimiter in the istag %q", istag) + } + return +} + +// ParseImageStreamImageName splits a string into its name component and ID component, and returns an error +// if the string is not in the right form. +func ParseImageStreamImageName(input string) (name string, id string, err error) { + segments := strings.SplitN(input, "@", 3) + switch len(segments) { + case 2: + name = segments[0] + id = segments[1] + if len(name) == 0 || len(id) == 0 { + err = fmt.Errorf("image stream image name %q must have a name and ID", input) + } + default: + err = fmt.Errorf("expected exactly one @ in the isimage name %q", input) + } + return +} + +var ( + reMinorSemantic = regexp.MustCompile(`^[\d]+\.[\d]+$`) + reMinorWithPatch = regexp.MustCompile(`^([\d]+\.[\d]+)-\w+$`) +) + +type tagPriority int + +const ( + // the "latest" tag + tagPriorityLatest tagPriority = iota + + // a semantic minor version ("5.1", "v5.1", "v5.1-rc1") + tagPriorityMinor + + // a full semantic version ("5.1.3-other", "v5.1.3-other") + tagPriorityFull + + // other tags + tagPriorityOther +) + +type prioritizedTag struct { + tag string + priority tagPriority + semver semver.Version + prefix string +} + +func prioritizeTag(tag string) prioritizedTag { + if tag == 
"latest" { + return prioritizedTag{ + tag: tag, + priority: tagPriorityLatest, + } + } + + short := tag + prefix := "" + if strings.HasPrefix(tag, "v") { + prefix = "v" + short = tag[1:] + } + + // 5.1.3 + if v, err := semver.Parse(short); err == nil { + return prioritizedTag{ + tag: tag, + priority: tagPriorityFull, + semver: v, + prefix: prefix, + } + } + + // 5.1 + if reMinorSemantic.MatchString(short) { + if v, err := semver.Parse(short + ".0"); err == nil { + return prioritizedTag{ + tag: tag, + priority: tagPriorityMinor, + semver: v, + prefix: prefix, + } + } + } + + // 5.1-rc1 + if match := reMinorWithPatch.FindStringSubmatch(short); match != nil { + if v, err := semver.Parse(strings.Replace(short, match[1], match[1]+".0", 1)); err == nil { + return prioritizedTag{ + tag: tag, + priority: tagPriorityMinor, + semver: v, + prefix: prefix, + } + } + } + + // other + return prioritizedTag{ + tag: tag, + priority: tagPriorityOther, + prefix: prefix, + } +} + +type prioritizedTags []prioritizedTag + +func (t prioritizedTags) Len() int { return len(t) } +func (t prioritizedTags) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t prioritizedTags) Less(i, j int) bool { + if t[i].priority != t[j].priority { + return t[i].priority < t[j].priority + } + + if t[i].priority == tagPriorityOther { + return t[i].tag < t[j].tag + } + + cmp := t[i].semver.Compare(t[j].semver) + if cmp > 0 { // the newer tag has a higher priority + return true + } + return cmp == 0 && t[i].prefix < t[j].prefix +} + +// PrioritizeTags orders a set of image tags with a few conventions: +// +// 1. the "latest" tag, if present, should be first +// 2. any tags that represent a semantic minor version ("5.1", "v5.1", "v5.1-rc1") should be next, in descending order +// 3. any tags that represent a full semantic version ("5.1.3-other", "v5.1.3-other") should be next, in descending order +// 4. any remaining tags should be sorted in lexicographic order +// +// The method updates the tags in place. 
+func PrioritizeTags(tags []string) { + ptags := make(prioritizedTags, len(tags)) + for i, tag := range tags { + ptags[i] = prioritizeTag(tag) + } + sort.Sort(ptags) + for i, pt := range ptags { + tags[i] = pt.tag + } +} + +// StatusHasTag returns named tag from image stream's status and boolean whether one was found. +func StatusHasTag(stream *imagev1.ImageStream, name string) (imagev1.NamedTagEventList, bool) { + for _, tag := range stream.Status.Tags { + if tag.Tag == name { + return tag, true + } + } + return imagev1.NamedTagEventList{}, false +} + +// LatestTaggedImage returns the most recent TagEvent for the specified image +// repository and tag. Will resolve lookups for the empty tag. Returns nil +// if tag isn't present in stream.status.tags. +func LatestTaggedImage(stream *imagev1.ImageStream, tag string) *imagev1.TagEvent { + if len(tag) == 0 { + tag = imagev1.DefaultImageTag + } + + // find the most recent tag event with an image reference + t, ok := StatusHasTag(stream, tag) + if ok { + if len(t.Items) == 0 { + return nil + } + return &t.Items[0] + } + + return nil +} + +// ImageWithMetadata mutates the given image. It parses raw DockerImageManifest data stored in the image and +// fills its DockerImageMetadata and other fields. +// Copied from github.com/openshift/image-registry/pkg/origin-common/util/util.go +func ImageWithMetadata(image *imagev1.Image) error { + // Check if the metadata are already filled in for this image. 
+ meta, hasMetadata := image.DockerImageMetadata.Object.(*docker10.DockerImage) + if hasMetadata && meta.Size > 0 { + return nil + } + + version := image.DockerImageMetadataVersion + if len(version) == 0 { + version = "1.0" + } + + obj := &docker10.DockerImage{} + if len(image.DockerImageMetadata.Raw) != 0 { + if err := json.Unmarshal(image.DockerImageMetadata.Raw, obj); err != nil { + return err + } + image.DockerImageMetadata.Object = obj + } + + image.DockerImageMetadataVersion = version + + return nil +} + +func ImageWithMetadataOrDie(image *imagev1.Image) { + if err := ImageWithMetadata(image); err != nil { + panic(err) + } +} + +// ResolveLatestTaggedImage returns the appropriate pull spec for a given tag in +// the image stream, handling the tag's reference policy if necessary to return +// a resolved image. Callers that transform an ImageStreamTag into a pull spec +// should use this method instead of LatestTaggedImage. +func ResolveLatestTaggedImage(stream *imagev1.ImageStream, tag string) (string, bool) { + if len(tag) == 0 { + tag = imagev1.DefaultImageTag + } + return resolveTagReference(stream, tag, LatestTaggedImage(stream, tag)) +} + +// ResolveTagReference applies the tag reference rules for a stream, tag, and tag event for +// that tag. It returns true if the tag is +func resolveTagReference(stream *imagev1.ImageStream, tag string, latest *imagev1.TagEvent) (string, bool) { + if latest == nil { + return "", false + } + return resolveReferenceForTagEvent(stream, tag, latest), true +} + +// SpecHasTag returns named tag from image stream's spec and boolean whether one was found. +func SpecHasTag(stream *imagev1.ImageStream, name string) (imagev1.TagReference, bool) { + for _, tag := range stream.Spec.Tags { + if tag.Name == name { + return tag, true + } + } + return imagev1.TagReference{}, false +} + +// ResolveReferenceForTagEvent applies the tag reference rules for a stream, tag, and tag event for +// that tag. 
+func resolveReferenceForTagEvent(stream *imagev1.ImageStream, tag string, latest *imagev1.TagEvent) string { + // retrieve spec policy - if not found, we use the latest spec + ref, ok := SpecHasTag(stream, tag) + if !ok { + return latest.DockerImageReference + } + + switch ref.ReferencePolicy.Type { + // the local reference policy attempts to use image pull through on the integrated + // registry if possible + case imagev1.LocalTagReferencePolicy: + local := stream.Status.DockerImageRepository + if len(local) == 0 || len(latest.Image) == 0 { + // fallback to the originating reference if no local docker registry defined or we + // lack an image ID + return latest.DockerImageReference + } + + // we must use imageapi's helper since we're calling Exact later on, which produces string + ref, err := imagereference.Parse(local) + if err != nil { + // fallback to the originating reference if the reported local repository spec is not valid + return latest.DockerImageReference + } + + // create a local pullthrough URL + ref.Tag = "" + ref.ID = latest.Image + return ref.Exact() + + // the default policy is to use the originating image + default: + return latest.DockerImageReference + } +} + +// DigestOrImageMatch matches the digest in the image name. +func DigestOrImageMatch(image, imageID string) bool { + if d, err := ParseDigest(image); err == nil { + return strings.HasPrefix(d.Hex(), imageID) || strings.HasPrefix(image, imageID) + } + return strings.HasPrefix(image, imageID) +} + +// ParseDockerImageReference parses a Docker pull spec string into a +// DockerImageReference. 
+func ParseDockerImageReference(spec string) (imagev1.DockerImageReference, error) { + ref, err := imagereference.Parse(spec) + if err != nil { + return imagev1.DockerImageReference{}, err + } + return imagev1.DockerImageReference{ + Registry: ref.Registry, + Namespace: ref.Namespace, + Name: ref.Name, + Tag: ref.Tag, + ID: ref.ID, + }, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/imageutil/helpers_test.go b/vendor/github.com/openshift/library-go/pkg/image/imageutil/helpers_test.go new file mode 100644 index 000000000..40ae2a060 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/imageutil/helpers_test.go @@ -0,0 +1,155 @@ +package imageutil + +import ( + "reflect" + "testing" +) + +func TestJoinImageStreamTag(t *testing.T) { + if e, a := "foo:bar", JoinImageStreamTag("foo", "bar"); e != a { + t.Errorf("Unexpected value: %s", a) + } + if e, a := "foo:"+DefaultImageTag, JoinImageStreamTag("foo", ""); e != a { + t.Errorf("Unexpected value: %s", a) + } +} + +func TestParseImageStreamTagName(t *testing.T) { + tests := map[string]struct { + id string + expectedName string + expectedTag string + expectError bool + }{ + "empty id": { + id: "", + expectError: true, + }, + "missing semicolon": { + id: "hello", + expectError: true, + }, + "too many semicolons": { + id: "a:b:c", + expectError: true, + }, + "empty name": { + id: ":tag", + expectError: true, + }, + "empty tag": { + id: "name", + expectError: true, + }, + "happy path": { + id: "name:tag", + expectError: false, + expectedName: "name", + expectedTag: "tag", + }, + } + + for description, testCase := range tests { + name, tag, err := ParseImageStreamTagName(testCase.id) + gotError := err != nil + if e, a := testCase.expectError, gotError; e != a { + t.Fatalf("%s: expected err: %t, got: %t: %s", description, e, a, err) + } + if err != nil { + continue + } + if e, a := testCase.expectedName, name; e != a { + t.Errorf("%s: name: expected %q, got %q", description, e, a) + } + if 
e, a := testCase.expectedTag, tag; e != a { + t.Errorf("%s: tag: expected %q, got %q", description, e, a) + } + } +} + +func TestParseImageStreamImageName(t *testing.T) { + tests := map[string]struct { + input string + expectedRepo string + expectedId string + expectError bool + }{ + "empty string": { + input: "", + expectError: true, + }, + "one part": { + input: "a", + expectError: true, + }, + "more than 2 parts": { + input: "a@b@c", + expectError: true, + }, + "empty name part": { + input: "@id", + expectError: true, + }, + "empty id part": { + input: "name@", + expectError: true, + }, + "valid input": { + input: "repo@id", + expectedRepo: "repo", + expectedId: "id", + expectError: false, + }, + } + + for name, test := range tests { + repo, id, err := ParseImageStreamImageName(test.input) + didError := err != nil + if e, a := test.expectError, didError; e != a { + t.Errorf("%s: expected error=%t, got=%t: %s", name, e, a, err) + continue + } + if test.expectError { + continue + } + if e, a := test.expectedRepo, repo; e != a { + t.Errorf("%s: repo: expected %q, got %q", name, e, a) + continue + } + if e, a := test.expectedId, id; e != a { + t.Errorf("%s: id: expected %q, got %q", name, e, a) + continue + } + } +} +func TestPrioritizeTags(t *testing.T) { + tests := []struct { + tags []string + expected []string + }{ + { + tags: []string{"other", "latest", "v5.5", "5.2.3", "5.5", "v5.3.6-bother", "5.3.6-abba", "5.6"}, + expected: []string{"latest", "5.6", "5.5", "v5.5", "v5.3.6-bother", "5.3.6-abba", "5.2.3", "other"}, + }, + { + tags: []string{"1.1-beta1", "1.2-rc1", "1.1-rc1", "1.1-beta2", "1.2-beta1", "1.2-alpha1", "1.2-beta4", "latest"}, + expected: []string{"latest", "1.2-rc1", "1.2-beta4", "1.2-beta1", "1.2-alpha1", "1.1-rc1", "1.1-beta2", "1.1-beta1"}, + }, + { + tags: []string{"7.1", "v7.1", "7.1.0"}, + expected: []string{"7.1", "v7.1", "7.1.0"}, + }, + { + tags: []string{"7.1.0", "v7.1", "7.1"}, + expected: []string{"7.1", "v7.1", "7.1.0"}, + }, + } + + 
for _, tc := range tests { + t.Log("sorting", tc.tags) + PrioritizeTags(tc.tags) + if !reflect.DeepEqual(tc.tags, tc.expected) { + t.Errorf("got %v, want %v", tc.tags, tc.expected) + } + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/referencemutator/builds.go b/vendor/github.com/openshift/library-go/pkg/image/referencemutator/builds.go new file mode 100644 index 000000000..663aa9609 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/referencemutator/builds.go @@ -0,0 +1,120 @@ +package referencemutator + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/validation/field" + + buildv1 "github.com/openshift/api/build/v1" +) + +type buildSpecMutator struct { + spec *buildv1.CommonSpec + oldSpec *buildv1.CommonSpec + path *field.Path + output bool +} + +// NewBuildMutator returns an ImageReferenceMutator that includes the output field. +func NewBuildMutator(build *buildv1.Build) ImageReferenceMutator { + return &buildSpecMutator{ + spec: &build.Spec.CommonSpec, + path: field.NewPath("spec"), + output: true, + } +} + +func hasIdenticalImageSourceObjectReference(spec *buildv1.CommonSpec, ref corev1.ObjectReference) bool { + if spec == nil { + return false + } + for i := range spec.Source.Images { + if spec.Source.Images[i].From == ref { + return true + } + } + return false +} + +func hasIdenticalStrategyFrom(spec, oldSpec *buildv1.CommonSpec) bool { + if oldSpec == nil { + return false + } + switch { + case spec.Strategy.CustomStrategy != nil: + if oldSpec.Strategy.CustomStrategy != nil { + return spec.Strategy.CustomStrategy.From == oldSpec.Strategy.CustomStrategy.From + } + case spec.Strategy.DockerStrategy != nil: + if oldSpec.Strategy.DockerStrategy != nil { + return hasIdenticalObjectReference(spec.Strategy.DockerStrategy.From, oldSpec.Strategy.DockerStrategy.From) + } + case spec.Strategy.SourceStrategy != nil: + if oldSpec.Strategy.SourceStrategy != nil { + return 
spec.Strategy.SourceStrategy.From == oldSpec.Strategy.SourceStrategy.From + } + } + return false +} + +func hasIdenticalObjectReference(ref, oldRef *corev1.ObjectReference) bool { + if ref == nil || oldRef == nil { + return false + } + return *ref == *oldRef +} + +func (m *buildSpecMutator) Mutate(fn ImageReferenceMutateFunc) field.ErrorList { + var errs field.ErrorList + for i := range m.spec.Source.Images { + if hasIdenticalImageSourceObjectReference(m.oldSpec, m.spec.Source.Images[i].From) { + continue + } + if err := fn(&m.spec.Source.Images[i].From); err != nil { + errs = append(errs, fieldErrorOrInternal(err, m.path.Child("source", "images").Index(i).Child("from", "name"))) + continue + } + } + if !hasIdenticalStrategyFrom(m.spec, m.oldSpec) { + if s := m.spec.Strategy.CustomStrategy; s != nil { + if err := fn(&s.From); err != nil { + errs = append(errs, fieldErrorOrInternal(err, m.path.Child("strategy", "customStrategy", "from", "name"))) + } + } + if s := m.spec.Strategy.DockerStrategy; s != nil { + if s.From != nil { + if err := fn(s.From); err != nil { + errs = append(errs, fieldErrorOrInternal(err, m.path.Child("strategy", "dockerStrategy", "from", "name"))) + } + } + } + if s := m.spec.Strategy.SourceStrategy; s != nil { + if err := fn(&s.From); err != nil { + errs = append(errs, fieldErrorOrInternal(err, m.path.Child("strategy", "sourceStrategy", "from", "name"))) + } + } + } + if m.output { + if s := m.spec.Output.To; s != nil { + if m.oldSpec == nil || m.oldSpec.Output.To == nil || !hasIdenticalObjectReference(s, m.oldSpec.Output.To) { + if err := fn(s); err != nil { + errs = append(errs, fieldErrorOrInternal(err, m.path.Child("output", "to"))) + } + } + } + } + return errs +} + +func fieldErrorOrInternal(err error, path *field.Path) *field.Error { + if ferr, ok := err.(*field.Error); ok { + if len(ferr.Field) == 0 { + ferr.Field = path.String() + } + return ferr + } + if errors.IsNotFound(err) { + return field.NotFound(path, err) + } + return 
field.InternalError(path, err) +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/referencemutator/builds_test.go b/vendor/github.com/openshift/library-go/pkg/image/referencemutator/builds_test.go new file mode 100644 index 000000000..a40828652 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/referencemutator/builds_test.go @@ -0,0 +1,289 @@ +package referencemutator + +import ( + "reflect" + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/validation/field" + + buildv1 "github.com/openshift/api/build/v1" +) + +func imageRef(name string) *corev1.ObjectReference { + ref := imageRefValue(name) + return &ref +} +func imageRefValue(name string) corev1.ObjectReference { + return corev1.ObjectReference{Kind: "DockerImage", Name: name} +} + +func Test_buildSpecMutator_Mutate(t *testing.T) { + type fields struct { + spec *buildv1.CommonSpec + oldSpec *buildv1.CommonSpec + path *field.Path + output bool + } + type args struct { + fn ImageReferenceMutateFunc + } + tests := []struct { + name string + fields fields + args args + want field.ErrorList + wantSpec *buildv1.CommonSpec + }{ + { + name: "no-op", + fields: fields{spec: &buildv1.CommonSpec{}}, + }, + { + name: "passes reference", + fields: fields{spec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + DockerStrategy: &buildv1.DockerBuildStrategy{From: imageRef("test")}, + }, + }}, + args: args{fn: func(ref *corev1.ObjectReference) error { + if !reflect.DeepEqual(ref, imageRef("test")) { + t.Errorf("unexpected ref: %#v", ref) + } + return nil + }}, + wantSpec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + DockerStrategy: &buildv1.DockerBuildStrategy{From: imageRef("test")}, + }, + }, + }, + { + name: "mutates docker reference", + fields: fields{spec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + DockerStrategy: &buildv1.DockerBuildStrategy{From: imageRef("test")}, + }, + }}, + args: args{fn: func(ref *corev1.ObjectReference) error 
{ + ref.Name = "test-2" + return nil + }}, + wantSpec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + DockerStrategy: &buildv1.DockerBuildStrategy{From: imageRef("test-2")}, + }, + }, + }, + { + name: "mutates source reference", + fields: fields{spec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + SourceStrategy: &buildv1.SourceBuildStrategy{From: imageRefValue("test")}, + }, + }}, + args: args{fn: func(ref *corev1.ObjectReference) error { + ref.Name = "test-2" + return nil + }}, + wantSpec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + SourceStrategy: &buildv1.SourceBuildStrategy{From: imageRefValue("test-2")}, + }, + }, + }, + { + name: "mutates custom reference", + fields: fields{spec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + CustomStrategy: &buildv1.CustomBuildStrategy{From: imageRefValue("test")}, + }, + }}, + args: args{fn: func(ref *corev1.ObjectReference) error { + ref.Name = "test-2" + return nil + }}, + wantSpec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + CustomStrategy: &buildv1.CustomBuildStrategy{From: imageRefValue("test-2")}, + }, + }, + }, + { + name: "mutates image source references", + fields: fields{spec: &buildv1.CommonSpec{ + Source: buildv1.BuildSource{Images: []buildv1.ImageSource{ + {From: imageRefValue("test-1")}, + {From: imageRefValue("test-2")}, + {From: imageRefValue("test-3")}, + }}, + }}, + args: args{fn: func(ref *corev1.ObjectReference) error { + if ref.Name == "test-2" { + ref.Name = "test-4" + } + return nil + }}, + wantSpec: &buildv1.CommonSpec{ + Source: buildv1.BuildSource{Images: []buildv1.ImageSource{ + {From: imageRefValue("test-1")}, + {From: imageRefValue("test-4")}, + {From: imageRefValue("test-3")}, + }}, + }, + }, + { + name: "mutates only changed references", + fields: fields{ + spec: &buildv1.CommonSpec{ + Source: buildv1.BuildSource{Images: []buildv1.ImageSource{ + {From: imageRefValue("test-1")}, + {From: imageRefValue("test-2")}, + {From: 
imageRefValue("test-3")}, + }}, + }, + oldSpec: &buildv1.CommonSpec{ + Source: buildv1.BuildSource{Images: []buildv1.ImageSource{ + {From: imageRefValue("test-1")}, + {From: imageRefValue("test-3")}, + }}, + }, + }, + args: args{fn: func(ref *corev1.ObjectReference) error { + if ref.Name != "test-2" { + t.Errorf("did not expect to be called for existing reference") + } + ref.Name = "test-4" + return nil + }}, + wantSpec: &buildv1.CommonSpec{ + Source: buildv1.BuildSource{Images: []buildv1.ImageSource{ + {From: imageRefValue("test-1")}, + {From: imageRefValue("test-4")}, + {From: imageRefValue("test-3")}, + }}, + }, + }, + { + name: "skips when docker reference unchanged", + fields: fields{ + spec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + DockerStrategy: &buildv1.DockerBuildStrategy{From: imageRef("test")}, + }, + }, + oldSpec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + DockerStrategy: &buildv1.DockerBuildStrategy{From: imageRef("test")}, + }, + }, + }, + args: args{fn: func(ref *corev1.ObjectReference) error { + t.Errorf("should not have called mutator") + return nil + }}, + wantSpec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + DockerStrategy: &buildv1.DockerBuildStrategy{From: imageRef("test")}, + }, + }, + }, + { + name: "skips when custom reference unchanged", + fields: fields{ + spec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + CustomStrategy: &buildv1.CustomBuildStrategy{From: imageRefValue("test")}, + }, + }, + oldSpec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + CustomStrategy: &buildv1.CustomBuildStrategy{From: imageRefValue("test")}, + }, + }, + }, + args: args{fn: func(ref *corev1.ObjectReference) error { + t.Errorf("should not have called mutator") + return nil + }}, + wantSpec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + CustomStrategy: &buildv1.CustomBuildStrategy{From: imageRefValue("test")}, + }, + }, + }, + { + name: "skips when source reference unchanged", + 
fields: fields{ + spec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + SourceStrategy: &buildv1.SourceBuildStrategy{From: imageRefValue("test")}, + }, + }, + oldSpec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + SourceStrategy: &buildv1.SourceBuildStrategy{From: imageRefValue("test")}, + }, + }, + }, + args: args{fn: func(ref *corev1.ObjectReference) error { + t.Errorf("should not have called mutator") + return nil + }}, + wantSpec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + SourceStrategy: &buildv1.SourceBuildStrategy{From: imageRefValue("test")}, + }, + }, + }, + { + name: "skips when source reference unchanged", + fields: fields{ + spec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + SourceStrategy: &buildv1.SourceBuildStrategy{ + From: imageRefValue("test"), + }, + }, + }, + oldSpec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + SourceStrategy: &buildv1.SourceBuildStrategy{ + From: imageRefValue("test"), + }, + }, + }, + }, + args: args{fn: func(ref *corev1.ObjectReference) error { + t.Errorf("should not have called mutator") + return nil + }}, + wantSpec: &buildv1.CommonSpec{ + Strategy: buildv1.BuildStrategy{ + SourceStrategy: &buildv1.SourceBuildStrategy{ + From: imageRefValue("test"), + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := &buildSpecMutator{ + spec: tt.fields.spec, + oldSpec: tt.fields.oldSpec, + path: tt.fields.path, + output: tt.fields.output, + } + if tt.wantSpec == nil { + tt.wantSpec = &buildv1.CommonSpec{} + } + if got := m.Mutate(tt.args.fn); !reflect.DeepEqual(got, tt.want) { + t.Errorf("buildSpecMutator.Mutate() = %v, want %v", got, tt.want) + } + if !reflect.DeepEqual(tt.wantSpec, tt.fields.spec) { + t.Errorf("buildSpecMutator.Mutate() spec = %#v, want %#v", tt.fields.spec, tt.wantSpec) + } + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/referencemutator/meta.go 
b/vendor/github.com/openshift/library-go/pkg/image/referencemutator/meta.go new file mode 100644 index 000000000..effde5165 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/referencemutator/meta.go @@ -0,0 +1,109 @@ +package referencemutator + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + + buildv1 "github.com/openshift/api/build/v1" +) + +// ImageReferenceMutateFunc is passed a reference representing an image, and may alter +// the Name, Kind, and Namespace fields of the reference. If an error is returned the +// object may still be mutated under the covers. +type ImageReferenceMutateFunc func(ref *corev1.ObjectReference) error + +type ImageReferenceMutator interface { + // Mutate invokes fn on every image reference in the object. If fn returns an error, + // a field.Error is added to the list to be returned. Mutate does not terminate early + // if errors are detected. + Mutate(fn ImageReferenceMutateFunc) field.ErrorList +} + +var errNoImageMutator = fmt.Errorf("no list of images available for this object") + +// GetImageReferenceMutator returns a mutator for the provided object, or an error if no +// such mutator is defined. Only references that are different between obj and old will +// be returned unless old is nil. 
+func GetImageReferenceMutator(obj, old runtime.Object) (ImageReferenceMutator, error) { + switch t := obj.(type) { + case *buildv1.Build: + if oldT, ok := old.(*buildv1.Build); ok && oldT != nil { + return &buildSpecMutator{spec: &t.Spec.CommonSpec, oldSpec: &oldT.Spec.CommonSpec, path: field.NewPath("spec")}, nil + } + return &buildSpecMutator{spec: &t.Spec.CommonSpec, path: field.NewPath("spec")}, nil + case *buildv1.BuildConfig: + if oldT, ok := old.(*buildv1.BuildConfig); ok && oldT != nil { + return &buildSpecMutator{spec: &t.Spec.CommonSpec, oldSpec: &oldT.Spec.CommonSpec, path: field.NewPath("spec")}, nil + } + return &buildSpecMutator{spec: &t.Spec.CommonSpec, path: field.NewPath("spec")}, nil + default: + if spec, path, err := GetPodSpecV1(obj); err == nil { + if old == nil { + return &podSpecV1Mutator{spec: spec, path: path}, nil + } + oldSpec, _, err := GetPodSpecV1(old) + if err != nil { + return nil, fmt.Errorf("old and new pod spec objects were not of the same type %T != %T: %v", obj, old, err) + } + return &podSpecV1Mutator{spec: spec, oldSpec: oldSpec, path: path}, nil + } + return nil, errNoImageMutator + } +} + +type AnnotationAccessor interface { + // Annotations returns a map representing annotations. Not mutable. + Annotations() map[string]string + // SetAnnotations sets representing annotations onto the object. + SetAnnotations(map[string]string) + // TemplateAnnotations returns a map representing annotations on a nested template in the object. Not mutable. + // If no template is present bool will be false. + TemplateAnnotations() (map[string]string, bool) + // SetTemplateAnnotations sets annotations on a nested template in the object. + // If no template is present bool will be false. 
+ SetTemplateAnnotations(map[string]string) bool +} + +type annotationsAccessor struct { + object metav1.Object + template metav1.Object +} + +func (a annotationsAccessor) Annotations() map[string]string { + return a.object.GetAnnotations() +} + +func (a annotationsAccessor) TemplateAnnotations() (map[string]string, bool) { + if a.template == nil { + return nil, false + } + return a.template.GetAnnotations(), true +} + +func (a annotationsAccessor) SetAnnotations(annotations map[string]string) { + a.object.SetAnnotations(annotations) +} + +func (a annotationsAccessor) SetTemplateAnnotations(annotations map[string]string) bool { + if a.template == nil { + return false + } + a.template.SetAnnotations(annotations) + return true +} + +// GetAnnotationAccessor returns an accessor for the provided object or false if the object +// does not support accessing annotations. +func GetAnnotationAccessor(obj runtime.Object) (AnnotationAccessor, bool) { + switch t := obj.(type) { + case metav1.Object: + templateObject, _ := GetTemplateMetaObject(obj) + return annotationsAccessor{object: t, template: templateObject}, true + default: + return nil, false + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/referencemutator/pods.go b/vendor/github.com/openshift/library-go/pkg/image/referencemutator/pods.go new file mode 100644 index 000000000..28c29378d --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/referencemutator/pods.go @@ -0,0 +1,302 @@ +package referencemutator + +import ( + "fmt" + + kappsv1 "k8s.io/api/apps/v1" + kappsv1beta1 "k8s.io/api/apps/v1beta1" + kappsv1beta2 "k8s.io/api/apps/v1beta2" + batchv1 "k8s.io/api/batch/v1" + batchv1beta1 "k8s.io/api/batch/v1beta1" + batchv2alpha1 "k8s.io/api/batch/v2alpha1" + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + + appsv1 
"github.com/openshift/api/apps/v1" + securityv1 "github.com/openshift/api/security/v1" +) + +type ContainerMutator interface { + GetName() string + GetImage() string + SetImage(image string) +} + +type PodSpecReferenceMutator interface { + GetContainerByIndex(init bool, i int) (ContainerMutator, bool) + GetContainerByName(name string) (ContainerMutator, bool) + Path() *field.Path +} + +// GetPodSpecReferenceMutator returns a mutator for the provided object, or an error if no +// such mutator is defined. +func GetPodSpecReferenceMutator(obj runtime.Object) (PodSpecReferenceMutator, error) { + if spec, path, err := GetPodSpecV1(obj); err == nil { + return &podSpecV1Mutator{spec: spec, path: path}, nil + } + return nil, errNoImageMutator +} + +var errNoPodSpec = fmt.Errorf("No PodSpec available for this object") + +// GetPodSpecV1 returns a mutable pod spec out of the provided object, including a field path +// to the field in the object, or an error if the object does not contain a pod spec. +// This only returns pod specs for v1 compatible objects. 
+func GetPodSpecV1(obj runtime.Object) (*corev1.PodSpec, *field.Path, error) { + switch r := obj.(type) { + + case *corev1.Pod: + return &r.Spec, field.NewPath("spec"), nil + + case *corev1.PodTemplate: + return &r.Template.Spec, field.NewPath("template", "spec"), nil + + case *corev1.ReplicationController: + if r.Spec.Template != nil { + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + } + + case *extensionsv1beta1.DaemonSet: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *kappsv1.DaemonSet: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *kappsv1beta2.DaemonSet: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + + case *extensionsv1beta1.Deployment: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *kappsv1.Deployment: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *kappsv1beta1.Deployment: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *kappsv1beta2.Deployment: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + + case *extensionsv1beta1.ReplicaSet: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *kappsv1.ReplicaSet: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *kappsv1beta2.ReplicaSet: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + + case *batchv1.Job: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + + case *batchv2alpha1.CronJob: + return &r.Spec.JobTemplate.Spec.Template.Spec, field.NewPath("spec", "jobTemplate", "spec", "template", "spec"), nil + case *batchv1beta1.CronJob: + return &r.Spec.JobTemplate.Spec.Template.Spec, field.NewPath("spec", "jobTemplate", "spec", "template", "spec"), nil + + case *batchv2alpha1.JobTemplate: + return 
&r.Template.Spec.Template.Spec, field.NewPath("template", "spec", "template", "spec"), nil + case *batchv1beta1.JobTemplate: + return &r.Template.Spec.Template.Spec, field.NewPath("template", "spec", "template", "spec"), nil + + case *kappsv1.StatefulSet: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *kappsv1beta1.StatefulSet: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + case *kappsv1beta2.StatefulSet: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + + case *securityv1.PodSecurityPolicySubjectReview: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + + case *securityv1.PodSecurityPolicySelfSubjectReview: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + + case *securityv1.PodSecurityPolicyReview: + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + + case *appsv1.DeploymentConfig: + if r.Spec.Template != nil { + return &r.Spec.Template.Spec, field.NewPath("spec", "template", "spec"), nil + } + } + return nil, nil, errNoPodSpec +} + +// GetTemplateMetaObject returns a mutable metav1.Object interface for the template +// the object contains, or false if no such object is available. 
+func GetTemplateMetaObject(obj runtime.Object) (metav1.Object, bool) { + switch r := obj.(type) { + + case *corev1.PodTemplate: + return &r.Template.ObjectMeta, true + + case *corev1.ReplicationController: + if r.Spec.Template != nil { + return &r.Spec.Template.ObjectMeta, true + } + + case *extensionsv1beta1.DaemonSet: + return &r.Spec.Template.ObjectMeta, true + case *kappsv1.DaemonSet: + return &r.Spec.Template.ObjectMeta, true + case *kappsv1beta2.DaemonSet: + return &r.Spec.Template.ObjectMeta, true + + case *extensionsv1beta1.Deployment: + return &r.Spec.Template.ObjectMeta, true + case *kappsv1.Deployment: + return &r.Spec.Template.ObjectMeta, true + case *kappsv1beta1.Deployment: + return &r.Spec.Template.ObjectMeta, true + case *kappsv1beta2.Deployment: + return &r.Spec.Template.ObjectMeta, true + + case *extensionsv1beta1.ReplicaSet: + return &r.Spec.Template.ObjectMeta, true + case *kappsv1.ReplicaSet: + return &r.Spec.Template.ObjectMeta, true + case *kappsv1beta2.ReplicaSet: + return &r.Spec.Template.ObjectMeta, true + + case *batchv1.Job: + return &r.Spec.Template.ObjectMeta, true + + case *batchv2alpha1.CronJob: + return &r.Spec.JobTemplate.Spec.Template.ObjectMeta, true + case *batchv1beta1.CronJob: + return &r.Spec.JobTemplate.Spec.Template.ObjectMeta, true + + case *batchv2alpha1.JobTemplate: + return &r.Template.Spec.Template.ObjectMeta, true + case *batchv1beta1.JobTemplate: + return &r.Template.Spec.Template.ObjectMeta, true + + case *kappsv1.StatefulSet: + return &r.Spec.Template.ObjectMeta, true + case *kappsv1beta1.StatefulSet: + return &r.Spec.Template.ObjectMeta, true + case *kappsv1beta2.StatefulSet: + return &r.Spec.Template.ObjectMeta, true + + case *securityv1.PodSecurityPolicySubjectReview: + return &r.Spec.Template.ObjectMeta, true + + case *securityv1.PodSecurityPolicySelfSubjectReview: + return &r.Spec.Template.ObjectMeta, true + + case *securityv1.PodSecurityPolicyReview: + return &r.Spec.Template.ObjectMeta, true + + case 
*appsv1.DeploymentConfig: + if r.Spec.Template != nil { + return &r.Spec.Template.ObjectMeta, true + } + } + return nil, false +} + +type containerV1Mutator struct { + *corev1.Container +} + +func (m containerV1Mutator) GetName() string { return m.Name } +func (m containerV1Mutator) GetImage() string { return m.Image } +func (m containerV1Mutator) SetImage(image string) { m.Image = image } + +// podSpecV1Mutator implements the mutation interface over objects with a pod spec. +type podSpecV1Mutator struct { + spec *corev1.PodSpec + oldSpec *corev1.PodSpec + path *field.Path +} + +func (m *podSpecV1Mutator) Path() *field.Path { + return m.path +} + +func hasIdenticalPodSpecV1Image(spec *corev1.PodSpec, containerName, image string) bool { + if spec == nil { + return false + } + for i := range spec.InitContainers { + if spec.InitContainers[i].Name == containerName { + return spec.InitContainers[i].Image == image + } + } + for i := range spec.Containers { + if spec.Containers[i].Name == containerName { + return spec.Containers[i].Image == image + } + } + return false +} + +// Mutate applies fn to all containers and init containers. If fn changes the Kind to +// any value other than "DockerImage", an error is set on that field. 
+func (m *podSpecV1Mutator) Mutate(fn ImageReferenceMutateFunc) field.ErrorList { + var errs field.ErrorList + for i := range m.spec.InitContainers { + container := &m.spec.InitContainers[i] + if hasIdenticalPodSpecV1Image(m.oldSpec, container.Name, container.Image) { + continue + } + ref := corev1.ObjectReference{Kind: "DockerImage", Name: container.Image} + if err := fn(&ref); err != nil { + errs = append(errs, fieldErrorOrInternal(err, m.path.Child("initContainers").Index(i).Child("image"))) + continue + } + if ref.Kind != "DockerImage" { + errs = append(errs, fieldErrorOrInternal(fmt.Errorf("pod specs may only contain references to docker images, not %q", ref.Kind), m.path.Child("initContainers").Index(i).Child("image"))) + continue + } + container.Image = ref.Name + } + for i := range m.spec.Containers { + container := &m.spec.Containers[i] + if hasIdenticalPodSpecV1Image(m.oldSpec, container.Name, container.Image) { + continue + } + ref := corev1.ObjectReference{Kind: "DockerImage", Name: container.Image} + if err := fn(&ref); err != nil { + errs = append(errs, fieldErrorOrInternal(err, m.path.Child("containers").Index(i).Child("image"))) + continue + } + if ref.Kind != "DockerImage" { + errs = append(errs, fieldErrorOrInternal(fmt.Errorf("pod specs may only contain references to docker images, not %q", ref.Kind), m.path.Child("containers").Index(i).Child("image"))) + continue + } + container.Image = ref.Name + } + return errs +} + +func (m *podSpecV1Mutator) GetContainerByName(name string) (ContainerMutator, bool) { + spec := m.spec + for i := range spec.InitContainers { + if name != spec.InitContainers[i].Name { + continue + } + return containerV1Mutator{&spec.InitContainers[i]}, true + } + for i := range spec.Containers { + if name != spec.Containers[i].Name { + continue + } + return containerV1Mutator{&spec.Containers[i]}, true + } + return nil, false +} + +func (m *podSpecV1Mutator) GetContainerByIndex(init bool, i int) (ContainerMutator, bool) { + var 
container *corev1.Container + spec := m.spec + if init { + if i < 0 || i >= len(spec.InitContainers) { + return nil, false + } + container = &spec.InitContainers[i] + } else { + if i < 0 || i >= len(spec.Containers) { + return nil, false + } + container = &spec.Containers[i] + } + return containerV1Mutator{container}, true +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/referencemutator/pods_test.go b/vendor/github.com/openshift/library-go/pkg/image/referencemutator/pods_test.go new file mode 100644 index 000000000..545b1fa8d --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/referencemutator/pods_test.go @@ -0,0 +1,150 @@ +package referencemutator + +import ( + "reflect" + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func Test_podSpecV1Mutator_Mutate(t *testing.T) { + type fields struct { + spec *corev1.PodSpec + oldSpec *corev1.PodSpec + path *field.Path + } + type args struct { + fn ImageReferenceMutateFunc + } + tests := []struct { + name string + fields fields + args args + want field.ErrorList + wantSpec *corev1.PodSpec + }{ + { + name: "no-op", + fields: fields{spec: &corev1.PodSpec{}}, + }, + { + name: "passes init container reference", + fields: fields{spec: &corev1.PodSpec{ + InitContainers: []corev1.Container{ + {Name: "1", Image: "test"}, + }, + }}, + args: args{fn: func(ref *corev1.ObjectReference) error { + if !reflect.DeepEqual(ref, imageRef("test")) { + t.Errorf("unexpected ref: %#v", ref) + } + return nil + }}, + wantSpec: &corev1.PodSpec{ + InitContainers: []corev1.Container{ + {Name: "1", Image: "test"}, + }, + }, + }, + { + name: "passes container reference", + fields: fields{spec: &corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "1", Image: "test"}, + }, + }}, + args: args{fn: func(ref *corev1.ObjectReference) error { + if !reflect.DeepEqual(ref, imageRef("test")) { + t.Errorf("unexpected ref: %#v", ref) + } + return nil + }}, + wantSpec: 
&corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "1", Image: "test"}, + }, + }, + }, + + { + name: "mutates reference", + fields: fields{spec: &corev1.PodSpec{ + InitContainers: []corev1.Container{ + {Name: "1", Image: "test"}, + }, + Containers: []corev1.Container{ + {Name: "2", Image: "test-2"}, + }, + }}, + args: args{fn: func(ref *corev1.ObjectReference) error { + if ref.Name == "test-2" { + ref.Name = "test-3" + } + return nil + }}, + wantSpec: &corev1.PodSpec{ + InitContainers: []corev1.Container{ + {Name: "1", Image: "test"}, + }, + Containers: []corev1.Container{ + {Name: "2", Image: "test-3"}, + }, + }, + }, + { + name: "mutates only changed references", + fields: fields{ + spec: &corev1.PodSpec{ + InitContainers: []corev1.Container{ + {Name: "1", Image: "test"}, + }, + Containers: []corev1.Container{ + {Name: "2", Image: "test-2"}, + }, + }, + oldSpec: &corev1.PodSpec{ + InitContainers: []corev1.Container{ + {Name: "1", Image: "test-1"}, + }, + Containers: []corev1.Container{ + {Name: "2", Image: "test-2"}, + }, + }, + }, + args: args{fn: func(ref *corev1.ObjectReference) error { + if ref.Name != "test" { + t.Errorf("did not expect to be called for existing reference") + } + ref.Name = "test-3" + return nil + }}, + wantSpec: &corev1.PodSpec{ + InitContainers: []corev1.Container{ + {Name: "1", Image: "test-3"}, + }, + Containers: []corev1.Container{ + {Name: "2", Image: "test-2"}, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := &podSpecV1Mutator{ + spec: tt.fields.spec, + oldSpec: tt.fields.oldSpec, + path: tt.fields.path, + } + if tt.wantSpec == nil { + tt.wantSpec = &corev1.PodSpec{} + } + if got := m.Mutate(tt.args.fn); !reflect.DeepEqual(got, tt.want) { + t.Errorf("buildSpecMutator.Mutate() = %v, want %v", got, tt.want) + } + if !reflect.DeepEqual(tt.wantSpec, tt.fields.spec) { + t.Errorf("buildSpecMutator.Mutate() spec = %v, want %v", tt.fields.spec, tt.wantSpec) + } + }) + } +} diff --git 
a/vendor/github.com/openshift/library-go/pkg/image/registryclient/client.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/client.go new file mode 100644 index 000000000..0596a0dcf --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/registryclient/client.go @@ -0,0 +1,683 @@ +package registryclient + +import ( + "fmt" + "hash" + "io" + "net" + "net/http" + "net/url" + "path" + "sort" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/time/rate" + + "k8s.io/klog" + + "github.com/docker/distribution" + "github.com/docker/distribution/manifest/schema1" + "github.com/docker/distribution/reference" + registryclient "github.com/docker/distribution/registry/client" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/distribution/registry/client/auth/challenge" + "github.com/docker/distribution/registry/client/transport" + "github.com/opencontainers/go-digest" +) + +// RepositoryRetriever fetches a Docker distribution.Repository. +type RepositoryRetriever interface { + // Repository returns a properly authenticated distribution.Repository for the given registry, repository + // name, and insecure toleration behavior. + Repository(ctx context.Context, registry *url.URL, repoName string, insecure bool) (distribution.Repository, error) +} + +// ErrNotV2Registry is returned when the server does not report itself as a V2 Docker registry +type ErrNotV2Registry struct { + Registry string +} + +func (e *ErrNotV2Registry) Error() string { + return fmt.Sprintf("endpoint %q does not support v2 API", e.Registry) +} + +type AuthHandlersFunc func(transport http.RoundTripper, registry *url.URL, repoName string) []auth.AuthenticationHandler + +// NewContext is capable of creating RepositoryRetrievers. 
+func NewContext(transport, insecureTransport http.RoundTripper) *Context { + return &Context{ + Transport: transport, + InsecureTransport: insecureTransport, + Challenges: challenge.NewSimpleManager(), + Actions: []string{"pull"}, + Retries: 2, + Credentials: NoCredentials, + + pings: make(map[url.URL]error), + redirect: make(map[url.URL]*url.URL), + } +} + +type transportCache struct { + rt http.RoundTripper + scopes map[string]struct{} + transport http.RoundTripper +} + +type Context struct { + Transport http.RoundTripper + InsecureTransport http.RoundTripper + Challenges challenge.Manager + Scopes []auth.Scope + Actions []string + Retries int + Credentials auth.CredentialStore + Limiter *rate.Limiter + + DisableDigestVerification bool + + lock sync.Mutex + pings map[url.URL]error + redirect map[url.URL]*url.URL + cachedTransports []transportCache +} + +func (c *Context) Copy() *Context { + c.lock.Lock() + defer c.lock.Unlock() + copied := &Context{ + Transport: c.Transport, + InsecureTransport: c.InsecureTransport, + Challenges: c.Challenges, + Scopes: c.Scopes, + Actions: c.Actions, + Retries: c.Retries, + Credentials: c.Credentials, + Limiter: c.Limiter, + + DisableDigestVerification: c.DisableDigestVerification, + + pings: make(map[url.URL]error), + redirect: make(map[url.URL]*url.URL), + } + for k, v := range c.redirect { + copied.redirect[k] = v + } + return copied +} + +func (c *Context) WithRateLimiter(limiter *rate.Limiter) *Context { + c.Limiter = limiter + return c +} + +func (c *Context) WithScopes(scopes ...auth.Scope) *Context { + c.Scopes = scopes + return c +} + +func (c *Context) WithActions(actions ...string) *Context { + c.Actions = actions + return c +} + +func (c *Context) WithCredentials(credentials auth.CredentialStore) *Context { + c.Credentials = credentials + return c +} + +// Reset clears any cached repository info for this context. 
+func (c *Context) Reset() { + c.lock.Lock() + defer c.lock.Unlock() + + c.pings = nil + c.redirect = nil +} + +func (c *Context) cachedPing(src url.URL) (*url.URL, error) { + c.lock.Lock() + defer c.lock.Unlock() + + err, ok := c.pings[src] + if !ok { + return nil, nil + } + if err != nil { + return nil, err + } + if redirect, ok := c.redirect[src]; ok { + src = *redirect + } + return &src, nil +} + +// Ping contacts a registry and returns the transport and URL of the registry or an error. +func (c *Context) Ping(ctx context.Context, registry *url.URL, insecure bool) (http.RoundTripper, *url.URL, error) { + t := c.Transport + if insecure && c.InsecureTransport != nil { + t = c.InsecureTransport + } + src := *registry + if len(src.Scheme) == 0 { + src.Scheme = "https" + } + + // reused cached pings + url, err := c.cachedPing(src) + if err != nil { + return nil, nil, err + } + if url != nil { + return t, url, nil + } + + // follow redirects + redirect, err := c.ping(src, insecure, t) + + c.lock.Lock() + defer c.lock.Unlock() + c.pings[src] = err + if err != nil { + return nil, nil, err + } + if redirect != nil { + c.redirect[src] = redirect + src = *redirect + } + return t, &src, nil +} + +func (c *Context) Repository(ctx context.Context, registry *url.URL, repoName string, insecure bool) (distribution.Repository, error) { + named, err := reference.WithName(repoName) + if err != nil { + return nil, err + } + + rt, src, err := c.Ping(ctx, registry, insecure) + if err != nil { + return nil, err + } + + rt = c.repositoryTransport(rt, src, repoName) + + repo, err := registryclient.NewRepository(named, src.String(), rt) + if err != nil { + return nil, err + } + if !c.DisableDigestVerification { + repo = repositoryVerifier{Repository: repo} + } + limiter := c.Limiter + if limiter == nil { + limiter = rate.NewLimiter(rate.Limit(5), 5) + } + return NewLimitedRetryRepository(repo, c.Retries, limiter), nil +} + +func (c *Context) ping(registry url.URL, insecure bool, 
transport http.RoundTripper) (*url.URL, error) { + pingClient := &http.Client{ + Transport: transport, + Timeout: 15 * time.Second, + } + target := registry + target.Path = path.Join(target.Path, "v2") + "/" + req, err := http.NewRequest("GET", target.String(), nil) + if err != nil { + return nil, err + } + resp, err := pingClient.Do(req) + if err != nil { + if insecure && registry.Scheme == "https" { + klog.V(5).Infof("Falling back to an HTTP check for an insecure registry %s: %v", registry.String(), err) + registry.Scheme = "http" + _, nErr := c.ping(registry, true, transport) + if nErr != nil { + return nil, nErr + } + return ®istry, nil + } + return nil, err + } + defer resp.Body.Close() + + versions := auth.APIVersions(resp, "Docker-Distribution-API-Version") + if len(versions) == 0 { + klog.V(5).Infof("Registry responded to v2 Docker endpoint, but has no header for Docker Distribution %s: %d, %#v", req.URL, resp.StatusCode, resp.Header) + switch { + case resp.StatusCode >= 200 && resp.StatusCode < 300: + // v2 + case resp.StatusCode == http.StatusUnauthorized, resp.StatusCode == http.StatusForbidden: + // v2 + default: + return nil, &ErrNotV2Registry{Registry: registry.String()} + } + } + + c.Challenges.AddResponse(resp) + + return nil, nil +} + +func hasAll(a, b map[string]struct{}) bool { + for key := range b { + if _, ok := a[key]; !ok { + return false + } + } + return true +} + +type stringScope string + +func (s stringScope) String() string { return string(s) } + +// cachedTransport reuses an underlying transport for the given round tripper based +// on the set of passed scopes. It will always return a transport that has at least the +// provided scope list. 
+func (c *Context) cachedTransport(rt http.RoundTripper, scopes []auth.Scope) http.RoundTripper { + scopeNames := make(map[string]struct{}) + for _, scope := range scopes { + scopeNames[scope.String()] = struct{}{} + } + + c.lock.Lock() + defer c.lock.Unlock() + for _, c := range c.cachedTransports { + if c.rt == rt && hasAll(c.scopes, scopeNames) { + return c.transport + } + } + + // avoid taking a dependency on kube sets.String for minimal dependencies + names := make([]string, 0, len(scopeNames)) + for s := range scopeNames { + names = append(names, s) + } + sort.Strings(names) + scopes = make([]auth.Scope, 0, len(scopeNames)) + for _, s := range names { + scopes = append(scopes, stringScope(s)) + } + + t := transport.NewTransport( + rt, + // TODO: slightly smarter authorizer that retries unauthenticated requests + // TODO: make multiple attempts if the first credential fails + auth.NewAuthorizer( + c.Challenges, + auth.NewTokenHandlerWithOptions(auth.TokenHandlerOptions{ + Transport: rt, + Credentials: c.Credentials, + Scopes: scopes, + }), + auth.NewBasicHandler(c.Credentials), + ), + ) + c.cachedTransports = append(c.cachedTransports, transportCache{ + rt: rt, + scopes: scopeNames, + transport: t, + }) + return t +} + +func (c *Context) scopes(repoName string) []auth.Scope { + scopes := make([]auth.Scope, 0, 1+len(c.Scopes)) + scopes = append(scopes, c.Scopes...) 
+ if len(c.Actions) == 0 { + scopes = append(scopes, auth.RepositoryScope{Repository: repoName, Actions: []string{"pull"}}) + } else { + scopes = append(scopes, auth.RepositoryScope{Repository: repoName, Actions: c.Actions}) + } + return scopes +} + +func (c *Context) repositoryTransport(t http.RoundTripper, registry *url.URL, repoName string) http.RoundTripper { + return c.cachedTransport(t, c.scopes(repoName)) +} + +var nowFn = time.Now + +type retryRepository struct { + distribution.Repository + + limiter *rate.Limiter + retries int + sleepFn func(time.Duration) +} + +// NewLimitedRetryRepository wraps a distribution.Repository with helpers that will retry temporary failures +// over a limited time window and duration, and also obeys a rate limit. +func NewLimitedRetryRepository(repo distribution.Repository, retries int, limiter *rate.Limiter) distribution.Repository { + return &retryRepository{ + Repository: repo, + + limiter: limiter, + retries: retries, + sleepFn: time.Sleep, + } +} + +// isTemporaryHTTPError returns true if the error indicates a temporary or partial HTTP failure +func isTemporaryHTTPError(err error) (time.Duration, bool) { + if err == nil { + return 0, false + } + switch t := err.(type) { + case net.Error: + return time.Second, t.Temporary() || t.Timeout() + case *registryclient.UnexpectedHTTPResponseError: + if t.StatusCode == http.StatusTooManyRequests { + return 2 * time.Second, true + } + } + return 0, false +} + +// shouldRetry returns true if the error was temporary and count is less than retries. 
+func (c *retryRepository) shouldRetry(count int, err error) bool { + if err == nil { + return false + } + retryAfter, ok := isTemporaryHTTPError(err) + if !ok { + return false + } + if count >= c.retries { + return false + } + c.sleepFn(retryAfter) + klog.V(4).Infof("Retrying request to Docker registry after encountering error (%d attempts remaining): %v", count, err) + return true +} + +// Manifests wraps the manifest service in a retryManifest for shared retries. +func (c *retryRepository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + s, err := c.Repository.Manifests(ctx, options...) + if err != nil { + return nil, err + } + return retryManifest{ManifestService: s, repo: c}, nil +} + +// Blobs wraps the blob service in a retryBlobStore for shared retries. +func (c *retryRepository) Blobs(ctx context.Context) distribution.BlobStore { + return retryBlobStore{BlobStore: c.Repository.Blobs(ctx), repo: c} +} + +// Tags lists the tags under the named repository. +func (c *retryRepository) Tags(ctx context.Context) distribution.TagService { + return &retryTags{TagService: c.Repository.Tags(ctx), repo: c} +} + +// retryManifest wraps the manifest service and invokes retries on the repo. +type retryManifest struct { + distribution.ManifestService + repo *retryRepository +} + +// Exists returns true if the manifest exists. +func (c retryManifest) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { + for i := 0; ; i++ { + if err := c.repo.limiter.Wait(ctx); err != nil { + return false, err + } + exists, err := c.ManifestService.Exists(ctx, dgst) + if c.repo.shouldRetry(i, err) { + continue + } + return exists, err + } +} + +// Get retrieves the manifest identified by the digest, if it exists. 
+func (c retryManifest) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + for i := 0; ; i++ { + if err := c.repo.limiter.Wait(ctx); err != nil { + return nil, err + } + m, err := c.ManifestService.Get(ctx, dgst, options...) + if c.repo.shouldRetry(i, err) { + continue + } + return m, err + } +} + +// retryBlobStore wraps the blob store and invokes retries on the repo. +type retryBlobStore struct { + distribution.BlobStore + repo *retryRepository +} + +func (c retryBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + for i := 0; ; i++ { + if err := c.repo.limiter.Wait(ctx); err != nil { + return distribution.Descriptor{}, err + } + d, err := c.BlobStore.Stat(ctx, dgst) + if c.repo.shouldRetry(i, err) { + continue + } + return d, err + } +} + +func (c retryBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, req *http.Request, dgst digest.Digest) error { + for i := 0; ; i++ { + if err := c.repo.limiter.Wait(ctx); err != nil { + return err + } + err := c.BlobStore.ServeBlob(ctx, w, req, dgst) + if c.repo.shouldRetry(i, err) { + continue + } + return err + } +} + +func (c retryBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + for i := 0; ; i++ { + if err := c.repo.limiter.Wait(ctx); err != nil { + return nil, err + } + rsc, err := c.BlobStore.Open(ctx, dgst) + if c.repo.shouldRetry(i, err) { + continue + } + return rsc, err + } +} + +type retryTags struct { + distribution.TagService + repo *retryRepository +} + +func (c *retryTags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + for i := 0; ; i++ { + if err := c.repo.limiter.Wait(ctx); err != nil { + return distribution.Descriptor{}, err + } + t, err := c.TagService.Get(ctx, tag) + if c.repo.shouldRetry(i, err) { + continue + } + return t, err + } +} + +func (c *retryTags) All(ctx context.Context) ([]string, error) { + 
for i := 0; ; i++ { + if err := c.repo.limiter.Wait(ctx); err != nil { + return nil, err + } + t, err := c.TagService.All(ctx) + if c.repo.shouldRetry(i, err) { + continue + } + return t, err + } +} + +func (c *retryTags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { + for i := 0; ; i++ { + if err := c.repo.limiter.Wait(ctx); err != nil { + return nil, err + } + t, err := c.TagService.Lookup(ctx, digest) + if c.repo.shouldRetry(i, err) { + continue + } + return t, err + } +} + +// repositoryVerifier ensures that manifests are verified when they are retrieved via digest +type repositoryVerifier struct { + distribution.Repository +} + +// Manifests returns a ManifestService that checks whether manifests match their digest. +func (r repositoryVerifier) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + ms, err := r.Repository.Manifests(ctx, options...) + if err != nil { + return nil, err + } + return manifestServiceVerifier{ManifestService: ms}, nil +} + +// Blobs returns a BlobStore that checks whether blob content returned from the server matches the expected digest. +func (r repositoryVerifier) Blobs(ctx context.Context) distribution.BlobStore { + return blobStoreVerifier{BlobStore: r.Repository.Blobs(ctx)} +} + +// manifestServiceVerifier wraps the manifest service and ensures that content retrieved by digest matches that digest. +type manifestServiceVerifier struct { + distribution.ManifestService +} + +// Get retrieves the manifest identified by the digest and guarantees it matches the content it is retrieved by. +func (m manifestServiceVerifier) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + manifest, err := m.ManifestService.Get(ctx, dgst, options...) 
+ if err != nil { + return nil, err + } + if len(dgst) > 0 { + if err := VerifyManifestIntegrity(manifest, dgst); err != nil { + return nil, err + } + } + return manifest, nil +} + +// VerifyManifestIntegrity checks the provided manifest against the specified digest and returns an error +// if the manifest does not match that digest. +func VerifyManifestIntegrity(manifest distribution.Manifest, dgst digest.Digest) error { + contentDigest, err := ContentDigestForManifest(manifest, dgst.Algorithm()) + if err != nil { + return err + } + if contentDigest != dgst { + if klog.V(4) { + _, payload, _ := manifest.Payload() + klog.Infof("Mismatched content: %s\n%s", contentDigest, string(payload)) + } + return fmt.Errorf("content integrity error: the manifest retrieved with digest %s does not match the digest calculated from the content %s", dgst, contentDigest) + } + return nil +} + +// ContentDigestForManifest returns the digest in the provided algorithm of the supplied manifest's contents. +func ContentDigestForManifest(manifest distribution.Manifest, algo digest.Algorithm) (digest.Digest, error) { + switch t := manifest.(type) { + case *schema1.SignedManifest: + // schema1 manifest digests are calculated from the payload + if len(t.Canonical) == 0 { + return "", fmt.Errorf("the schema1 manifest does not have a canonical representation") + } + return algo.FromBytes(t.Canonical), nil + default: + _, payload, err := manifest.Payload() + if err != nil { + return "", err + } + return algo.FromBytes(payload), nil + } +} + +// blobStoreVerifier wraps the blobs service and ensures that content retrieved by digest matches that digest. +type blobStoreVerifier struct { + distribution.BlobStore +} + +// Get retrieves the blob identified by the digest and guarantees it matches the content it is retrieved by. 
+func (b blobStoreVerifier) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + data, err := b.BlobStore.Get(ctx, dgst) + if err != nil { + return nil, err + } + if len(dgst) > 0 { + dataDgst := dgst.Algorithm().FromBytes(data) + if dataDgst != dgst { + return nil, fmt.Errorf("content integrity error: the blob retrieved with digest %s does not match the digest calculated from the content %s", dgst, dataDgst) + } + } + return data, nil +} + +// Open streams the blob identified by the digest and guarantees it matches the content it is retrieved by. +func (b blobStoreVerifier) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + rsc, err := b.BlobStore.Open(ctx, dgst) + if err != nil { + return nil, err + } + if len(dgst) > 0 { + return &readSeekCloserVerifier{ + rsc: rsc, + hash: dgst.Algorithm().Hash(), + expect: dgst, + }, nil + } + return rsc, nil +} + +// readSeekCloserVerifier performs validation over the stream returned by a distribution.ReadSeekCloser returned +// by blobService.Open. +type readSeekCloserVerifier struct { + rsc distribution.ReadSeekCloser + hash hash.Hash + expect digest.Digest +} + +// Read verifies the bytes in the underlying stream match the expected digest or returns an error. +func (r *readSeekCloserVerifier) Read(p []byte) (n int, err error) { + n, err = r.rsc.Read(p) + if r.hash != nil { + if n > 0 { + r.hash.Write(p[:n]) + } + if err == io.EOF { + actual := digest.NewDigest(r.expect.Algorithm(), r.hash) + if actual != r.expect { + return n, fmt.Errorf("content integrity error: the blob streamed from digest %s does not match the digest calculated from the content %s", r.expect, actual) + } + } + } + return n, err +} + +// Seek moves the underlying stream and also cancels any streaming hash. Verification is not possible +// with a seek. 
+func (r *readSeekCloserVerifier) Seek(offset int64, whence int) (int64, error) { + r.hash = nil + return r.rsc.Seek(offset, whence) +} + +// Close closes the underlying stream. +func (r *readSeekCloserVerifier) Close() error { + return r.rsc.Close() +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/registryclient/client_test.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/client_test.go new file mode 100644 index 000000000..9fb65fb7a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/registryclient/client_test.go @@ -0,0 +1,652 @@ +package registryclient + +import ( + "bytes" + "encoding/hex" + "fmt" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "strings" + "testing" + "time" + + "golang.org/x/time/rate" + + "github.com/docker/distribution" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/opencontainers/go-digest" + "golang.org/x/net/context" +) + +type mockRetriever struct { + repo distribution.Repository + insecure bool + err error +} + +func (r *mockRetriever) Repository(ctx context.Context, registry *url.URL, repoName string, insecure bool) (distribution.Repository, error) { + r.insecure = insecure + return r.repo, r.err +} + +type mockRepository struct { + repoErr, getErr, getByTagErr, getTagErr, tagErr, untagErr, allTagErr, err error + + blobs *mockBlobStore + + manifest distribution.Manifest + tags map[string]string +} + +func (r *mockRepository) Name() string { return "test" } +func (r *mockRepository) Named() reference.Named { + named, _ := reference.WithName("test") + return named +} + +func (r *mockRepository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + return r, r.repoErr +} +func (r *mockRepository) Blobs(ctx context.Context) distribution.BlobStore { return r.blobs } +func (r *mockRepository) Exists(ctx context.Context, dgst 
digest.Digest) (bool, error) { + return false, r.getErr +} +func (r *mockRepository) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + for _, option := range options { + if _, ok := option.(distribution.WithTagOption); ok { + return r.manifest, r.getByTagErr + } + } + return r.manifest, r.getErr +} +func (r *mockRepository) Delete(ctx context.Context, dgst digest.Digest) error { + return fmt.Errorf("not implemented") +} +func (r *mockRepository) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + return "", fmt.Errorf("not implemented") +} +func (r *mockRepository) Tags(ctx context.Context) distribution.TagService { + return &mockTagService{repo: r} +} + +type mockBlobStore struct { + distribution.BlobStore + + blobs map[digest.Digest][]byte + + statErr, serveErr, openErr error +} + +func (r *mockBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return distribution.Descriptor{}, r.statErr +} + +func (r *mockBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, req *http.Request, dgst digest.Digest) error { + return r.serveErr +} + +func (r *mockBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + return nil, r.openErr +} + +func (r *mockBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + b, exists := r.blobs[dgst] + if !exists { + return nil, distribution.ErrBlobUnknown + } + return b, nil +} + +type mockTagService struct { + distribution.TagService + + repo *mockRepository +} + +func (r *mockTagService) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + v, ok := r.repo.tags[tag] + if !ok { + return distribution.Descriptor{}, r.repo.getTagErr + } + dgst, err := digest.Parse(v) + if err != nil { + panic(err) + } + return distribution.Descriptor{Digest: dgst}, 
r.repo.getTagErr +} + +func (r *mockTagService) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { + r.repo.tags[tag] = desc.Digest.String() + return r.repo.tagErr +} + +func (r *mockTagService) Untag(ctx context.Context, tag string) error { + if _, ok := r.repo.tags[tag]; ok { + delete(r.repo.tags, tag) + } + return r.repo.untagErr +} + +func (r *mockTagService) All(ctx context.Context) (res []string, err error) { + err = r.repo.allTagErr + for tag := range r.repo.tags { + res = append(res, tag) + } + return +} + +func (r *mockTagService) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { + return nil, fmt.Errorf("not implemented") +} + +func TestPing(t *testing.T) { + retriever := NewContext(http.DefaultTransport, http.DefaultTransport).WithCredentials(NoCredentials) + + fn404 := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(404) } + var fn http.HandlerFunc + mux := http.NewServeMux() + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + if fn != nil { + fn(w, r) + } + }) + server := httptest.NewServer(mux) + defer server.Close() + + uri, _ := url.Parse(server.URL) + + testCases := []struct { + name string + uri url.URL + expectV2 bool + fn http.HandlerFunc + }{ + {name: "http only", uri: url.URL{Scheme: "http", Host: uri.Host}, expectV2: false, fn: fn404}, + {name: "https only", uri: url.URL{Scheme: "https", Host: uri.Host}, expectV2: false, fn: fn404}, + { + name: "403", + uri: url.URL{Scheme: "https", Host: uri.Host}, + expectV2: true, + fn: func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/v2/" { + w.WriteHeader(403) + return + } + }, + }, + { + name: "401", + uri: url.URL{Scheme: "https", Host: uri.Host}, + expectV2: true, + fn: func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/v2/" { + w.WriteHeader(401) + return + } + }, + }, + { + name: "200", + uri: url.URL{Scheme: "https", Host: uri.Host}, + expectV2: true, + fn: func(w 
http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/v2/" { + w.WriteHeader(200) + return + } + }, + }, + { + name: "has header but 500", + uri: url.URL{Scheme: "https", Host: uri.Host}, + expectV2: true, + fn: func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/v2/" { + w.Header().Set("Docker-Distribution-API-Version", "registry/2.0") + w.WriteHeader(500) + return + } + }, + }, + { + name: "no header, 500", + uri: url.URL{Scheme: "https", Host: uri.Host}, + expectV2: false, + fn: func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/v2/" { + w.WriteHeader(500) + return + } + }, + }, + } + + for _, test := range testCases { + fn = test.fn + _, err := retriever.ping(test.uri, true, retriever.InsecureTransport) + if (err != nil && strings.Contains(err.Error(), "does not support v2 API")) == test.expectV2 { + t.Errorf("%s: Expected ErrNotV2Registry, got %v", test.name, err) + } + } +} + +var unlimited = rate.NewLimiter(rate.Inf, 100) + +type temporaryError struct{} + +func (temporaryError) Error() string { return "temporary" } +func (temporaryError) Timeout() bool { return false } +func (temporaryError) Temporary() bool { return true } + +func TestShouldRetry(t *testing.T) { + r := NewLimitedRetryRepository(nil, 1, unlimited).(*retryRepository) + sleeps := 0 + r.sleepFn = func(time.Duration) { sleeps++ } + + // nil error doesn't consume retries + if r.shouldRetry(0, nil) { + t.Fatal(r) + } + + // normal error doesn't consume retries + if r.shouldRetry(0, fmt.Errorf("error")) { + t.Fatal(r) + } + + // docker error doesn't consume retries + if r.shouldRetry(0, errcode.ErrorCodeDenied) { + t.Fatal(r) + } + if sleeps != 0 { + t.Fatal(sleeps) + } + + now := time.Unix(1, 0) + nowFn = func() time.Time { + return now + } + // should retry a temporary error + r = NewLimitedRetryRepository(nil, 1, unlimited).(*retryRepository) + sleeps = 0 + r.sleepFn = func(time.Duration) { sleeps++ } + if !r.shouldRetry(0, temporaryError{}) { + t.Fatal(r) 
+ } + if r.shouldRetry(1, temporaryError{}) { + t.Fatal(r) + } + if sleeps != 1 { + t.Fatal(sleeps) + } +} + +func TestRetryFailure(t *testing.T) { + sleeps := 0 + sleepFn := func(time.Duration) { sleeps++ } + + ctx := context.Background() + // do not retry on Manifests() + repo := &mockRepository{repoErr: fmt.Errorf("does not support v2 API")} + r := NewLimitedRetryRepository(repo, 1, unlimited).(*retryRepository) + sleeps = 0 + r.sleepFn = sleepFn + if m, err := r.Manifests(ctx); m != nil || err != repo.repoErr || r.retries != 1 { + t.Fatalf("unexpected: %v %v %#v", m, err, r) + } + + // do not retry on Manifests() + repo = &mockRepository{repoErr: temporaryError{}} + r = NewLimitedRetryRepository(repo, 4, unlimited).(*retryRepository) + sleeps = 0 + r.sleepFn = sleepFn + if m, err := r.Manifests(ctx); m != nil || err != repo.repoErr || r.retries != 4 { + t.Fatalf("unexpected: %v %v %#v", m, err, r) + } + + // do not retry on non standard errors + repo = &mockRepository{getErr: fmt.Errorf("does not support v2 API")} + r = NewLimitedRetryRepository(repo, 4, unlimited).(*retryRepository) + sleeps = 0 + r.sleepFn = sleepFn + m, err := r.Manifests(ctx) + if err != nil { + t.Fatal(err) + } + if _, err := m.Get(ctx, digest.Digest("foo")); err != repo.getErr || r.retries != 4 { + t.Fatalf("unexpected: %v %v %#v", m, err, r) + } + + // retry four times + repo = &mockRepository{ + getErr: temporaryError{}, + blobs: &mockBlobStore{ + serveErr: temporaryError{}, + statErr: temporaryError{}, + openErr: temporaryError{}, + }, + } + r = NewLimitedRetryRepository(repo, 4, unlimited).(*retryRepository) + sleeps = 0 + r.sleepFn = sleepFn + if m, err = r.Manifests(ctx); err != nil { + t.Fatal(err) + } + r.retries = 2 + if _, err := m.Get(ctx, digest.Digest("foo")); err != repo.getErr { + t.Fatalf("unexpected: %v %#v", err, r) + } + r.retries = 2 + if m, err := m.Exists(ctx, "foo"); m || err != repo.getErr { + t.Fatalf("unexpected: %v %v %#v", m, err, r) + } + if sleeps != 4 { + 
t.Fatal(sleeps) + } + + r.retries = 2 + b := r.Blobs(ctx) + if err != nil { + t.Fatal(err) + } + if _, err := b.Stat(ctx, digest.Digest("x")); err != repo.blobs.statErr { + t.Fatalf("unexpected: %v %#v", err, r) + } + r.retries = 2 + if err := b.ServeBlob(ctx, nil, nil, digest.Digest("foo")); err != repo.blobs.serveErr { + t.Fatalf("unexpected: %v %#v", err, r) + } + r.retries = 2 + if _, err := b.Open(ctx, digest.Digest("foo")); err != repo.blobs.openErr { + t.Fatalf("unexpected: %v %#v", err, r) + } +} + +func Test_verifyManifest_Get(t *testing.T) { + tests := []struct { + name string + dgst digest.Digest + err error + manifest distribution.Manifest + options []distribution.ManifestServiceOption + want distribution.Manifest + wantErr bool + }{ + { + dgst: payload1Digest, + manifest: &fakeManifest{payload: []byte(payload1)}, + want: &fakeManifest{payload: []byte(payload1)}, + }, + { + dgst: payload2Digest, + manifest: &fakeManifest{payload: []byte(payload2)}, + want: &fakeManifest{payload: []byte(payload2)}, + }, + { + dgst: payload1Digest, + manifest: &fakeManifest{payload: []byte(payload2)}, + wantErr: true, + }, + { + dgst: payload1Digest, + manifest: &fakeManifest{payload: []byte(payload1), err: fmt.Errorf("unknown")}, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ms := &fakeManifestService{err: tt.err, manifest: tt.manifest} + m := manifestServiceVerifier{ + ManifestService: ms, + } + ctx := context.Background() + got, err := m.Get(ctx, tt.dgst, tt.options...) 
+ if (err != nil) != tt.wantErr { + t.Errorf("verifyManifest.Get() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("verifyManifest.Get() = %v, want %v", got, tt.want) + } + }) + } +} + +const ( + payload1 = `{"some":"content"}` + payload2 = `{"some":"content"} ` +) + +var ( + payload1Digest = digest.SHA256.FromString(payload1) + payload2Digest = digest.SHA256.FromString(payload2) +) + +type fakeManifest struct { + mediaType string + payload []byte + err error +} + +func (m *fakeManifest) References() []distribution.Descriptor { + panic("not implemented") +} + +func (m *fakeManifest) Payload() (mediaType string, payload []byte, err error) { + return m.mediaType, m.payload, m.err +} + +type fakeManifestService struct { + manifest distribution.Manifest + err error +} + +func (s *fakeManifestService) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { + panic("not implemented") +} + +func (s *fakeManifestService) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + return s.manifest, s.err +} + +func (s *fakeManifestService) Put(ctx context.Context, manifest distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + panic("not implemented") +} + +func (s *fakeManifestService) Delete(ctx context.Context, dgst digest.Digest) error { + panic("not implemented") +} + +func Test_blobStoreVerifier_Get(t *testing.T) { + tests := []struct { + name string + bytes []byte + err error + dgst digest.Digest + want []byte + wantErr bool + }{ + { + dgst: payload1Digest, + bytes: []byte(payload1), + want: []byte(payload1), + }, + { + dgst: payload2Digest, + bytes: []byte(payload2), + want: []byte(payload2), + }, + { + dgst: payload1Digest, + bytes: []byte(payload2), + wantErr: true, + }, + { + dgst: payload1Digest, + bytes: []byte(payload1), + err: fmt.Errorf("unknown"), + wantErr: true, + }, + } + for _, tt 
:= range tests { + t.Run(tt.name, func(t *testing.T) { + bs := &fakeBlobStore{err: tt.err, bytes: tt.bytes} + b := blobStoreVerifier{ + BlobStore: bs, + } + ctx := context.Background() + got, err := b.Get(ctx, tt.dgst) + if (err != nil) != tt.wantErr { + t.Errorf("blobStoreVerifier.Get() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("blobStoreVerifier.Get() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_blobStoreVerifier_Open(t *testing.T) { + tests := []struct { + name string + bytes []byte + err error + dgst digest.Digest + want func(t *testing.T, got distribution.ReadSeekCloser) + wantErr bool + }{ + { + dgst: payload1Digest, + bytes: []byte(payload1), + want: func(t *testing.T, got distribution.ReadSeekCloser) { + data, err := ioutil.ReadAll(got) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal([]byte(payload1), data) { + t.Fatalf("contents not equal: %s", hex.Dump(data)) + } + }, + }, + { + dgst: payload2Digest, + bytes: []byte(payload2), + want: func(t *testing.T, got distribution.ReadSeekCloser) { + data, err := ioutil.ReadAll(got) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal([]byte(payload2), data) { + t.Fatalf("contents not equal: %s", hex.Dump(data)) + } + }, + }, + { + dgst: payload1Digest, + bytes: []byte(payload2), + want: func(t *testing.T, got distribution.ReadSeekCloser) { + data, err := ioutil.ReadAll(got) + if err == nil || !strings.Contains(err.Error(), "content integrity error") || !strings.Contains(err.Error(), payload2Digest.String()) { + t.Fatal(err) + } + if !bytes.Equal([]byte(payload2), data) { + t.Fatalf("contents not equal: %s", hex.Dump(data)) + } + }, + }, + { + dgst: payload1Digest, + bytes: []byte(payload2), + want: func(t *testing.T, got distribution.ReadSeekCloser) { + _, err := got.Seek(0, 0) + if err == nil || err.Error() != "invoked seek" { + t.Fatal(err) + } + data, err := ioutil.ReadAll(got) + if err != nil { + t.Fatal(err) + } + if 
!bytes.Equal([]byte(payload2), data) { + t.Fatalf("contents not equal: %s", hex.Dump(data)) + } + }, + }, + { + dgst: payload1Digest, + bytes: []byte(payload1), + err: fmt.Errorf("unknown"), + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + bs := &fakeBlobStore{err: tt.err, bytes: tt.bytes} + b := blobStoreVerifier{ + BlobStore: bs, + } + ctx := context.Background() + got, err := b.Open(ctx, tt.dgst) + if (err != nil) != tt.wantErr { + t.Errorf("blobStoreVerifier.Get() error = %v, wantErr %v", err, tt.wantErr) + return + } + if err != nil { + return + } + tt.want(t, got) + }) + } +} + +type fakeSeekCloser struct { + *bytes.Buffer +} + +func (f fakeSeekCloser) Seek(offset int64, whence int) (int64, error) { + return 0, fmt.Errorf("invoked seek") +} + +func (f fakeSeekCloser) Close() error { + return fmt.Errorf("not implemented") +} + +type fakeBlobStore struct { + bytes []byte + err error +} + +func (s *fakeBlobStore) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + panic("not implemented") +} + +func (s *fakeBlobStore) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + return s.bytes, s.err +} + +func (s *fakeBlobStore) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + return fakeSeekCloser{bytes.NewBuffer(s.bytes)}, s.err +} + +func (s *fakeBlobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + panic("not implemented") +} + +func (s *fakeBlobStore) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func (s *fakeBlobStore) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func (s *fakeBlobStore) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + panic("not implemented") +} + +func (s *fakeBlobStore) 
Delete(ctx context.Context, dgst digest.Digest) error { + panic("not implemented") +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/registryclient/credentials.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/credentials.go new file mode 100644 index 000000000..c9d22c760 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/registryclient/credentials.go @@ -0,0 +1,90 @@ +package registryclient + +import ( + "net/url" + "sync" + + "github.com/docker/distribution/registry/client/auth" +) + +var ( + NoCredentials auth.CredentialStore = &noopCredentialStore{} +) + +type RefreshTokenStore interface { + RefreshToken(url *url.URL, service string) string + SetRefreshToken(url *url.URL, service string, token string) +} + +func NewRefreshTokenStore() RefreshTokenStore { + return &refreshTokenStore{} +} + +type refreshTokenKey struct { + url string + service string +} + +type refreshTokenStore struct { + lock sync.Mutex + store map[refreshTokenKey]string +} + +func (s *refreshTokenStore) RefreshToken(url *url.URL, service string) string { + s.lock.Lock() + defer s.lock.Unlock() + return s.store[refreshTokenKey{url: url.String(), service: service}] +} + +func (s *refreshTokenStore) SetRefreshToken(url *url.URL, service string, token string) { + s.lock.Lock() + defer s.lock.Unlock() + if s.store == nil { + s.store = make(map[refreshTokenKey]string) + } + s.store[refreshTokenKey{url: url.String(), service: service}] = token +} + +type noopCredentialStore struct{} + +func (s *noopCredentialStore) Basic(url *url.URL) (string, string) { + return "", "" +} + +func (s *noopCredentialStore) RefreshToken(url *url.URL, service string) string { + return "" +} + +func (s *noopCredentialStore) SetRefreshToken(url *url.URL, service string, token string) { +} + +func NewBasicCredentials() *BasicCredentials { + return &BasicCredentials{refreshTokenStore: &refreshTokenStore{}} +} + +type basicForURL struct { + url url.URL + username, password 
string +} + +type BasicCredentials struct { + creds []basicForURL + *refreshTokenStore +} + +func (c *BasicCredentials) Add(url *url.URL, username, password string) { + c.creds = append(c.creds, basicForURL{*url, username, password}) +} + +func (c *BasicCredentials) Basic(url *url.URL) (string, string) { + for _, cred := range c.creds { + if len(cred.url.Host) != 0 && cred.url.Host != url.Host { + continue + } + if len(cred.url.Path) != 0 && cred.url.Path != url.Path { + continue + } + return cred.username, cred.password + } + return "", "" +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/registryclient/credentials_test.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/credentials_test.go new file mode 100644 index 000000000..b33b0d7fc --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/registryclient/credentials_test.go @@ -0,0 +1,20 @@ +package registryclient + +import ( + "net/url" + "testing" +) + +func TestBasicCredentials(t *testing.T) { + creds := NewBasicCredentials() + creds.Add(&url.URL{Host: "localhost"}, "test", "other") + if u, p := creds.Basic(&url.URL{Host: "test"}); u != "" || p != "" { + t.Fatalf("unexpected response: %s %s", u, p) + } + if u, p := creds.Basic(&url.URL{Host: "localhost"}); u != "test" || p != "other" { + t.Fatalf("unexpected response: %s %s", u, p) + } + if u, p := creds.Basic(&url.URL{Host: "localhost", Path: "/foo"}); u != "test" || p != "other" { + t.Fatalf("unexpected response: %s %s", u, p) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/trigger/annotations.go b/vendor/github.com/openshift/library-go/pkg/image/trigger/annotations.go new file mode 100644 index 000000000..4ca0617a1 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/trigger/annotations.go @@ -0,0 +1,215 @@ +package trigger + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + + "k8s.io/klog" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/openshift/library-go/pkg/image/referencemutator" +) + +func CalculateAnnotationTriggers(m metav1.Object, prefix string) (string, string, []ObjectFieldTrigger, error) { + var key, namespace string + if namespace = m.GetNamespace(); len(namespace) > 0 { + key = prefix + namespace + "/" + m.GetName() + } else { + key = prefix + m.GetName() + } + t, ok := m.GetAnnotations()[TriggerAnnotationKey] + if !ok { + return key, namespace, nil, nil + } + triggers := []ObjectFieldTrigger{} + if err := json.Unmarshal([]byte(t), &triggers); err != nil { + return key, namespace, nil, err + } + if hasDuplicateTriggers(triggers) { + return key, namespace, nil, fmt.Errorf("duplicate triggers are not allowed") + } + return key, namespace, triggers, nil +} + +func hasDuplicateTriggers(triggers []ObjectFieldTrigger) bool { + for i := range triggers { + for j := i + 1; j < len(triggers); j++ { + if triggers[i].FieldPath == triggers[j].FieldPath { + return true + } + } + } + return false +} + +func parseContainerReference(path string) (init bool, selector string, remainder string, ok bool) { + switch { + case strings.HasPrefix(path, "containers["): + remainder = strings.TrimPrefix(path, "containers[") + case strings.HasPrefix(path, "initContainers["): + init = true + remainder = strings.TrimPrefix(path, "initContainers[") + default: + return false, "", "", false + } + end := strings.Index(remainder, "]") + if end == -1 { + return false, "", "", false + } + selector = remainder[:end] + remainder = remainder[end+1:] + if len(remainder) > 0 && remainder[0] == '.' 
{ + remainder = remainder[1:] + } + return init, selector, remainder, true +} + +func findContainerBySelector(spec referencemutator.PodSpecReferenceMutator, init bool, selector string) (referencemutator.ContainerMutator, bool) { + if i, err := strconv.Atoi(selector); err == nil { + return spec.GetContainerByIndex(init, i) + } + // TODO: potentially make this more flexible, like whitespace + if name := strings.TrimSuffix(strings.TrimPrefix(selector, "?(@.name==\""), "\")"); name != selector { + return spec.GetContainerByName(name) + } + return nil, false +} + +// ContainerForObjectFieldPath returns a reference to the container in the object with pod spec +// underneath fieldPath. Returns error if no such container exists or the field path is invalid. +// Returns the remaining field path beyond the container, if any. +func ContainerForObjectFieldPath(obj runtime.Object, fieldPath string) (referencemutator.ContainerMutator, string, error) { + spec, err := referencemutator.GetPodSpecReferenceMutator(obj) + if err != nil { + return nil, fieldPath, err + } + specPath := spec.Path().String() + containerPath := strings.TrimPrefix(fieldPath, specPath) + if containerPath == fieldPath { + return nil, fieldPath, fmt.Errorf("1 field path is not valid: %s", fieldPath) + } + containerPath = strings.TrimPrefix(containerPath, ".") + init, selector, remainder, ok := parseContainerReference(containerPath) + if !ok { + return nil, fieldPath, fmt.Errorf("2 field path is not valid: %s", fieldPath) + } + container, ok := findContainerBySelector(spec, init, selector) + if !ok { + return nil, fieldPath, fmt.Errorf("no such container: %s", selector) + } + return container, remainder, nil +} + +// UpdateObjectFromImages attempts to set the appropriate object information. If changes are necessary, it lazily copies +// obj and returns it, or if no changes are necessary returns nil. 
+func UpdateObjectFromImages(obj runtime.Object, tagRetriever TagRetriever) (runtime.Object, error) { + var updated runtime.Object + m, err := meta.Accessor(obj) + if err != nil { + return nil, err + } + spec, err := referencemutator.GetPodSpecReferenceMutator(obj) + if err != nil { + return nil, err + } + path := spec.Path() + basePath := path.String() + "." + _, _, triggers, err := CalculateAnnotationTriggers(m, "/") + if err != nil { + return nil, err + } + klog.V(5).Infof("%T/%s has triggers: %#v", obj, m.GetName(), triggers) + for _, trigger := range triggers { + if trigger.Paused { + continue + } + fieldPath := trigger.FieldPath + if !strings.HasPrefix(trigger.FieldPath, basePath) { + klog.V(5).Infof("%T/%s trigger %s did not match base path %s", obj, m.GetName(), trigger.FieldPath, basePath) + continue + } + fieldPath = strings.TrimPrefix(fieldPath, basePath) + + namespace := trigger.From.Namespace + if len(namespace) == 0 { + namespace = m.GetNamespace() + } + ref, _, ok := tagRetriever.ImageStreamTag(namespace, trigger.From.Name) + if !ok { + klog.V(5).Infof("%T/%s detected no pending image on %s from %#v", obj, m.GetName(), trigger.FieldPath, trigger.From) + continue + } + + init, selector, remainder, ok := parseContainerReference(fieldPath) + if !ok || remainder != "image" { + return nil, fmt.Errorf("field path is not valid: %s", trigger.FieldPath) + } + + container, ok := findContainerBySelector(spec, init, selector) + if !ok { + return nil, fmt.Errorf("no such container: %s", trigger.FieldPath) + } + + if container.GetImage() != ref { + if updated == nil { + updated = obj.DeepCopyObject() + spec, _ = referencemutator.GetPodSpecReferenceMutator(updated) + container, _ = findContainerBySelector(spec, init, selector) + } + klog.V(5).Infof("%T/%s detected change on %s = %s", obj, m.GetName(), trigger.FieldPath, ref) + container.SetImage(ref) + } + } + return updated, nil +} + +// ContainerImageChanged returns true if any container image referenced by 
newTriggers changed. +func ContainerImageChanged(oldObj, newObj runtime.Object, newTriggers []ObjectFieldTrigger) bool { + for _, trigger := range newTriggers { + if trigger.Paused { + continue + } + + newContainer, _, err := ContainerForObjectFieldPath(newObj, trigger.FieldPath) + if err != nil { + klog.V(5).Infof("%v", err) + continue + } + + oldContainer, _, err := ContainerForObjectFieldPath(oldObj, trigger.FieldPath) + if err != nil { + // might just be a result of the update + continue + } + + if newContainer.GetImage() != oldContainer.GetImage() { + return true + } + } + + return false +} + +type AnnotationUpdater interface { + Update(obj runtime.Object) error +} + +type AnnotationReactor struct { + Updater AnnotationUpdater +} + +func (r *AnnotationReactor) ImageChanged(obj runtime.Object, tagRetriever TagRetriever) error { + changed, err := UpdateObjectFromImages(obj, tagRetriever) + if err != nil { + return err + } + if changed != nil { + return r.Updater.Update(changed) + } + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/trigger/annotations_test.go b/vendor/github.com/openshift/library-go/pkg/image/trigger/annotations_test.go new file mode 100644 index 000000000..afb3fff5e --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/trigger/annotations_test.go @@ -0,0 +1,312 @@ +package trigger + +import ( + "encoding/json" + "reflect" + "sort" + "strings" + "testing" + + kapps "k8s.io/api/apps/v1beta1" + kapiv1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/client-go/util/jsonpath" +) + +type fakeTagResponse struct { + Namespace string + Name string + Ref string + RV int64 +} + +type fakeTagRetriever []fakeTagResponse + +func (r fakeTagRetriever) ImageStreamTag(namespace, name string) (string, int64, bool) { + for _, resp := range r { + if resp.Namespace != namespace || 
resp.Name != name { + continue + } + return resp.Ref, resp.RV, true + } + return "", 0, false +} + +type fakeUpdater struct { + Object runtime.Object + Err error +} + +func (u *fakeUpdater) Update(obj runtime.Object) error { + u.Object = obj + return u.Err +} + +func testStatefulSet(params []ObjectFieldTrigger, containers map[string]string) *kapps.StatefulSet { + obj := &kapps.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: kapps.StatefulSetSpec{ + Template: kapiv1.PodTemplateSpec{}, + }, + } + data, _ := json.Marshal(params) + obj.Annotations = map[string]string{TriggerAnnotationKey: string(data)} + var names, initNames []string + for k := range containers { + if strings.HasPrefix(k, "-") { + initNames = append(initNames, k[1:]) + } else { + names = append(names, k) + } + } + sort.Sort(sort.StringSlice(initNames)) + sort.Sort(sort.StringSlice(names)) + for _, name := range initNames { + obj.Spec.Template.Spec.InitContainers = append(obj.Spec.Template.Spec.InitContainers, kapiv1.Container{Name: name, Image: containers["-"+name]}) + } + for _, name := range names { + obj.Spec.Template.Spec.Containers = append(obj.Spec.Template.Spec.Containers, kapiv1.Container{Name: name, Image: containers[name]}) + } + return obj +} + +func TestAnnotationJSONPath(t *testing.T) { + _, err := jsonpath.Parse("field_path", "spec.template.spec.containers[?(@.name==\"test\")].image") + if err != nil { + t.Error(err) + } +} + +func TestAnnotationsReactor(t *testing.T) { + testCases := []struct { + tags []fakeTagResponse + obj *kapps.StatefulSet + response *kapps.StatefulSet + expected *kapps.StatefulSet + expectedErr bool + }{ + { + obj: &kapps.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + }, + }, + + { + // no container, expect error + tags: []fakeTagResponse{{Namespace: "other", Name: "stream-1:1", Ref: "image-lookup-1", RV: 2}}, + obj: testStatefulSet([]ObjectFieldTrigger{ + { + From: ObjectReference{Name: 
"stream-1:1", Namespace: "other", Kind: "ImageStreamTag"}, + FieldPath: "spec.template.spec.containers[?(@.name==\"test\")].image", + }, + }, nil), + expectedErr: true, + }, + + { + // container, but path spec is wrong, expect error + tags: []fakeTagResponse{{Namespace: "other", Name: "stream-1:1", Ref: "image-lookup-1", RV: 2}}, + obj: testStatefulSet([]ObjectFieldTrigger{ + { + From: ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"}, + FieldPath: "spec.template.spec.containers[?(@.name==\"test\")]", + }, + }, map[string]string{"test": ""}), + expectedErr: true, + }, + { + // container, but path spec is wrong, expect error + tags: []fakeTagResponse{{Namespace: "other", Name: "stream-1:1", Ref: "image-lookup-1", RV: 2}}, + obj: testStatefulSet([]ObjectFieldTrigger{ + { + From: ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"}, + FieldPath: "spec.template.spec.containers[?(@.name==\"test\").image", + }, + }, map[string]string{"test": ""}), + expectedErr: true, + }, + { + // container, but path spec is wrong, expect error + tags: []fakeTagResponse{{Namespace: "other", Name: "stream-1:1", Ref: "image-lookup-1", RV: 2}}, + obj: testStatefulSet([]ObjectFieldTrigger{ + { + From: ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"}, + FieldPath: "spec.template.spec.containers[@.name=test].image", + }, + }, map[string]string{"test": ""}), + expectedErr: true, + }, + + { + // no ref, no change + obj: testStatefulSet([]ObjectFieldTrigger{ + { + From: ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"}, + FieldPath: "spec.template.spec.containers[?(@.name==\"test\")].image", + }, + }, map[string]string{"test": ""}), + }, + + { + // resolved without a change in another namespace + tags: []fakeTagResponse{{Namespace: "other", Name: "stream-1:1", Ref: "image-lookup-1", RV: 2}}, + obj: testStatefulSet([]ObjectFieldTrigger{ + { + From: ObjectReference{Name: 
"stream-1:1", Namespace: "other", Kind: "ImageStreamTag"}, + FieldPath: "spec.template.spec.containers[?(@.name==\"test\")].image", + }, + }, map[string]string{"test": ""}), + response: &kapps.StatefulSet{}, + expected: testStatefulSet([]ObjectFieldTrigger{ + { + From: ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"}, + FieldPath: "spec.template.spec.containers[?(@.name==\"test\")].image", + }, + }, map[string]string{"test": "image-lookup-1"}), + }, + + { + // resolved for init containers + tags: []fakeTagResponse{{Namespace: "other", Name: "stream-1:1", Ref: "image-lookup-1", RV: 2}}, + obj: testStatefulSet([]ObjectFieldTrigger{ + { + From: ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"}, + FieldPath: "spec.template.spec.initContainers[?(@.name==\"test\")].image", + }, + }, map[string]string{"-test": ""}), + response: &kapps.StatefulSet{}, + expected: testStatefulSet([]ObjectFieldTrigger{ + { + From: ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"}, + FieldPath: "spec.template.spec.initContainers[?(@.name==\"test\")].image", + }, + }, map[string]string{"-test": "image-lookup-1"}), + }, + + { + // will not resolve if not automatic + tags: []fakeTagResponse{{Namespace: "other", Name: "stream-1:1", Ref: "image-lookup-1", RV: 2}}, + obj: testStatefulSet([]ObjectFieldTrigger{ + { + Paused: true, + From: ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"}, + FieldPath: "spec.template.spec.containers[?(@.name==\"test\")].image", + }, + }, map[string]string{"test": ""}), + response: &kapps.StatefulSet{}, + }, + + { + // will fire if only one trigger resolves + tags: []fakeTagResponse{{Namespace: "other", Name: "stream-1:1", Ref: "image-lookup-1", RV: 2}}, + obj: testStatefulSet([]ObjectFieldTrigger{ + { + From: ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"}, + FieldPath: 
"spec.template.spec.containers[?(@.name==\"test\")].image", + }, + { + From: ObjectReference{Name: "stream-2:1", Namespace: "other", Kind: "ImageStreamTag"}, + FieldPath: "spec.template.spec.containers[?(@.name==\"test2\")].image", + }, + }, map[string]string{"test": "", "test2": ""}), + response: &kapps.StatefulSet{}, + expected: testStatefulSet([]ObjectFieldTrigger{ + { + From: ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"}, + FieldPath: "spec.template.spec.containers[?(@.name==\"test\")].image", + }, + { + From: ObjectReference{Name: "stream-2:1", Namespace: "other", Kind: "ImageStreamTag"}, + FieldPath: "spec.template.spec.containers[?(@.name==\"test2\")].image", + }, + }, map[string]string{"test": "image-lookup-1", "test2": ""}), + }, + + { + // will fire if a trigger has already been resolved before + tags: []fakeTagResponse{{Namespace: "other", Name: "stream-1:1", Ref: "image-lookup-1", RV: 2}}, + obj: testStatefulSet([]ObjectFieldTrigger{ + { + From: ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"}, + FieldPath: "spec.template.spec.containers[?(@.name==\"test\")].image", + }, + { + From: ObjectReference{Name: "stream-2:1", Namespace: "other", Kind: "ImageStreamTag"}, + FieldPath: "spec.template.spec.containers[?(@.name==\"test2\")].image", + }, + }, map[string]string{"test": "", "test2": "old-image"}), + response: &kapps.StatefulSet{}, + expected: testStatefulSet([]ObjectFieldTrigger{ + { + From: ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"}, + FieldPath: "spec.template.spec.containers[?(@.name==\"test\")].image", + }, + { + From: ObjectReference{Name: "stream-2:1", Namespace: "other", Kind: "ImageStreamTag"}, + FieldPath: "spec.template.spec.containers[?(@.name==\"test2\")].image", + }, + }, map[string]string{"test": "image-lookup-1", "test2": "old-image"}), + }, + + { + // will fire if both triggers are resolved + tags: []fakeTagResponse{{Namespace: "other", 
Name: "stream-1:1", Ref: "image-lookup-1", RV: 2}}, + obj: testStatefulSet([]ObjectFieldTrigger{ + { + From: ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"}, + FieldPath: "spec.template.spec.containers[?(@.name==\"test\")].image", + }, + { + From: ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"}, + FieldPath: "spec.template.spec.containers[?(@.name==\"test2\")].image", + }, + }, map[string]string{"test": "", "test2": ""}), + response: &kapps.StatefulSet{}, + expected: testStatefulSet([]ObjectFieldTrigger{ + { + From: ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"}, + FieldPath: "spec.template.spec.containers[?(@.name==\"test\")].image", + }, + { + From: ObjectReference{Name: "stream-1:1", Namespace: "other", Kind: "ImageStreamTag"}, + FieldPath: "spec.template.spec.containers[?(@.name==\"test2\")].image", + }, + }, map[string]string{"test": "image-lookup-1", "test2": "image-lookup-1"}), + }, + } + + for i, test := range testCases { + u := &fakeUpdater{} + r := AnnotationReactor{Updater: u} + initial := test.obj.DeepCopy() + err := r.ImageChanged(test.obj, fakeTagRetriever(test.tags)) + if !equality.Semantic.DeepEqual(initial, test.obj) { + t.Errorf("%d: should not have mutated: %s", i, diff.ObjectReflectDiff(initial, test.obj)) + } + switch { + case err == nil && test.expectedErr, err != nil && !test.expectedErr: + t.Errorf("%d: unexpected error: %v", i, err) + continue + case err != nil: + continue + } + if test.expected != nil { + if u.Object == nil { + t.Errorf("%d: no response defined", i) + continue + } + if !reflect.DeepEqual(test.expected, u.Object) { + t.Errorf("%d: not equal: %s", i, diff.ObjectReflectDiff(test.expected, u.Object)) + continue + } + } else { + if u.Object != nil { + t.Errorf("%d: unexpected update: %v", i, u.Object) + continue + } + } + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/trigger/interfaces.go 
b/vendor/github.com/openshift/library-go/pkg/image/trigger/interfaces.go new file mode 100644 index 000000000..49fa88a02 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/trigger/interfaces.go @@ -0,0 +1,7 @@ +package trigger + +// TagRetriever returns information about a tag, including whether it exists +// and the observed resource version of the object at the time the tag was loaded. +type TagRetriever interface { + ImageStreamTag(namespace, name string) (ref string, rv int64, ok bool) +} diff --git a/vendor/github.com/openshift/library-go/pkg/image/trigger/types.go b/vendor/github.com/openshift/library-go/pkg/image/trigger/types.go new file mode 100644 index 000000000..478ca456a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/image/trigger/types.go @@ -0,0 +1,29 @@ +package trigger + +// TriggerAnnotationKey is the annotation used on resources to signal they wish to have +// container image references changed when an image stream tag is updated. Today, only +// containers can be specified by fieldPath. +const TriggerAnnotationKey = "image.openshift.io/triggers" + +// ObjectFieldTrigger links a field on the current object to another object for mutation. +type ObjectFieldTrigger struct { + // from is the object this should trigger from. The kind and name fields must be set. + From ObjectReference `json:"from"` + // fieldPath is a JSONPath string to the field to edit on the object. Required. + FieldPath string `json:"fieldPath"` + // paused is true if this trigger is temporarily disabled. Optional. + Paused bool `json:"paused,omitempty"` +} + +// ObjectReference identifies an object by its name and kind. +type ObjectReference struct { + // kind is the referenced object's schema. + Kind string `json:"kind"` + // name is the name of the object. + Name string `json:"name"` + // namespace is the namespace the object is located in. 
Optional if the object is not + // namespaced, or if left empty on a namespaced object, means the current namespace. + Namespace string `json:"namespace,omitempty"` + // apiVersion is the group and version the type exists in. Optional. + APIVersion string `json:"apiVersion,omitempty"` +} diff --git a/vendor/github.com/openshift/library-go/pkg/legacyapi/legacygroupification/groupification.go b/vendor/github.com/openshift/library-go/pkg/legacyapi/legacygroupification/groupification.go new file mode 100644 index 000000000..4a3535653 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/legacyapi/legacygroupification/groupification.go @@ -0,0 +1,228 @@ +package legacygroupification + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + appsv1 "github.com/openshift/api/apps/v1" + authorizationv1 "github.com/openshift/api/authorization/v1" + buildv1 "github.com/openshift/api/build/v1" + imagev1 "github.com/openshift/api/image/v1" + networkv1 "github.com/openshift/api/network/v1" + oauthv1 "github.com/openshift/api/oauth/v1" + projectv1 "github.com/openshift/api/project/v1" + quotav1 "github.com/openshift/api/quota/v1" + routev1 "github.com/openshift/api/route/v1" + securityv1 "github.com/openshift/api/security/v1" + templatev1 "github.com/openshift/api/template/v1" + userv1 "github.com/openshift/api/user/v1" +) + +// deprecated +func IsOAPI(gvk schema.GroupVersionKind) bool { + if len(gvk.Group) > 0 { + return false + } + + _, ok := oapiKindsToGroup[gvk.Kind] + return ok +} + +// deprecated +func OAPIToGroupifiedGVK(gvk *schema.GroupVersionKind) { + if len(gvk.Group) > 0 { + return + } + + newGroup, ok := oapiKindsToGroup[gvk.Kind] + if !ok { + return + } + gvk.Group = newGroup +} + +// deprecated +func OAPIToGroupified(uncast runtime.Object, gvk *schema.GroupVersionKind) { + if len(gvk.Group) > 0 { + return + } + + switch obj := uncast.(type) { + case 
*unstructured.Unstructured: + newGroup := fixOAPIGroupKindInTopLevelUnstructured(obj.Object) + if len(newGroup) > 0 { + gvk.Group = newGroup + uncast.GetObjectKind().SetGroupVersionKind(*gvk) + } + case *unstructured.UnstructuredList: + newGroup := fixOAPIGroupKindInTopLevelUnstructured(obj.Object) + if len(newGroup) > 0 { + gvk.Group = newGroup + uncast.GetObjectKind().SetGroupVersionKind(*gvk) + } + + case *appsv1.DeploymentConfig, *appsv1.DeploymentConfigList, + *appsv1.DeploymentConfigRollback, + *appsv1.DeploymentLog, + *appsv1.DeploymentRequest: + gvk.Group = appsv1.GroupName + uncast.GetObjectKind().SetGroupVersionKind(*gvk) + + case *authorizationv1.ClusterRoleBinding, *authorizationv1.ClusterRoleBindingList, + *authorizationv1.ClusterRole, *authorizationv1.ClusterRoleList, + *authorizationv1.Role, *authorizationv1.RoleList, + *authorizationv1.RoleBinding, *authorizationv1.RoleBindingList, + *authorizationv1.RoleBindingRestriction, *authorizationv1.RoleBindingRestrictionList, + *authorizationv1.SubjectRulesReview, *authorizationv1.SelfSubjectRulesReview, + *authorizationv1.ResourceAccessReview, *authorizationv1.LocalResourceAccessReview, + *authorizationv1.SubjectAccessReview, *authorizationv1.LocalSubjectAccessReview: + gvk.Group = authorizationv1.GroupName + uncast.GetObjectKind().SetGroupVersionKind(*gvk) + + case *buildv1.BuildConfig, *buildv1.BuildConfigList, + *buildv1.Build, *buildv1.BuildList, + *buildv1.BuildLog, + *buildv1.BuildRequest, + *buildv1.BinaryBuildRequestOptions: + gvk.Group = buildv1.GroupName + uncast.GetObjectKind().SetGroupVersionKind(*gvk) + + case *imagev1.Image, *imagev1.ImageList, + *imagev1.ImageSignature, + *imagev1.ImageStreamImage, + *imagev1.ImageStreamImport, + *imagev1.ImageStreamMapping, + *imagev1.ImageStream, *imagev1.ImageStreamList, + *imagev1.ImageStreamTag: + gvk.Group = imagev1.GroupName + uncast.GetObjectKind().SetGroupVersionKind(*gvk) + + case *networkv1.ClusterNetwork, *networkv1.ClusterNetworkList, + 
*networkv1.NetNamespace, *networkv1.NetNamespaceList, + *networkv1.HostSubnet, *networkv1.HostSubnetList, + *networkv1.EgressNetworkPolicy, *networkv1.EgressNetworkPolicyList: + gvk.Group = networkv1.GroupName + uncast.GetObjectKind().SetGroupVersionKind(*gvk) + + case *projectv1.Project, *projectv1.ProjectList, + *projectv1.ProjectRequest: + gvk.Group = projectv1.GroupName + uncast.GetObjectKind().SetGroupVersionKind(*gvk) + + case *quotav1.ClusterResourceQuota, *quotav1.ClusterResourceQuotaList, + *quotav1.AppliedClusterResourceQuota, *quotav1.AppliedClusterResourceQuotaList: + gvk.Group = quotav1.GroupName + uncast.GetObjectKind().SetGroupVersionKind(*gvk) + + case *oauthv1.OAuthAuthorizeToken, *oauthv1.OAuthAuthorizeTokenList, + *oauthv1.OAuthClientAuthorization, *oauthv1.OAuthClientAuthorizationList, + *oauthv1.OAuthClient, *oauthv1.OAuthClientList, + *oauthv1.OAuthAccessToken, *oauthv1.OAuthAccessTokenList: + gvk.Group = oauthv1.GroupName + uncast.GetObjectKind().SetGroupVersionKind(*gvk) + + case *routev1.Route, *routev1.RouteList: + gvk.Group = routev1.GroupName + uncast.GetObjectKind().SetGroupVersionKind(*gvk) + + case *securityv1.SecurityContextConstraints, *securityv1.SecurityContextConstraintsList, + *securityv1.PodSecurityPolicySubjectReview, + *securityv1.PodSecurityPolicySelfSubjectReview, + *securityv1.PodSecurityPolicyReview: + gvk.Group = securityv1.GroupName + uncast.GetObjectKind().SetGroupVersionKind(*gvk) + + case *templatev1.Template, *templatev1.TemplateList: + gvk.Group = templatev1.GroupName + uncast.GetObjectKind().SetGroupVersionKind(*gvk) + + case *userv1.Group, *userv1.GroupList, + *userv1.Identity, *userv1.IdentityList, + *userv1.UserIdentityMapping, + *userv1.User, *userv1.UserList: + gvk.Group = userv1.GroupName + uncast.GetObjectKind().SetGroupVersionKind(*gvk) + + } +} + +var oapiKindsToGroup = map[string]string{ + "DeploymentConfigRollback": "apps.openshift.io", + "DeploymentConfig": "apps.openshift.io", "DeploymentConfigList": 
"apps.openshift.io", + "DeploymentLog": "apps.openshift.io", + "DeploymentRequest": "apps.openshift.io", + "ClusterRoleBinding": "authorization.openshift.io", "ClusterRoleBindingList": "authorization.openshift.io", + "ClusterRole": "authorization.openshift.io", "ClusterRoleList": "authorization.openshift.io", + "RoleBindingRestriction": "authorization.openshift.io", "RoleBindingRestrictionList": "authorization.openshift.io", + "RoleBinding": "authorization.openshift.io", "RoleBindingList": "authorization.openshift.io", + "Role": "authorization.openshift.io", "RoleList": "authorization.openshift.io", + "SubjectRulesReview": "authorization.openshift.io", "SelfSubjectRulesReview": "authorization.openshift.io", + "ResourceAccessReview": "authorization.openshift.io", "LocalResourceAccessReview": "authorization.openshift.io", + "SubjectAccessReview": "authorization.openshift.io", "LocalSubjectAccessReview": "authorization.openshift.io", + "BuildConfig": "build.openshift.io", "BuildConfigList": "build.openshift.io", + "Build": "build.openshift.io", "BuildList": "build.openshift.io", + "BinaryBuildRequestOptions": "build.openshift.io", + "BuildLog": "build.openshift.io", + "BuildRequest": "build.openshift.io", + "Image": "image.openshift.io", "ImageList": "image.openshift.io", + "ImageSignature": "image.openshift.io", + "ImageStreamImage": "image.openshift.io", + "ImageStreamImport": "image.openshift.io", + "ImageStreamMapping": "image.openshift.io", + "ImageStream": "image.openshift.io", "ImageStreamList": "image.openshift.io", + "ImageStreamTag": "image.openshift.io", "ImageStreamTagList": "image.openshift.io", + "ClusterNetwork": "network.openshift.io", "ClusterNetworkList": "network.openshift.io", + "EgressNetworkPolicy": "network.openshift.io", "EgressNetworkPolicyList": "network.openshift.io", + "HostSubnet": "network.openshift.io", "HostSubnetList": "network.openshift.io", + "NetNamespace": "network.openshift.io", "NetNamespaceList": "network.openshift.io", + 
"OAuthAccessToken": "oauth.openshift.io", "OAuthAccessTokenList": "oauth.openshift.io", + "OAuthAuthorizeToken": "oauth.openshift.io", "OAuthAuthorizeTokenList": "oauth.openshift.io", + "OAuthClientAuthorization": "oauth.openshift.io", "OAuthClientAuthorizationList": "oauth.openshift.io", + "OAuthClient": "oauth.openshift.io", "OAuthClientList": "oauth.openshift.io", + "Project": "project.openshift.io", "ProjectList": "project.openshift.io", + "ProjectRequest": "project.openshift.io", + "ClusterResourceQuota": "quota.openshift.io", "ClusterResourceQuotaList": "quota.openshift.io", + "AppliedClusterResourceQuota": "quota.openshift.io", "AppliedClusterResourceQuotaList": "quota.openshift.io", + "Route": "route.openshift.io", "RouteList": "route.openshift.io", + "SecurityContextConstraints": "security.openshift.io", "SecurityContextConstraintsList": "security.openshift.io", + "PodSecurityPolicySubjectReview": "security.openshift.io", + "PodSecurityPolicySelfSubjectReview": "security.openshift.io", + "PodSecurityPolicyReview": "security.openshift.io", + "Template": "template.openshift.io", "TemplateList": "template.openshift.io", + "Group": "user.openshift.io", "GroupList": "user.openshift.io", + "Identity": "user.openshift.io", "IdentityList": "user.openshift.io", + "UserIdentityMapping": "user.openshift.io", + "User": "user.openshift.io", "UserList": "user.openshift.io", +} + +func fixOAPIGroupKindInTopLevelUnstructured(obj map[string]interface{}) string { + kind, ok := obj["kind"] + if !ok { + return "" + } + kindStr, ok := kind.(string) + if !ok { + return "" + } + newGroup, ok := oapiKindsToGroup[kindStr] + if !ok { + return "" + } + + apiVersion, ok := obj["apiVersion"] + if !ok { + return newGroup + } + apiVersionStr, ok := apiVersion.(string) + if !ok { + return newGroup + } + + if apiVersionStr != "v1" { + return newGroup + } + obj["apiVersion"] = newGroup + "/v1" + + return newGroup +} diff --git 
a/vendor/github.com/openshift/library-go/pkg/network/networkutils/networkutils.go b/vendor/github.com/openshift/library-go/pkg/network/networkutils/networkutils.go new file mode 100644 index 000000000..b266db54e --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/network/networkutils/networkutils.go @@ -0,0 +1,65 @@ +package networkutils + +import ( + "fmt" + "net" +) + +const ( + SingleTenantPluginName = "redhat/openshift-ovs-subnet" + MultiTenantPluginName = "redhat/openshift-ovs-multitenant" + NetworkPolicyPluginName = "redhat/openshift-ovs-networkpolicy" +) + +var localHosts []string = []string{"127.0.0.1", "::1", "localhost"} +var localSubnets []string = []string{"10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "fc00::/7", "fe80::/10"} + +// IsPrivateAddress returns true if given address in format "[:]" is a localhost or an ip from +// private network range (e.g. 172.30.0.1, 192.168.0.1). +func IsPrivateAddress(addr string) bool { + host, _, err := net.SplitHostPort(addr) + if err != nil { + // assume indexName is of the form `host` without the port and go on. + host = addr + } + for _, localHost := range localHosts { + if host == localHost { + return true + } + } + + ip := net.ParseIP(host) + if ip == nil { + return false + } + + for _, subnet := range localSubnets { + ipnet, err := ParseCIDRMask(subnet) + if err != nil { + continue // should not happen + } + if ipnet.Contains(ip) { + return true + } + } + return false +} + +// ParseCIDRMask parses a CIDR string and ensures that it has no bits set beyond the +// network mask length. Use this when the input is supposed to be either a description of +// a subnet (eg, "192.168.1.0/24", meaning "192.168.1.0 to 192.168.1.255"), or a mask for +// matching against (eg, "192.168.1.15/32", meaning "must match all 32 bits of the address +// "192.168.1.15"). 
Use net.ParseCIDR() when the input is a host address that also +// describes the subnet that it is on (eg, "192.168.1.15/24", meaning "the address +// 192.168.1.15 on the network 192.168.1.0/24"). +func ParseCIDRMask(cidr string) (*net.IPNet, error) { + ip, net, err := net.ParseCIDR(cidr) + if err != nil { + return nil, err + } + if !ip.Equal(net.IP) { + maskLen, addrLen := net.Mask.Size() + return nil, fmt.Errorf("CIDR network specification %q is not in canonical form (should be %s/%d or %s/%d?)", cidr, ip.Mask(net.Mask).String(), maskLen, ip.String(), addrLen) + } + return net, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/network/networkutils/networkutils_test.go b/vendor/github.com/openshift/library-go/pkg/network/networkutils/networkutils_test.go new file mode 100644 index 000000000..4124d2087 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/network/networkutils/networkutils_test.go @@ -0,0 +1,100 @@ +package networkutils + +import ( + "strings" + "testing" +) + +func TestParseCIDRMask(t *testing.T) { + tests := []struct { + cidr string + fixedShort string + fixedLong string + }{ + { + cidr: "192.168.0.0/16", + }, + { + cidr: "192.168.1.0/24", + }, + { + cidr: "192.168.1.1/32", + }, + { + cidr: "192.168.1.0/16", + fixedShort: "192.168.0.0/16", + fixedLong: "192.168.1.0/32", + }, + { + cidr: "192.168.1.1/24", + fixedShort: "192.168.1.0/24", + fixedLong: "192.168.1.1/32", + }, + } + + for _, test := range tests { + _, err := ParseCIDRMask(test.cidr) + if test.fixedShort == "" && test.fixedLong == "" { + if err != nil { + t.Fatalf("unexpected error parsing CIDR mask %q: %v", test.cidr, err) + } + } else { + if err == nil { + t.Fatalf("unexpected lack of error parsing CIDR mask %q", test.cidr) + } + if !strings.Contains(err.Error(), test.fixedShort) { + t.Fatalf("error does not contain expected string %q: %v", test.fixedShort, err) + } + if !strings.Contains(err.Error(), test.fixedLong) { + t.Fatalf("error does not contain expected 
string %q: %v", test.fixedLong, err) + } + } + } +} + +func TestIsPrivateAddress(t *testing.T) { + for _, tc := range []struct { + address string + isLocal bool + }{ + {"localhost", true}, + {"example.com", false}, + {"registry.localhost", false}, + + {"9.255.255.255", false}, + {"10.0.0.1", true}, + {"10.1.255.255", true}, + {"10.255.255.255", true}, + {"11.0.0.1", false}, + + {"127.0.0.1", true}, + + {"172.15.255.253", false}, + {"172.16.0.1", true}, + {"172.30.0.1", true}, + {"172.31.255.255", true}, + {"172.32.0.1", false}, + + {"192.167.122.1", false}, + {"192.168.0.1", true}, + {"192.168.122.1", true}, + {"192.168.255.255", true}, + {"192.169.1.1", false}, + + {"::1", true}, + + {"fe00::1", false}, + {"fd12:3456:789a:1::1", true}, + {"fe82:3456:789a:1::1", true}, + {"ff00::1", false}, + } { + res := IsPrivateAddress(tc.address) + if tc.isLocal && !res { + t.Errorf("address %q considered not local", tc.address) + continue + } + if !tc.isLocal && res { + t.Errorf("address %q considered local", tc.address) + } + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/oauth/oauthdiscovery/discovery.go b/vendor/github.com/openshift/library-go/pkg/oauth/oauthdiscovery/discovery.go new file mode 100644 index 000000000..713a40420 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/oauth/oauthdiscovery/discovery.go @@ -0,0 +1,32 @@ +package oauthdiscovery + +// OauthAuthorizationServerMetadata holds OAuth 2.0 Authorization Server Metadata used for discovery +// https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 +type OauthAuthorizationServerMetadata struct { + // The authorization server's issuer identifier, which is a URL that uses the https scheme and has no query or fragment components. + // This is the location where .well-known RFC 5785 [RFC5785] resources containing information about the authorization server are published. + Issuer string `json:"issuer"` + + // URL of the authorization server's authorization endpoint [RFC6749]. 
+ AuthorizationEndpoint string `json:"authorization_endpoint"` + + // URL of the authorization server's token endpoint [RFC6749]. + TokenEndpoint string `json:"token_endpoint"` + + // JSON array containing a list of the OAuth 2.0 [RFC6749] scope values that this authorization server supports. + // Servers MAY choose not to advertise some supported scope values even when this parameter is used. + ScopesSupported []string `json:"scopes_supported"` + + // JSON array containing a list of the OAuth 2.0 response_type values that this authorization server supports. + // The array values used are the same as those used with the response_types parameter defined by "OAuth 2.0 Dynamic Client Registration Protocol" [RFC7591]. + ResponseTypesSupported []string `json:"response_types_supported"` + + // JSON array containing a list of the OAuth 2.0 grant type values that this authorization server supports. + // The array values used are the same as those used with the grant_types parameter defined by "OAuth 2.0 Dynamic Client Registration Protocol" [RFC7591]. + GrantTypesSupported []string `json:"grant_types_supported"` + + // JSON array containing a list of PKCE [RFC7636] code challenge methods supported by this authorization server. + // Code challenge method values are used in the "code_challenge_method" parameter defined in Section 4.3 of [RFC7636]. + // The valid code challenge method values are those registered in the IANA "PKCE Code Challenge Methods" registry [IANA.OAuth.Parameters]. 
+ CodeChallengeMethodsSupported []string `json:"code_challenge_methods_supported"` +} diff --git a/vendor/github.com/openshift/library-go/pkg/oauth/oauthdiscovery/urls.go b/vendor/github.com/openshift/library-go/pkg/oauth/oauthdiscovery/urls.go new file mode 100644 index 000000000..2539d4a39 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/oauth/oauthdiscovery/urls.go @@ -0,0 +1,37 @@ +package oauthdiscovery + +import ( + "path" + "strings" +) + +const ( + AuthorizePath = "/authorize" + TokenPath = "/token" + InfoPath = "/info" + + RequestTokenEndpoint = "/token/request" + DisplayTokenEndpoint = "/token/display" + ImplicitTokenEndpoint = "/token/implicit" +) + +const OpenShiftOAuthAPIPrefix = "/oauth" + +func OpenShiftOAuthAuthorizeURL(masterAddr string) string { + return openShiftOAuthURL(masterAddr, AuthorizePath) +} +func OpenShiftOAuthTokenURL(masterAddr string) string { + return openShiftOAuthURL(masterAddr, TokenPath) +} +func OpenShiftOAuthTokenRequestURL(masterAddr string) string { + return openShiftOAuthURL(masterAddr, RequestTokenEndpoint) +} +func OpenShiftOAuthTokenDisplayURL(masterAddr string) string { + return openShiftOAuthURL(masterAddr, DisplayTokenEndpoint) +} +func OpenShiftOAuthTokenImplicitURL(masterAddr string) string { + return openShiftOAuthURL(masterAddr, ImplicitTokenEndpoint) +} +func openShiftOAuthURL(masterAddr, oauthEndpoint string) string { + return strings.TrimRight(masterAddr, "/") + path.Join(OpenShiftOAuthAPIPrefix, oauthEndpoint) +} diff --git a/vendor/github.com/openshift/library-go/pkg/oauth/oauthserviceaccountclient/oauthclientregistry.go b/vendor/github.com/openshift/library-go/pkg/oauth/oauthserviceaccountclient/oauthclientregistry.go new file mode 100644 index 000000000..0f5c5cc0c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/oauth/oauthserviceaccountclient/oauthclientregistry.go @@ -0,0 +1,501 @@ +package oauthserviceaccountclient + +import ( + "fmt" + "net" + "net/url" + "strconv" + 
"strings" + + "github.com/openshift/library-go/pkg/authorization/scopemetadata" + + clientv1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount" + kcoreclient "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/record" + + oauthv1 "github.com/openshift/api/oauth/v1" + routev1 "github.com/openshift/api/route/v1" + routev1client "github.com/openshift/client-go/route/clientset/versioned/typed/route/v1" +) + +const ( + OAuthWantChallengesAnnotationPrefix = "serviceaccounts.openshift.io/oauth-want-challenges" + + // Prefix used for statically specifying redirect URIs for a service account via annotations + // The value can be partially supplied with the dynamic prefix to override the resource's defaults + OAuthRedirectModelAnnotationURIPrefix = "serviceaccounts.openshift.io/oauth-redirecturi." + + // Prefix used for dynamically specifying redirect URIs using resources for a service account via annotations + OAuthRedirectModelAnnotationReferencePrefix = "serviceaccounts.openshift.io/oauth-redirectreference." 
+ + routeKind = "Route" + // TODO add ingress support + // IngressKind = "Ingress" +) + +var ( + modelPrefixes = []string{ + OAuthRedirectModelAnnotationURIPrefix, + OAuthRedirectModelAnnotationReferencePrefix, + } + + emptyGroupKind = schema.GroupKind{} // Used with static redirect URIs + routeGroupKind = schema.GroupKind{Group: "route.openshift.io", Kind: routeKind} + legacyRouteGroupKind = schema.GroupKind{Group: "", Kind: routeKind} // to support redirect reference with old group + + scheme = runtime.NewScheme() + codecFactory = serializer.NewCodecFactory(scheme) +) + +func init() { + oauthv1.Install(scheme) + oauthv1.DeprecatedInstallWithoutGroup(scheme) +} + +// namesToObjMapperFunc is linked to a given GroupKind. +// Based on the namespace and names provided, it builds a map of resource name to redirect URIs. +// The redirect URIs represent the default values as specified by the resource. +// These values can be overridden by user specified data. Errors returned are informative and non-fatal. +type namesToObjMapperFunc func(namespace string, names sets.String) (map[string]redirectURIList, []error) + +// TODO add ingress support +// var ingressGroupKind = routeapi.SchemeGroupVersion.WithKind(IngressKind).GroupKind() + +// OAuthClientGetter exposes a way to get a specific client. This is useful for other registries to get scope limitations +// on particular clients. This interface will make its easier to write a future cache on it +type OAuthClientGetter interface { + Get(name string, options metav1.GetOptions) (*oauthv1.OAuthClient, error) +} + +type saOAuthClientAdapter struct { + saClient kcoreclient.ServiceAccountsGetter + secretClient kcoreclient.SecretsGetter + eventRecorder record.EventRecorder + routeClient routev1client.RoutesGetter + // TODO add ingress support + //ingressClient ?? + + delegate OAuthClientGetter + grantMethod oauthv1.GrantHandlerType + + decoder runtime.Decoder +} + +// model holds fields that could be used to build redirect URI(s). 
+// The resource components define where to get the default redirect data from. +// If specified, the uri components are used to override the default data. +// As long as the resulting URI(s) have a scheme and a host, they are considered valid. +type model struct { + scheme string + port string + path string + host string + + group string + kind string + name string +} + +// getGroupKind is used to determine if a group and kind combination is supported. +func (m *model) getGroupKind() schema.GroupKind { + return schema.GroupKind{Group: m.group, Kind: m.kind} +} + +// updateFromURI updates the data in the model with the user provided URL data. +func (m *model) updateFromURI(u *url.URL) { + m.scheme, m.host, m.path = u.Scheme, u.Host, u.Path + if h, p, err := net.SplitHostPort(m.host); err == nil { + m.host = h + m.port = p + } +} + +// updateFromReference updates the data in the model with the user provided object reference data. +func (m *model) updateFromReference(r *oauthv1.RedirectReference) { + m.group, m.kind, m.name = r.Group, r.Kind, r.Name +} + +type modelList []model + +// getNames determines the unique, non-empty resource names specified by the models. +func (ml modelList) getNames() sets.String { + data := sets.NewString() + for _, model := range ml { + if len(model.name) > 0 { + data.Insert(model.name) + } + } + return data +} + +// getRedirectURIs uses the mapping provided by a namesToObjMapperFunc to enumerate all of the redirect URIs +// based on the name of each resource. The user provided data in the model overrides the data in the mapping. +// The returned redirect URIs may contain duplicate and invalid entries. All items in the modelList must have a +// uniform group/kind, and the objMapper must be specifically for that group/kind. 
+func (ml modelList) getRedirectURIs(objMapper map[string]redirectURIList) redirectURIList { + var data redirectURIList + for _, m := range ml { + if uris, ok := objMapper[m.name]; ok { + for _, uri := range uris { + u := uri // Make sure we do not mutate objMapper + u.merge(&m) + data = append(data, u) + } + } + } + return data +} + +type redirectURI struct { + scheme string + host string + port string + path string +} + +func (uri *redirectURI) String() string { + host := uri.host + if len(uri.port) > 0 { + host = net.JoinHostPort(host, uri.port) + } + return (&url.URL{Scheme: uri.scheme, Host: host, Path: uri.path}).String() +} + +// isValid returns true when both scheme and host are non-empty. +func (uri *redirectURI) isValid() bool { + return len(uri.scheme) > 0 && len(uri.host) > 0 +} + +type redirectURIList []redirectURI + +// extractValidRedirectURIStrings returns the redirect URIs that are valid per `isValid` as strings. +func (rl redirectURIList) extractValidRedirectURIStrings() []string { + var data []string + for _, u := range rl { + if u.isValid() { + data = append(data, u.String()) + } + } + return data +} + +// merge overrides the default data in the uri with the user provided data in the model. 
+func (uri *redirectURI) merge(m *model) { + if len(m.scheme) > 0 { + uri.scheme = m.scheme + } + if len(m.path) > 0 { + uri.path = m.path + } + if len(m.port) > 0 { + uri.port = m.port + } + if len(m.host) > 0 { + uri.host = m.host + } +} + +var _ OAuthClientGetter = &saOAuthClientAdapter{} + +func NewServiceAccountOAuthClientGetter( + saClient kcoreclient.ServiceAccountsGetter, + secretClient kcoreclient.SecretsGetter, + eventClient kcoreclient.EventInterface, + routeClient routev1client.RoutesGetter, + delegate OAuthClientGetter, + grantMethod oauthv1.GrantHandlerType, +) OAuthClientGetter { + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartRecordingToSink(&kcoreclient.EventSinkImpl{Interface: eventClient}) + recorder := eventBroadcaster.NewRecorder(scheme, clientv1.EventSource{Component: "service-account-oauth-client-getter"}) + return &saOAuthClientAdapter{ + saClient: saClient, + secretClient: secretClient, + eventRecorder: recorder, + routeClient: routeClient, + delegate: delegate, + grantMethod: grantMethod, + decoder: codecFactory.UniversalDecoder(), + } +} + +func (a *saOAuthClientAdapter) Get(name string, options metav1.GetOptions) (*oauthv1.OAuthClient, error) { + var err error + saNamespace, saName, err := apiserverserviceaccount.SplitUsername(name) + if err != nil { + return a.delegate.Get(name, options) + } + + sa, err := a.saClient.ServiceAccounts(saNamespace).Get(saName, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + var saErrors []error + var failReason string + // Create a warning event combining the collected annotation errors upon failure. + defer func() { + if err != nil && len(saErrors) > 0 && len(failReason) > 0 { + a.eventRecorder.Event(sa, corev1.EventTypeWarning, failReason, utilerrors.NewAggregate(saErrors).Error()) + } + }() + + redirectURIs := []string{} + modelsMap, errs := parseModelsMap(sa.Annotations, a.decoder) + if len(errs) > 0 { + saErrors = append(saErrors, errs...) 
+ } + + if len(modelsMap) > 0 { + uris, extractErrors := a.extractRedirectURIs(modelsMap, saNamespace) + if len(uris) > 0 { + redirectURIs = append(redirectURIs, uris.extractValidRedirectURIStrings()...) + } + if len(extractErrors) > 0 { + saErrors = append(saErrors, extractErrors...) + } + } + if len(redirectURIs) == 0 { + err = fmt.Errorf("%v has no redirectURIs; set %v= or create a dynamic URI using %v=", + name, OAuthRedirectModelAnnotationURIPrefix, OAuthRedirectModelAnnotationReferencePrefix, + ) + failReason = "NoSAOAuthRedirectURIs" + saErrors = append(saErrors, err) + return nil, err + } + + tokens, err := a.getServiceAccountTokens(sa) + if err != nil { + return nil, err + } + if len(tokens) == 0 { + err = fmt.Errorf("%v has no tokens", name) + failReason = "NoSAOAuthTokens" + saErrors = append(saErrors, err) + return nil, err + } + + saWantsChallenges, _ := strconv.ParseBool(sa.Annotations[OAuthWantChallengesAnnotationPrefix]) + + saClient := &oauthv1.OAuthClient{ + ObjectMeta: metav1.ObjectMeta{Name: name}, + ScopeRestrictions: getScopeRestrictionsFor(saNamespace, saName), + AdditionalSecrets: tokens, + RespondWithChallenges: saWantsChallenges, + + // TODO update this to allow https redirection to any + // 1. service IP (useless in general) + // 2. service DNS (useless in general) + // 3. loopback? (useful, but maybe a bit weird) + RedirectURIs: sets.NewString(redirectURIs...).List(), + GrantMethod: a.grantMethod, + } + return saClient, nil +} + +// parseModelsMap builds a map of model name to model using a service account's annotations. +// The model name is only used for building the map (it ties together the uri and reference annotations) +// and serves no functional purpose other than making testing easier. Errors returned are informative and non-fatal. 
+func parseModelsMap(annotations map[string]string, decoder runtime.Decoder) (map[string]model, []error) { + models := map[string]model{} + parseErrors := []error{} + for key, value := range annotations { + prefix, name, ok := parseModelPrefixName(key) + if !ok { + continue + } + m := models[name] + switch prefix { + case OAuthRedirectModelAnnotationURIPrefix: + if u, err := url.Parse(value); err == nil { + m.updateFromURI(u) + } else { + parseErrors = append(parseErrors, err) + } + case OAuthRedirectModelAnnotationReferencePrefix: + r := &oauthv1.OAuthRedirectReference{} + if err := runtime.DecodeInto(decoder, []byte(value), r); err == nil { + m.updateFromReference(&r.Reference) + } else { + parseErrors = append(parseErrors, err) + } + } + models[name] = m + } + return models, parseErrors +} + +// parseModelPrefixName determines if the given key is a model prefix. +// Returns what prefix was used, the name of the model, and true if a model prefix was actually used. +func parseModelPrefixName(key string) (string, string, bool) { + for _, prefix := range modelPrefixes { + if strings.HasPrefix(key, prefix) { + return prefix, key[len(prefix):], true + } + } + return "", "", false +} + +// extractRedirectURIs builds redirect URIs using the given models and namespace. +// The returned redirect URIs may contain duplicates and invalid entries. Errors returned are informative and non-fatal. 
+func (a *saOAuthClientAdapter) extractRedirectURIs(modelsMap map[string]model, namespace string) (redirectURIList, []error) { + var data redirectURIList + routeErrors := []error{} + groupKindModelListMapper := map[schema.GroupKind]modelList{} // map of GroupKind to all models belonging to it + groupKindModelToURI := map[schema.GroupKind]namesToObjMapperFunc{ + routeGroupKind: a.redirectURIsFromRoutes, + // TODO add support for ingresses by creating the appropriate GroupKind and namesToObjMapperFunc + // ingressGroupKind: a.redirectURIsFromIngresses, + } + + for _, m := range modelsMap { + gk := m.getGroupKind() + if gk == legacyRouteGroupKind { + gk = routeGroupKind // support legacy route group without doing extra API calls + } + if len(m.name) == 0 && gk == emptyGroupKind { // Is this a static redirect URI? + uri := redirectURI{} // No defaults wanted + uri.merge(&m) + data = append(data, uri) + } else if _, ok := groupKindModelToURI[gk]; ok { // a GroupKind is valid if we have a namesToObjMapperFunc to handle it + groupKindModelListMapper[gk] = append(groupKindModelListMapper[gk], m) + } + } + + for gk, models := range groupKindModelListMapper { + if names := models.getNames(); names.Len() > 0 { + objMapper, errs := groupKindModelToURI[gk](namespace, names) + if len(objMapper) > 0 { + data = append(data, models.getRedirectURIs(objMapper)...) + } + if len(errs) > 0 { + routeErrors = append(routeErrors, errs...) + } + } + } + + return data, routeErrors +} + +// redirectURIsFromRoutes is the namesToObjMapperFunc specific to Routes. +// Returns a map of route name to redirect URIs that contain the default data as specified by the route's ingresses. +// Errors returned are informative and non-fatal. 
+func (a *saOAuthClientAdapter) redirectURIsFromRoutes(namespace string, osRouteNames sets.String) (map[string]redirectURIList, []error) { + var routes []routev1.Route + routeErrors := []error{} + routeInterface := a.routeClient.Routes(namespace) + if osRouteNames.Len() > 1 { + if r, err := routeInterface.List(metav1.ListOptions{}); err == nil { + routes = r.Items + } else { + routeErrors = append(routeErrors, err) + } + } else { + if r, err := routeInterface.Get(osRouteNames.List()[0], metav1.GetOptions{}); err == nil { + routes = append(routes, *r) + } else { + routeErrors = append(routeErrors, err) + } + } + routeMap := map[string]redirectURIList{} + for _, route := range routes { + if osRouteNames.Has(route.Name) { + routeMap[route.Name] = redirectURIsFromRoute(&route) + } + } + return routeMap, routeErrors +} + +// redirectURIsFromRoute returns a list of redirect URIs that contain the default data as specified by the given route's ingresses. +func redirectURIsFromRoute(route *routev1.Route) redirectURIList { + var uris redirectURIList + uri := redirectURI{scheme: "https"} // Default to TLS + uri.path = route.Spec.Path + if route.Spec.TLS == nil { + uri.scheme = "http" + } + for _, ingress := range route.Status.Ingress { + if !isRouteIngressValid(&ingress) { + continue + } + u := uri // Copy to avoid mutating the base uri + u.host = ingress.Host + uris = append(uris, u) + } + // If we get this far we know the Route does actually exist, so we need to have at least one uri + // to allow the user to override it in their annotations in case there is no valid ingress + // `extractValidRedirectURIStrings` guarantees that we eventually have the minimum set of required fields + if len(uris) == 0 { + uris = append(uris, uri) + } + return uris +} + +// isRouteIngressValid determines if the RouteIngress has a host and that its conditions has an element with Type=RouteAdmitted and Status=ConditionTrue +func isRouteIngressValid(routeIngress *routev1.RouteIngress) bool { + 
if len(routeIngress.Host) == 0 { + return false + } + for _, condition := range routeIngress.Conditions { + if condition.Type == routev1.RouteAdmitted && condition.Status == corev1.ConditionTrue { + return true + } + } + return false +} + +func getScopeRestrictionsFor(namespace, name string) []oauthv1.ScopeRestriction { + return []oauthv1.ScopeRestriction{ + {ExactValues: []string{ + scopemetadata.UserInfo, + scopemetadata.UserAccessCheck, + scopemetadata.UserListScopedProjects, + scopemetadata.UserListAllProjects, + }}, + {ClusterRole: &oauthv1.ClusterRoleScopeRestriction{RoleNames: []string{"*"}, Namespaces: []string{namespace}, AllowEscalation: true}}, + } +} + +// getServiceAccountTokens returns all ServiceAccountToken secrets for the given ServiceAccount +func (a *saOAuthClientAdapter) getServiceAccountTokens(sa *corev1.ServiceAccount) ([]string, error) { + allSecrets, err := a.secretClient.Secrets(sa.Namespace).List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + tokens := []string{} + for i := range allSecrets.Items { + secret := &allSecrets.Items[i] + if IsServiceAccountToken(secret, sa) { + tokens = append(tokens, string(secret.Data[corev1.ServiceAccountTokenKey])) + } + } + return tokens, nil +} + +// IsServiceAccountToken returns true if the secret is a valid api token for the service account +func IsServiceAccountToken(secret *corev1.Secret, sa *corev1.ServiceAccount) bool { + if secret.Type != corev1.SecretTypeServiceAccountToken { + return false + } + + name := secret.Annotations[corev1.ServiceAccountNameKey] + uid := secret.Annotations[corev1.ServiceAccountUIDKey] + if name != sa.Name { + // Name must match + return false + } + if len(uid) > 0 && uid != string(sa.UID) { + // If UID is specified, it must match + return false + } + + return true +} diff --git a/vendor/github.com/openshift/library-go/pkg/oauth/oauthserviceaccountclient/oauthclientregistry_test.go 
b/vendor/github.com/openshift/library-go/pkg/oauth/oauthserviceaccountclient/oauthclientregistry_test.go new file mode 100644 index 000000000..4566271de --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/oauth/oauthserviceaccountclient/oauthclientregistry_test.go @@ -0,0 +1,1261 @@ +package oauthserviceaccountclient + +import ( + "reflect" + "strings" + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/kubernetes/fake" + clientgotesting "k8s.io/client-go/testing" + "k8s.io/client-go/tools/record" + + oauthv1 "github.com/openshift/api/oauth/v1" + routev1 "github.com/openshift/api/route/v1" + routev1fake "github.com/openshift/client-go/route/clientset/versioned/fake" +) + +var ( + encoder = codecFactory.LegacyCodec(oauthv1.SchemeGroupVersion) + decoder = codecFactory.UniversalDecoder() + serviceAccountsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "serviceaccounts"} + secretsResource = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"} + secretKind = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Secret"} + routesResource = schema.GroupVersionResource{Group: "route.openshift.io", Version: "v1", Resource: "routes"} + routeClientKind = schema.GroupVersionKind{Group: "route.openshift.io", Version: "v1", Kind: "Route"} +) + +func TestGetClient(t *testing.T) { + testCases := []struct { + name string + clientName string + kubeClient *fake.Clientset + routeClient *routev1fake.Clientset + + expectedDelegation bool + expectedErr string + expectedEventMsg string + expectedClient *oauthv1.OAuthClient + expectedKubeActions []clientgotesting.Action + expectedOSActions []clientgotesting.Action + }{ + { + name: "delegate", + clientName: "not:serviceaccount", + 
kubeClient: fake.NewSimpleClientset(), + routeClient: routev1fake.NewSimpleClientset(), + expectedDelegation: true, + expectedKubeActions: []clientgotesting.Action{}, + expectedOSActions: []clientgotesting.Action{}, + }, + { + name: "missing sa", + clientName: "system:serviceaccount:ns-01:missing-sa", + kubeClient: fake.NewSimpleClientset(), + routeClient: routev1fake.NewSimpleClientset(), + expectedErr: `serviceaccounts "missing-sa" not found`, + expectedKubeActions: []clientgotesting.Action{clientgotesting.NewGetAction(serviceAccountsResource, "ns-01", "missing-sa")}, + expectedOSActions: []clientgotesting.Action{}, + }, + { + name: "sa no redirects", + clientName: "system:serviceaccount:ns-01:default", + kubeClient: fake.NewSimpleClientset( + &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + Annotations: map[string]string{}, + }, + }), + routeClient: routev1fake.NewSimpleClientset(), + expectedErr: `system:serviceaccount:ns-01:default has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.`, + expectedEventMsg: `Warning NoSAOAuthRedirectURIs system:serviceaccount:ns-01:default has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.= or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.=`, + + //expectedEventMsg: `Warning NoSAOAuthRedirectURIs [parse ::: missing protocol scheme, system:serviceaccount:ns-01:default has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.= or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.=]`, + expectedKubeActions: []clientgotesting.Action{clientgotesting.NewGetAction(serviceAccountsResource, "ns-01", "default")}, + expectedOSActions: []clientgotesting.Action{}, + }, + { + name: "sa invalid redirect scheme", + clientName: "system:serviceaccount:ns-01:default", + kubeClient: fake.NewSimpleClientset( + &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: 
"ns-01", + Name: "default", + Annotations: map[string]string{OAuthRedirectModelAnnotationURIPrefix + "incomplete": "::"}, + }, + }), + routeClient: routev1fake.NewSimpleClientset(), + expectedErr: `system:serviceaccount:ns-01:default has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.`, + expectedEventMsg: `Warning NoSAOAuthRedirectURIs [parse ::: missing protocol scheme, system:serviceaccount:ns-01:default has no redirectURIs; set serviceaccounts.openshift.io/oauth-redirecturi.= or create a dynamic URI using serviceaccounts.openshift.io/oauth-redirectreference.=]`, + expectedKubeActions: []clientgotesting.Action{clientgotesting.NewGetAction(serviceAccountsResource, "ns-01", "default")}, + expectedOSActions: []clientgotesting.Action{}, + }, + { + name: "sa no tokens", + clientName: "system:serviceaccount:ns-01:default", + kubeClient: fake.NewSimpleClientset( + &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + Annotations: map[string]string{OAuthRedirectModelAnnotationURIPrefix + "one": "http://anywhere"}, + }, + }), + routeClient: routev1fake.NewSimpleClientset(), + expectedErr: `system:serviceaccount:ns-01:default has no tokens`, + expectedEventMsg: `Warning NoSAOAuthTokens system:serviceaccount:ns-01:default has no tokens`, + expectedKubeActions: []clientgotesting.Action{ + clientgotesting.NewGetAction(serviceAccountsResource, "ns-01", "default"), + clientgotesting.NewListAction(secretsResource, secretKind, "ns-01", metav1.ListOptions{}), + }, + expectedOSActions: []clientgotesting.Action{}, + }, + { + name: "good SA", + clientName: "system:serviceaccount:ns-01:default", + kubeClient: fake.NewSimpleClientset( + &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + UID: types.UID("any"), + Annotations: map[string]string{OAuthRedirectModelAnnotationURIPrefix + "one": "http://anywhere"}, + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + 
Namespace: "ns-01", + Name: "default", + Annotations: map[string]string{ + corev1.ServiceAccountNameKey: "default", + corev1.ServiceAccountUIDKey: "any", + }, + }, + Type: corev1.SecretTypeServiceAccountToken, + Data: map[string][]byte{corev1.ServiceAccountTokenKey: []byte("foo")}, + }), + routeClient: routev1fake.NewSimpleClientset(), + expectedClient: &oauthv1.OAuthClient{ + ObjectMeta: metav1.ObjectMeta{Name: "system:serviceaccount:ns-01:default"}, + ScopeRestrictions: getScopeRestrictionsFor("ns-01", "default"), + AdditionalSecrets: []string{"foo"}, + RedirectURIs: []string{"http://anywhere"}, + GrantMethod: oauthv1.GrantHandlerPrompt, + }, + expectedKubeActions: []clientgotesting.Action{ + clientgotesting.NewGetAction(serviceAccountsResource, "ns-01", "default"), + clientgotesting.NewListAction(secretsResource, secretKind, "ns-01", metav1.ListOptions{}), + }, + expectedOSActions: []clientgotesting.Action{}, + }, + { + name: "good SA with valid, simple route redirects", + clientName: "system:serviceaccount:ns-01:default", + kubeClient: fake.NewSimpleClientset( + &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + UID: types.UID("any"), + Annotations: map[string]string{ + OAuthRedirectModelAnnotationURIPrefix + "one": "http://anywhere", + OAuthRedirectModelAnnotationReferencePrefix + "1": buildRedirectObjectReferenceString(routeKind, "route1", "route.openshift.io"), + }, + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + Annotations: map[string]string{ + corev1.ServiceAccountNameKey: "default", + corev1.ServiceAccountUIDKey: "any", + }, + }, + Type: corev1.SecretTypeServiceAccountToken, + Data: map[string][]byte{corev1.ServiceAccountTokenKey: []byte("foo")}, + }), + routeClient: routev1fake.NewSimpleClientset( + &routev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "route1", + UID: types.UID("route1"), + }, + Spec: routev1.RouteSpec{ + 
Path: "/defaultpath", + TLS: &routev1.TLSConfig{}, + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: "example1.com", Conditions: buildValidRouteIngressCondition()}, + }, + }, + }, + ), + expectedClient: &oauthv1.OAuthClient{ + ObjectMeta: metav1.ObjectMeta{Name: "system:serviceaccount:ns-01:default"}, + ScopeRestrictions: getScopeRestrictionsFor("ns-01", "default"), + AdditionalSecrets: []string{"foo"}, + RedirectURIs: []string{"http://anywhere", "https://example1.com/defaultpath"}, + GrantMethod: oauthv1.GrantHandlerPrompt, + }, + expectedKubeActions: []clientgotesting.Action{ + clientgotesting.NewGetAction(serviceAccountsResource, "ns-01", "default"), + clientgotesting.NewListAction(secretsResource, secretKind, "ns-01", metav1.ListOptions{}), + }, + expectedOSActions: []clientgotesting.Action{ + clientgotesting.NewGetAction(routesResource, "ns-01", "route1"), + }, + }, + { + name: "good SA with invalid route redirects", + clientName: "system:serviceaccount:ns-01:default", + kubeClient: fake.NewSimpleClientset( + &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + UID: types.UID("any"), + Annotations: map[string]string{ + OAuthRedirectModelAnnotationURIPrefix + "one": "http://anywhere", + OAuthRedirectModelAnnotationReferencePrefix + "1": buildRedirectObjectReferenceString(routeKind, "route1", "wronggroup"), + OAuthRedirectModelAnnotationReferencePrefix + "2": buildRedirectObjectReferenceString("wrongkind", "route1", "route.openshift.io"), + }, + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + Annotations: map[string]string{ + corev1.ServiceAccountNameKey: "default", + corev1.ServiceAccountUIDKey: "any", + }, + }, + Type: corev1.SecretTypeServiceAccountToken, + Data: map[string][]byte{corev1.ServiceAccountTokenKey: []byte("foo")}, + }), + routeClient: routev1fake.NewSimpleClientset( + &routev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + 
Namespace: "ns-01", + Name: "route1", + UID: types.UID("route1"), + }, + Spec: routev1.RouteSpec{ + Path: "/defaultpath", + TLS: &routev1.TLSConfig{}, + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: "example1.com", Conditions: buildValidRouteIngressCondition()}, + {Host: "example2.com", Conditions: buildValidRouteIngressCondition()}, + {Host: "example3.com", Conditions: buildValidRouteIngressCondition()}, + }, + }, + }, + ), + expectedClient: &oauthv1.OAuthClient{ + ObjectMeta: metav1.ObjectMeta{Name: "system:serviceaccount:ns-01:default"}, + ScopeRestrictions: getScopeRestrictionsFor("ns-01", "default"), + AdditionalSecrets: []string{"foo"}, + RedirectURIs: []string{"http://anywhere"}, + GrantMethod: oauthv1.GrantHandlerPrompt, + }, + expectedKubeActions: []clientgotesting.Action{ + clientgotesting.NewGetAction(serviceAccountsResource, "ns-01", "default"), + clientgotesting.NewListAction(secretsResource, secretKind, "ns-01", metav1.ListOptions{}), + }, + expectedOSActions: []clientgotesting.Action{}, + }, + { + name: "good SA with a route that doesn't have a host", + clientName: "system:serviceaccount:ns-01:default", + kubeClient: fake.NewSimpleClientset( + &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + UID: types.UID("any"), + Annotations: map[string]string{ + OAuthRedirectModelAnnotationURIPrefix + "one": "http://anywhere", + OAuthRedirectModelAnnotationReferencePrefix + "1": buildRedirectObjectReferenceString(routeKind, "route1", ""), + }, + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + Annotations: map[string]string{ + corev1.ServiceAccountNameKey: "default", + corev1.ServiceAccountUIDKey: "any", + }, + }, + Type: corev1.SecretTypeServiceAccountToken, + Data: map[string][]byte{corev1.ServiceAccountTokenKey: []byte("foo")}, + }), + routeClient: routev1fake.NewSimpleClientset( + &routev1.Route{ + ObjectMeta: 
metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "route1", + UID: types.UID("route1"), + }, + Spec: routev1.RouteSpec{ + Path: "/defaultpath", + TLS: &routev1.TLSConfig{}, + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: "", Conditions: buildValidRouteIngressCondition()}, + }, + }, + }, + ), + expectedClient: &oauthv1.OAuthClient{ + ObjectMeta: metav1.ObjectMeta{Name: "system:serviceaccount:ns-01:default"}, + ScopeRestrictions: getScopeRestrictionsFor("ns-01", "default"), + AdditionalSecrets: []string{"foo"}, + RedirectURIs: []string{"http://anywhere"}, + GrantMethod: oauthv1.GrantHandlerPrompt, + }, + expectedKubeActions: []clientgotesting.Action{ + clientgotesting.NewGetAction(serviceAccountsResource, "ns-01", "default"), + clientgotesting.NewListAction(secretsResource, secretKind, "ns-01", metav1.ListOptions{}), + }, + expectedOSActions: []clientgotesting.Action{ + clientgotesting.NewGetAction(routesResource, "ns-01", "route1"), + }, + }, + { + name: "good SA with routes that don't have hosts, some of which are empty or duplicates", + clientName: "system:serviceaccount:ns-01:default", + kubeClient: fake.NewSimpleClientset( + &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + UID: types.UID("any"), + Annotations: map[string]string{ + OAuthRedirectModelAnnotationURIPrefix + "one": "http://anywhere", + OAuthRedirectModelAnnotationReferencePrefix + "1": buildRedirectObjectReferenceString(routeKind, "route1", "route.openshift.io"), + OAuthRedirectModelAnnotationReferencePrefix + "2": buildRedirectObjectReferenceString(routeKind, "route2", ""), + OAuthRedirectModelAnnotationReferencePrefix + "3": buildRedirectObjectReferenceString(routeKind, "missingroute", ""), + }, + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + Annotations: map[string]string{ + corev1.ServiceAccountNameKey: "default", + corev1.ServiceAccountUIDKey: "any", + }, + }, 
+ Type: corev1.SecretTypeServiceAccountToken, + Data: map[string][]byte{corev1.ServiceAccountTokenKey: []byte("foo")}, + }), + routeClient: routev1fake.NewSimpleClientset( + &routev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "route1", + UID: types.UID("route1"), + }, + Spec: routev1.RouteSpec{ + Path: "/defaultpath", + TLS: &routev1.TLSConfig{}, + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: "", Conditions: buildValidRouteIngressCondition()}, + {Host: "a.com", Conditions: buildValidRouteIngressCondition()}, + {Host: ""}, + {Host: "a.com", Conditions: buildValidRouteIngressCondition()}, + {Host: "b.com", Conditions: buildValidRouteIngressCondition()}, + }, + }, + }, + &routev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "route2", + UID: types.UID("route2"), + }, + Spec: routev1.RouteSpec{ + Path: "/path2", + TLS: &routev1.TLSConfig{}, + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: "a.com", Conditions: buildValidRouteIngressCondition()}, + {Host: "", Conditions: buildValidRouteIngressCondition()}, + {Host: "b.com", Conditions: buildValidRouteIngressCondition()}, + {Host: "b.com"}, + {Host: ""}, + }, + }, + }, + ), + expectedClient: &oauthv1.OAuthClient{ + ObjectMeta: metav1.ObjectMeta{Name: "system:serviceaccount:ns-01:default"}, + ScopeRestrictions: getScopeRestrictionsFor("ns-01", "default"), + AdditionalSecrets: []string{"foo"}, + RedirectURIs: []string{"http://anywhere", "https://a.com/defaultpath", "https://a.com/path2", "https://b.com/defaultpath", "https://b.com/path2"}, + GrantMethod: oauthv1.GrantHandlerPrompt, + }, + expectedKubeActions: []clientgotesting.Action{ + clientgotesting.NewGetAction(serviceAccountsResource, "ns-01", "default"), + clientgotesting.NewListAction(secretsResource, secretKind, "ns-01", metav1.ListOptions{}), + }, + expectedOSActions: []clientgotesting.Action{ + clientgotesting.NewListAction(routesResource, 
routeClientKind, "ns-01", metav1.ListOptions{}), + }, + }, + { + name: "host overrides route data", + clientName: "system:serviceaccount:ns-01:default", + kubeClient: fake.NewSimpleClientset( + &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + UID: types.UID("any"), + Annotations: map[string]string{ + OAuthRedirectModelAnnotationReferencePrefix + "1": buildRedirectObjectReferenceString(routeKind, "route1", ""), + OAuthRedirectModelAnnotationURIPrefix + "1": "//redhat.com", + OAuthRedirectModelAnnotationReferencePrefix + "2": buildRedirectObjectReferenceString(routeKind, "route2", "route.openshift.io"), + OAuthRedirectModelAnnotationURIPrefix + "2": "//google.com", + }, + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + Annotations: map[string]string{ + corev1.ServiceAccountNameKey: "default", + corev1.ServiceAccountUIDKey: "any", + }, + }, + Type: corev1.SecretTypeServiceAccountToken, + Data: map[string][]byte{corev1.ServiceAccountTokenKey: []byte("foo")}, + }), + routeClient: routev1fake.NewSimpleClientset( + &routev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "route1", + UID: types.UID("route1"), + }, + Spec: routev1.RouteSpec{ + Path: "/defaultpath", + TLS: &routev1.TLSConfig{}, + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: ""}, + }, + }, + }, + &routev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "route2", + UID: types.UID("route2"), + }, + Spec: routev1.RouteSpec{ + Path: "/otherpath", + TLS: &routev1.TLSConfig{}, + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: "ignored.com", Conditions: buildValidRouteIngressCondition()}, + {Host: "alsoignored.com", Conditions: buildValidRouteIngressCondition()}, + }, + }, + }, + ), + expectedClient: &oauthv1.OAuthClient{ + ObjectMeta: metav1.ObjectMeta{Name: "system:serviceaccount:ns-01:default"}, + 
ScopeRestrictions: getScopeRestrictionsFor("ns-01", "default"), + AdditionalSecrets: []string{"foo"}, + RedirectURIs: []string{"https://google.com/otherpath", "https://redhat.com/defaultpath"}, + GrantMethod: oauthv1.GrantHandlerPrompt, + }, + expectedKubeActions: []clientgotesting.Action{ + clientgotesting.NewGetAction(serviceAccountsResource, "ns-01", "default"), + clientgotesting.NewListAction(secretsResource, secretKind, "ns-01", metav1.ListOptions{}), + }, + expectedOSActions: []clientgotesting.Action{ + clientgotesting.NewListAction(routesResource, routeClientKind, "ns-01", metav1.ListOptions{}), + }, + }, + { + name: "good SA with valid, route redirects using the same route twice", + clientName: "system:serviceaccount:ns-01:default", + kubeClient: fake.NewSimpleClientset( + &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + UID: types.UID("any"), + Annotations: map[string]string{ + OAuthRedirectModelAnnotationURIPrefix + "1": "/awesomepath", + OAuthRedirectModelAnnotationReferencePrefix + "1": buildRedirectObjectReferenceString(routeKind, "route1", ""), + OAuthRedirectModelAnnotationURIPrefix + "2": "//:8000", + OAuthRedirectModelAnnotationReferencePrefix + "2": buildRedirectObjectReferenceString(routeKind, "route1", "route.openshift.io"), + }, + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "default", + Annotations: map[string]string{ + corev1.ServiceAccountNameKey: "default", + corev1.ServiceAccountUIDKey: "any", + }, + }, + Type: corev1.SecretTypeServiceAccountToken, + Data: map[string][]byte{corev1.ServiceAccountTokenKey: []byte("foo")}, + }), + routeClient: routev1fake.NewSimpleClientset( + &routev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-01", + Name: "route1", + UID: types.UID("route1"), + }, + Spec: routev1.RouteSpec{ + TLS: &routev1.TLSConfig{}, + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: "woot.com", 
Conditions: buildValidRouteIngressCondition()}, + }, + }, + }, + ), + expectedClient: &oauthv1.OAuthClient{ + ObjectMeta: metav1.ObjectMeta{Name: "system:serviceaccount:ns-01:default"}, + ScopeRestrictions: getScopeRestrictionsFor("ns-01", "default"), + AdditionalSecrets: []string{"foo"}, + RedirectURIs: []string{"https://woot.com/awesomepath", "https://woot.com:8000"}, + GrantMethod: oauthv1.GrantHandlerPrompt, + }, + expectedKubeActions: []clientgotesting.Action{ + clientgotesting.NewGetAction(serviceAccountsResource, "ns-01", "default"), + clientgotesting.NewListAction(secretsResource, secretKind, "ns-01", metav1.ListOptions{}), + }, + expectedOSActions: []clientgotesting.Action{ + clientgotesting.NewGetAction(routesResource, "ns-01", "route1"), + }, + }, + } + + for _, tc := range testCases { + delegate := &fakeDelegate{} + fakerecorder := record.NewFakeRecorder(100) + getter := saOAuthClientAdapter{ + saClient: tc.kubeClient.CoreV1(), + secretClient: tc.kubeClient.CoreV1(), + eventRecorder: fakerecorder, + routeClient: tc.routeClient.RouteV1(), + delegate: delegate, + grantMethod: oauthv1.GrantHandlerPrompt, + decoder: codecFactory.UniversalDecoder(), + } + client, err := getter.Get(tc.clientName, metav1.GetOptions{}) + switch { + case len(tc.expectedErr) == 0 && err == nil: + case len(tc.expectedErr) == 0 && err != nil, + len(tc.expectedErr) > 0 && err == nil, + len(tc.expectedErr) > 0 && err != nil && !strings.Contains(err.Error(), tc.expectedErr): + t.Errorf("%s: expected %#v, got %#v", tc.name, tc.expectedErr, err) + continue + } + + if tc.expectedDelegation != delegate.called { + t.Errorf("%s: expected %#v, got %#v", tc.name, tc.expectedDelegation, delegate.called) + continue + } + + if !equality.Semantic.DeepEqual(tc.expectedClient, client) { + t.Errorf("%s: expected %#v, got %#v", tc.name, tc.expectedClient, client) + continue + } + + if !reflect.DeepEqual(tc.expectedKubeActions, tc.kubeClient.Actions()) { + t.Errorf("%s: expected %#v, got %#v", 
tc.name, tc.expectedKubeActions, tc.kubeClient.Actions()) + continue + } + + if !reflect.DeepEqual(tc.expectedOSActions, tc.routeClient.Actions()) { + t.Errorf("%s: expected %#v, got %#v", tc.name, tc.expectedOSActions, tc.routeClient.Actions()) + continue + } + + if len(tc.expectedEventMsg) > 0 { + var ev string + select { + case ev = <-fakerecorder.Events: + default: + } + if tc.expectedEventMsg != ev { + t.Errorf("%s: expected event message %#v, got %#v", tc.name, tc.expectedEventMsg, ev) + } + } + } +} + +type fakeDelegate struct { + called bool +} + +func (d *fakeDelegate) Get(name string, options metav1.GetOptions) (*oauthv1.OAuthClient, error) { + d.called = true + return nil, nil +} + +func TestRedirectURIString(t *testing.T) { + for _, test := range []struct { + name string + uri redirectURI + expected string + }{ + { + name: "host with no port", + uri: redirectURI{ + scheme: "http", + host: "example1.com", + port: "", + path: "/test1", + }, + expected: "http://example1.com/test1", + }, + { + name: "host with port", + uri: redirectURI{ + scheme: "https", + host: "example2.com", + port: "8000", + path: "/test2", + }, + expected: "https://example2.com:8000/test2", + }, + } { + if test.expected != test.uri.String() { + t.Errorf("%s: expected %s, got %s", test.name, test.expected, test.uri.String()) + } + } +} + +func TestMerge(t *testing.T) { + for _, test := range []struct { + name string + uri redirectURI + m model + expected redirectURI + }{ + { + name: "empty model", + uri: redirectURI{ + scheme: "http", + host: "example1.com", + port: "9000", + path: "/test1", + }, + m: model{ + scheme: "", + port: "", + path: "", + }, + expected: redirectURI{ + scheme: "http", + host: "example1.com", + port: "9000", + path: "/test1", + }, + }, + { + name: "full model", + uri: redirectURI{ + scheme: "http", + host: "example1.com", + port: "9000", + path: "/test1", + }, + m: model{ + scheme: "https", + port: "8000", + path: "/ello", + }, + expected: redirectURI{ + scheme: 
"https", + host: "example1.com", + port: "8000", + path: "/ello", + }, + }, + { + name: "only path", + uri: redirectURI{ + scheme: "http", + host: "example1.com", + port: "9000", + path: "/test1", + }, + m: model{ + scheme: "", + port: "", + path: "/newpath", + }, + expected: redirectURI{ + scheme: "http", + host: "example1.com", + port: "9000", + path: "/newpath", + }, + }, + } { + test.uri.merge(&test.m) + if test.expected != test.uri { + t.Errorf("%s: expected %#v, got %#v", test.name, test.expected, test.uri) + } + } +} + +func TestParseModelsMap(t *testing.T) { + for _, test := range []struct { + name string + annotations map[string]string + expected map[string]model + }{ + { + name: "empty annotations", + annotations: map[string]string{}, + expected: map[string]model{}, + }, + { + name: "no model annotations", + annotations: map[string]string{"one": "anywhere"}, + expected: map[string]model{}, + }, + { + name: "static URI annotations", + annotations: map[string]string{ + OAuthRedirectModelAnnotationURIPrefix + "one": "//google.com", + OAuthRedirectModelAnnotationURIPrefix + "two": "justapath", + OAuthRedirectModelAnnotationURIPrefix + "three": "http://redhat.com", + OAuthRedirectModelAnnotationURIPrefix + "four": "http://hello:90/world", + OAuthRedirectModelAnnotationURIPrefix + "five": "scheme0://host0:port0/path0", + OAuthRedirectModelAnnotationReferencePrefix + "five": buildRedirectObjectReferenceString("kind0", "name0", "group0"), + }, + expected: map[string]model{ + "one": { + scheme: "", + port: "", + path: "", + group: "", + kind: "", + name: "", + host: "google.com", + }, + "two": { + scheme: "", + port: "", + path: "justapath", + group: "", + kind: "", + name: "", + }, + "three": { + scheme: "http", + port: "", + path: "", + group: "", + kind: "", + name: "", + host: "redhat.com", + }, + "four": { + scheme: "http", + port: "90", + path: "/world", + group: "", + kind: "", + name: "", + host: "hello", + }, + "five": { + scheme: "scheme0", + port: 
"port0", + path: "/path0", + group: "group0", + kind: "kind0", + name: "name0", + host: "host0", + }, + }, + }, + { + name: "simple model", + annotations: map[string]string{ + OAuthRedirectModelAnnotationReferencePrefix + "one": buildRedirectObjectReferenceString(routeKind, "route1", ""), + }, + expected: map[string]model{ + "one": { + scheme: "", + port: "", + path: "", + group: "", + kind: routeKind, + name: "route1", + }, + }, + }, + { + name: "multiple full models", + annotations: map[string]string{ + OAuthRedirectModelAnnotationReferencePrefix + "one": buildRedirectObjectReferenceString(routeKind, "route1", ""), + OAuthRedirectModelAnnotationURIPrefix + "one": "https://:8000/path1", + + OAuthRedirectModelAnnotationReferencePrefix + "two": buildRedirectObjectReferenceString(routeKind, "route2", "route.openshift.io"), + OAuthRedirectModelAnnotationURIPrefix + "two": "http://:9000/path2", + }, + expected: map[string]model{ + "one": { + scheme: "https", + port: "8000", + path: "/path1", + group: "", + kind: routeKind, + name: "route1", + }, + "two": { + scheme: "http", + port: "9000", + path: "/path2", + group: "route.openshift.io", + kind: routeKind, + name: "route2", + }, + }, + }, + } { + models, errs := parseModelsMap(test.annotations, decoder) + if len(errs) > 0 { + t.Errorf("%s: unexpected parseModelsMap errors %v", test.name, errs) + } + if !reflect.DeepEqual(test.expected, models) { + t.Errorf("%s: expected %#v, got %#v", test.name, test.expected, models) + } + } +} + +func TestGetRedirectURIs(t *testing.T) { + for _, test := range []struct { + name string + namespace string + models modelList + routes []*routev1.Route + expected redirectURIList + }{ + { + name: "single ingress routes", + namespace: "ns01", + models: modelList{ + { + scheme: "https", + port: "8000", + path: "/path1", + group: "", + kind: routeKind, + name: "route1", + }, + { + scheme: "http", + port: "9000", + path: "", + group: "", + kind: routeKind, + name: "route2", + }, + }, + routes: 
[]*routev1.Route{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "route1", + Namespace: "ns01", + }, + Spec: routev1.RouteSpec{ + Path: "/pathA", + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: "exampleA.com", Conditions: buildValidRouteIngressCondition()}, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "route2", + Namespace: "ns01", + }, + Spec: routev1.RouteSpec{ + Path: "/pathB", + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: "exampleB.com", Conditions: buildValidRouteIngressCondition()}, + }, + }, + }, + }, + expected: redirectURIList{ + { + scheme: "https", + host: "exampleA.com", + port: "8000", + path: "/path1", + }, + { + scheme: "http", + host: "exampleB.com", + port: "9000", + path: "/pathB", + }, + }, + }, + { + name: "multiple ingress routes", + namespace: "ns01", + models: modelList{ + { + scheme: "https", + port: "8000", + path: "/path1", + group: "", + kind: routeKind, + name: "route1", + }, + { + scheme: "http", + port: "9000", + path: "", + group: "", + kind: routeKind, + name: "route2", + }, + { + scheme: "http", + port: "", + path: "/secondroute2path", + group: "", + kind: routeKind, + name: "route2", + }, + }, + routes: []*routev1.Route{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "route1", + Namespace: "ns01", + }, + Spec: routev1.RouteSpec{ + Path: "/pathA", + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: "A.com", Conditions: buildValidRouteIngressCondition()}, + {Host: "B.com", Conditions: buildValidRouteIngressCondition()}, + {Host: "C.com", Conditions: buildValidRouteIngressCondition()}, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "route2", + Namespace: "ns01", + }, + Spec: routev1.RouteSpec{ + Path: "/pathB", + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: "0.com", Conditions: buildValidRouteIngressCondition()}, + {Host: "1.com", Conditions: buildValidRouteIngressCondition()}, + }, + 
}, + }, + }, + expected: redirectURIList{ + { + scheme: "https", + host: "A.com", + port: "8000", + path: "/path1", + }, + { + scheme: "https", + host: "B.com", + port: "8000", + path: "/path1", + }, + { + scheme: "https", + host: "C.com", + port: "8000", + path: "/path1", + }, + { + scheme: "http", + host: "0.com", + port: "9000", + path: "/pathB", + }, + { + scheme: "http", + host: "1.com", + port: "9000", + path: "/pathB", + }, + { + scheme: "http", + host: "0.com", + port: "", + path: "/secondroute2path", + }, + { + scheme: "http", + host: "1.com", + port: "", + path: "/secondroute2path", + }, + }, + }, + } { + a := buildRouteClient(test.routes) + uris, errs := a.redirectURIsFromRoutes(test.namespace, test.models.getNames()) + if len(errs) > 0 { + t.Errorf("%s: unexpected redirectURIsFromRoutes errors %v", test.name, errs) + } + actual := test.models.getRedirectURIs(uris) + if !reflect.DeepEqual(test.expected, actual) { + t.Errorf("%s: expected %#v, got %#v", test.name, test.expected, actual) + } + } +} + +func TestRedirectURIsFromRoutes(t *testing.T) { + for _, test := range []struct { + name string + namespace string + names sets.String + routes []*routev1.Route + expected map[string]redirectURIList + }{ + { + name: "single route with single ingress", + namespace: "ns01", + names: sets.NewString("routeA"), + routes: []*routev1.Route{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "routeA", + Namespace: "ns01", + }, + Spec: routev1.RouteSpec{ + Path: "/pathA", + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: "exampleA.com", Conditions: buildValidRouteIngressCondition()}, + }, + }, + }, + }, + expected: map[string]redirectURIList{ + "routeA": { + { + scheme: "http", + host: "exampleA.com", + port: "", + path: "/pathA", + }, + }, + }, + }, + { + name: "multiple routes with multiple ingresses", + namespace: "ns01", + names: sets.NewString("route0", "route1", "route2"), + routes: []*routev1.Route{ + { + ObjectMeta: metav1.ObjectMeta{ + 
Name: "route0", + Namespace: "ns01", + }, + Spec: routev1.RouteSpec{ + Path: "/path0", + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: "example0A.com", Conditions: buildValidRouteIngressCondition()}, + {Host: "example0B.com", Conditions: buildValidRouteIngressCondition()}, + {Host: "example0C.com", Conditions: buildValidRouteIngressCondition()}, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "route1", + Namespace: "ns01", + }, + Spec: routev1.RouteSpec{ + Path: "/path1", + TLS: &routev1.TLSConfig{}, + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: "redhat.com", Conditions: buildValidRouteIngressCondition()}, + {Host: "coreos.com", Conditions: buildValidRouteIngressCondition()}, + {Host: "github.com", Conditions: buildValidRouteIngressCondition()}, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "route2", + Namespace: "ns01", + }, + Spec: routev1.RouteSpec{ + Path: "/path2", + TLS: &routev1.TLSConfig{}, + }, + Status: routev1.RouteStatus{ + Ingress: []routev1.RouteIngress{ + {Host: "google.com", Conditions: buildValidRouteIngressCondition()}, + {Host: "yahoo.com", Conditions: buildValidRouteIngressCondition()}, + {Host: "bing.com", Conditions: buildValidRouteIngressCondition()}, + }, + }, + }, + }, + expected: map[string]redirectURIList{ + "route0": { + { + scheme: "http", + host: "example0A.com", + port: "", + path: "/path0", + }, + { + scheme: "http", + host: "example0B.com", + port: "", + path: "/path0", + }, + { + scheme: "http", + host: "example0C.com", + port: "", + path: "/path0", + }, + }, + "route1": { + { + scheme: "https", + host: "redhat.com", + port: "", + path: "/path1", + }, + { + scheme: "https", + host: "coreos.com", + port: "", + path: "/path1", + }, + { + scheme: "https", + host: "github.com", + port: "", + path: "/path1", + }, + }, + "route2": { + { + scheme: "https", + host: "google.com", + port: "", + path: "/path2", + }, + { + scheme: "https", + host: 
"yahoo.com", + port: "", + path: "/path2", + }, + { + scheme: "https", + host: "bing.com", + port: "", + path: "/path2", + }, + }, + }, + }, + } { + a := buildRouteClient(test.routes) + uris, errs := a.redirectURIsFromRoutes(test.namespace, test.names) + if len(errs) > 0 { + t.Errorf("%s: unexpected redirectURIsFromRoutes errors %v", test.name, errs) + } + if !reflect.DeepEqual(test.expected, uris) { + t.Errorf("%s: expected %#v, got %#v", test.name, test.expected, uris) + } + } +} + +func buildRouteClient(routes []*routev1.Route) saOAuthClientAdapter { + objects := []runtime.Object{} + for _, route := range routes { + objects = append(objects, route) + } + return saOAuthClientAdapter{ + routeClient: routev1fake.NewSimpleClientset(objects...).RouteV1(), + eventRecorder: record.NewFakeRecorder(100), + } +} + +func buildRedirectObjectReferenceString(kind, name, group string) string { + ref := &oauthv1.OAuthRedirectReference{ + Reference: oauthv1.RedirectReference{ + Kind: kind, + Name: name, + Group: group, + }, + } + data, err := runtime.Encode(encoder, ref) + if err != nil { + panic(err) + } + return string(data) +} + +func buildValidRouteIngressCondition() []routev1.RouteIngressCondition { + return []routev1.RouteIngressCondition{{Type: routev1.RouteAdmitted, Status: corev1.ConditionTrue}} +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/client_cert_rotation_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/client_cert_rotation_controller.go index 8f75dc7db..cf99398b4 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/certrotation/client_cert_rotation_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/certrotation/client_cert_rotation_controller.go @@ -2,16 +2,19 @@ package certrotation import ( "fmt" + "strings" "time" - "k8s.io/klog" - - operatorv1 "github.com/openshift/api/operator/v1" - "github.com/openshift/library-go/pkg/operator/v1helpers" utilruntime 
"k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" + "k8s.io/klog" + + operatorv1 "github.com/openshift/api/operator/v1" + + "github.com/openshift/library-go/pkg/operator/condition" + "github.com/openshift/library-go/pkg/operator/v1helpers" ) const ( @@ -62,7 +65,7 @@ func NewCertRotationController( TargetRotation: targetRotation, OperatorClient: operatorClient, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name), + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), strings.Replace(name, "-", "_", -1)), } signingRotation.Informer.Informer().AddEventHandler(c.eventHandler()) @@ -79,16 +82,16 @@ func NewCertRotationController( func (c CertRotationController) sync() error { syncErr := c.syncWorker() - condition := operatorv1.OperatorCondition{ - Type: "CertRotation_" + c.name + "_Degraded", + newCondition := operatorv1.OperatorCondition{ + Type: fmt.Sprintf(condition.CertRotationDegradedConditionTypeFmt, c.name), Status: operatorv1.ConditionFalse, } if syncErr != nil { - condition.Status = operatorv1.ConditionTrue - condition.Reason = "RotationError" - condition.Message = syncErr.Error() + newCondition.Status = operatorv1.ConditionTrue + newCondition.Reason = "RotationError" + newCondition.Message = syncErr.Error() } - if _, _, updateErr := v1helpers.UpdateStaticPodStatus(c.OperatorClient, v1helpers.UpdateStaticPodConditionFn(condition)); updateErr != nil { + if _, _, updateErr := v1helpers.UpdateStaticPodStatus(c.OperatorClient, v1helpers.UpdateStaticPodConditionFn(newCondition)); updateErr != nil { return updateErr } @@ -113,16 +116,29 @@ func (c CertRotationController) syncWorker() error { return nil } +func (c *CertRotationController) WaitForReady(stopCh <-chan struct{}) { + klog.Infof("Waiting for CertRotationController - %q", c.name) + defer klog.Infof("Finished waiting for CertRotationController - 
%q", c.name) + + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) { + utilruntime.HandleError(fmt.Errorf("caches did not sync")) + return + } +} + +// RunOnce will run the cert rotation logic, but will not try to update the static pod status. +// This eliminates the need to pass an OperatorClient and avoids dubious writes and status. +func (c *CertRotationController) RunOnce() error { + return c.syncWorker() +} + func (c *CertRotationController) Run(workers int, stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer c.queue.ShutDown() klog.Infof("Starting CertRotationController - %q", c.name) defer klog.Infof("Shutting down CertRotationController - %q", c.name) - if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) { - utilruntime.HandleError(fmt.Errorf("caches did not sync")) - return - } + c.WaitForReady(stopCh) // doesn't matter what workers say, only start one. go wait.Until(c.runWorker, time.Second, stopCh) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/condition/condition.go b/vendor/github.com/openshift/library-go/pkg/operator/condition/condition.go new file mode 100644 index 000000000..def0d6a90 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/condition/condition.go @@ -0,0 +1,66 @@ +package condition + +const ( + // ManagementStateDegradedConditionType is true when the operator ManagementState is not "Managed".. + // Possible reasons are Unmanaged, Removed or Unknown. Any of these cases means the operator is not actively managing the operand. + // This condition is set to false when the ManagementState is set to back to "Managed". + ManagementStateDegradedConditionType = "ManagementStateDegraded" + + // UnsupportedConfigOverridesUpgradeableConditionType is true when operator unsupported config overrides is changed. + // When NoUnsupportedConfigOverrides reason is given it means there are no unsupported config overrides. 
+ // When UnsupportedConfigOverridesSet reason is given it means the unsupported config overrides are set, which might impact the ability + // of operator to successfully upgrade its operand. + UnsupportedConfigOverridesUpgradeableConditionType = "UnsupportedConfigOverridesUpgradeable" + + // MonitoringResourceControllerDegradedConditionType is true when the operator is unable to create or reconcile the ServiceMonitor + // CR resource, which is required by monitoring operator to collect Prometheus data from the operator. When this condition is true and the ServiceMonitor + // is already created, it won't have impact on collecting metrics. However, if the ServiceMonitor was not created, the metrics won't be available for + // collection until this condition is set to false. + // The condition is set to false automatically when the operator successfully synchronize the ServiceMonitor resource. + MonitoringResourceControllerDegradedConditionType = "MonitoringResourceControllerDegraded" + + // BackingResourceControllerDegradedConditionType is true when the operator is unable to create or reconcile the resources needed + // to successfully run the installer pods (installer CRB and SA). If these were already created, this condition is not fatal, however if the resources + // were not created it means the installer pod creation will fail. + // This condition is set to false when the operator can successfully synchronize installer SA and CRB. + BackingResourceControllerDegradedConditionType = "BackingResourceControllerDegraded" + + // StaticPodsDegradedConditionType is true when the operator observe errors when installing the new revision static pods. + // This condition report Error reason when the pods are terminated or not ready or waiting during which the operand quality of service is degraded. + // This condition is set to False when the pods change state to running and are observed ready. 
+ StaticPodsDegradedConditionType = "StaticPodsDegraded" + + // ConfigObservationDegradedConditionType is true when the operator failed to observe or process configuration change. + // This is not transient condition and normally a correction or manual intervention is required on the config custom resource. + ConfigObservationDegradedConditionType = "ConfigObservationDegraded" + + // ResourceSyncControllerDegradedConditionType is true when the operator failed to synchronize one or more secrets or config maps required + // to run the operand. Operand ability to provide service might be affected by this condition. + // This condition is set to false when the operator is able to create secrets and config maps. + ResourceSyncControllerDegradedConditionType = "ResourceSyncControllerDegraded" + + // CertRotationDegradedConditionTypeFmt is true when the operator failed to properly rotate one or more certificates required by the operand. + // The RotationError reason is given with message describing details of this failure. This condition can be fatal when ignored as the existing certificate(s) + // validity can expire and without rotating/renewing them manual recovery might be required to fix the cluster. + CertRotationDegradedConditionTypeFmt = "CertRotation_%s_Degraded" + + // InstallerControllerDegradedConditionType is true when the operator is not able to create new installer pods so the new revisions + // cannot be rolled out. This might happen when one or more required secrets or config maps does not exists. + // In case the missing secret or config map is available, this condition is automatically set to false. + InstallerControllerDegradedConditionType = "InstallerControllerDegraded" + + // NodeInstallerDegradedConditionType is true when the operator is not able to create new installer pods because there are no schedulable nodes + // available to run the installer pods. 
+ // The AllNodesAtLatestRevision reason is set when all master nodes are updated to the latest revision. It is false when some masters are pending revision. + // ZeroNodesActive reason is set to True when no active master nodes are observed. Is set to False when there is at least one active master node. + NodeInstallerDegradedConditionType = "NodeInstallerDegraded" + + // RevisionControllerDegradedConditionType is true when the operator is not able to create new desired revision because an error occurred when + // the operator attempted to created required resource(s) (secrets, configmaps, ...). + // This condition mean no new revision will be created. + RevisionControllerDegradedConditionType = "RevisionControllerDegraded" + + // NodeControllerDegradedConditionType is true when the operator observed a master node that is not ready. + // Note that a node is not ready when its Condition.NodeReady wasn't set to true + NodeControllerDegradedConditionType = "NodeControllerDegraded" +) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider.go index 65846c975..a465636bc 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider.go @@ -6,6 +6,7 @@ import ( "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/sets" configv1 "github.com/openshift/api/config/v1" "github.com/openshift/library-go/pkg/operator/configobserver" @@ -76,6 +77,7 @@ func (c *cloudProviderObserver) ObserveCloudProviderNames(genericListers configo return observedConfig, errs } if err != nil { + errs = append(errs, err) return previouslyObservedConfig, errs } @@ 
-92,8 +94,10 @@ func (c *cloudProviderObserver) ObserveCloudProviderNames(genericListers configo Namespace: sourceCloudConfigNamespace, Name: sourceCloudConfigMap, } - // we set cloudprovider configmap values only for vsphere. - if cloudProvider != "vsphere" { + + // we set cloudprovider configmap values only for some cloud providers. + validCloudProviders := sets.NewString("azure", "gce", "vsphere") + if !validCloudProviders.Has(cloudProvider) { sourceCloudConfigMap = "" } @@ -143,10 +147,12 @@ func getPlatformName(platformType configv1.PlatformType, recorder events.Recorde cloudProvider = "azure" case configv1.VSpherePlatformType: cloudProvider = "vsphere" + case configv1.BareMetalPlatformType: + case configv1.GCPPlatformType: + cloudProvider = "gce" case configv1.LibvirtPlatformType: case configv1.OpenStackPlatformType: - // TODO(flaper87): Enable this once we've figured out a way to write the cloud provider config in the master nodes - //cloudProvider = "openstack" + cloudProvider = "openstack" case configv1.NonePlatformType: default: // the new doc on the infrastructure fields requires that we treat an unrecognized thing the same bare metal. 
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider_test.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider_test.go index 1d801515f..1260ae3a6 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/cloudprovider/observe_cloudprovider_test.go @@ -55,15 +55,20 @@ func TestObserveCloudProviderNames(t *testing.T) { platform: configv1.AzurePlatformType, expected: "azure", cloudProviderCount: 1, + }, { + platform: configv1.BareMetalPlatformType, + cloudProviderCount: 0, }, { platform: configv1.LibvirtPlatformType, cloudProviderCount: 0, }, { platform: configv1.OpenStackPlatformType, - cloudProviderCount: 0, + expected: "openstack", + cloudProviderCount: 1, }, { platform: configv1.GCPPlatformType, - cloudProviderCount: 0, + expected: "gce", + cloudProviderCount: 1, }, { platform: configv1.NonePlatformType, cloudProviderCount: 0, diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go index 2c566647f..42c750f89 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go @@ -19,12 +19,12 @@ import ( "k8s.io/client-go/util/workqueue" operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/condition" "github.com/openshift/library-go/pkg/operator/events" "github.com/openshift/library-go/pkg/operator/resourcesynccontroller" "github.com/openshift/library-go/pkg/operator/v1helpers" ) -const operatorStatusTypeConfigObservationDegraded = "ConfigObservationDegraded" const 
configObserverWorkKey = "key" // Listers is an interface which will be passed to the config observer funcs. It is expected to be hard-cast to the "correct" type @@ -126,7 +126,7 @@ func (c ConfigObserver) sync() error { // update failing condition cond := operatorv1.OperatorCondition{ - Type: operatorStatusTypeConfigObservationDegraded, + Type: condition.ConfigObservationDegradedConditionType, Status: operatorv1.ConditionFalse, } if configError != nil { diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller_test.go index 0b28510a8..9641551f4 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller_test.go @@ -6,6 +6,7 @@ import ( "strings" "testing" + "github.com/openshift/library-go/pkg/operator/condition" "github.com/openshift/library-go/pkg/operator/resourcesynccontroller" "github.com/davecgh/go-spew/spew" @@ -103,7 +104,7 @@ func TestSyncStatus(t *testing.T) { "baz": "three", }}, expectedCondition: &operatorv1.OperatorCondition{ - Type: operatorStatusTypeConfigObservationDegraded, + Type: condition.ConfigObservationDegradedConditionType, Status: operatorv1.ConditionFalse, }, }, @@ -136,7 +137,7 @@ func TestSyncStatus(t *testing.T) { "bar": "two", }}, expectedCondition: &operatorv1.OperatorCondition{ - Type: operatorStatusTypeConfigObservationDegraded, + Type: condition.ConfigObservationDegradedConditionType, Status: operatorv1.ConditionTrue, Reason: "Error", Message: "some failure", @@ -163,7 +164,7 @@ func TestSyncStatus(t *testing.T) { expectError: true, expectedObservedConfig: nil, expectedCondition: &operatorv1.OperatorCondition{ - Type: operatorStatusTypeConfigObservationDegraded, + Type: condition.ConfigObservationDegradedConditionType, Status: 
operatorv1.ConditionTrue, Reason: "Error", Message: "error writing updated observed config: update spec failure", @@ -190,7 +191,7 @@ func TestSyncStatus(t *testing.T) { expectError: true, expectedCondition: &operatorv1.OperatorCondition{ - Type: operatorStatusTypeConfigObservationDegraded, + Type: condition.ConfigObservationDegradedConditionType, Status: operatorv1.ConditionTrue, Reason: "Error", Message: "non-deterministic config observation detected", @@ -250,7 +251,7 @@ func TestSyncStatus(t *testing.T) { case tc.expectedCondition != nil && operatorConfigClient.status == nil: t.Error("missing expected status") case tc.expectedCondition != nil: - condition := v1helpers.FindOperatorCondition(operatorConfigClient.status.Conditions, operatorStatusTypeConfigObservationDegraded) + condition := v1helpers.FindOperatorCondition(operatorConfigClient.status.Conditions, condition.ConfigObservationDegradedConditionType) condition.LastTransitionTime = tc.expectedCondition.LastTransitionTime if !reflect.DeepEqual(tc.expectedCondition, condition) { t.Fatalf("\n===== condition expected:\n%v\n===== condition actual:\n%v", toYAML(tc.expectedCondition), toYAML(condition)) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates.go index dd2788669..cd38603d5 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates.go @@ -56,7 +56,9 @@ func (f *featureFlags) ObserveFeatureFlags(genericListers configobserver.Listers if apierrors.IsNotFound(err) { configResource = &configv1.FeatureGate{ Spec: configv1.FeatureGateSpec{ - FeatureSet: configv1.Default, + FeatureGateSelection: configv1.FeatureGateSelection{ + FeatureSet: configv1.Default, + }, }, } } else if err != 
nil { @@ -64,24 +66,9 @@ func (f *featureFlags) ObserveFeatureFlags(genericListers configobserver.Listers return prevObservedConfig, errs } - var newConfigValue []string - if featureSet, ok := configv1.FeatureSets[configResource.Spec.FeatureSet]; ok { - for _, enable := range featureSet.Enabled { - // only add whitelisted feature flags - if !f.allowAll && !f.knownFeatures.Has(enable) { - continue - } - newConfigValue = append(newConfigValue, enable+"=true") - } - for _, disable := range featureSet.Disabled { - // only add whitelisted feature flags - if !f.allowAll && !f.knownFeatures.Has(disable) { - continue - } - newConfigValue = append(newConfigValue, disable+"=false") - } - } else { - errs = append(errs, fmt.Errorf(".spec.featureSet %q not found", featureSet)) + newConfigValue, err := f.getWhitelistedFeatureNames(configResource) + if err != nil { + errs = append(errs, err) return prevObservedConfig, errs } if !reflect.DeepEqual(currentConfigValue, newConfigValue) { @@ -95,3 +82,53 @@ func (f *featureFlags) ObserveFeatureFlags(genericListers configobserver.Listers return observedConfig, errs } + +func (f *featureFlags) getWhitelistedFeatureNames(fg *configv1.FeatureGate) ([]string, error) { + var err error + newConfigValue := []string{} + enabledFeatures := []string{} + disabledFeatures := []string{} + formatEnabledFunc := func(fs string) string { + return fmt.Sprintf("%s=true", fs) + } + formatDisabledFunc := func(fs string) string { + return fmt.Sprintf("%s=false", fs) + } + + enabledFeatures, disabledFeatures, err = getFeaturesFromTheSpec(fg) + if err != nil { + return nil, err + } + + for _, enable := range enabledFeatures { + // only add whitelisted feature flags + if !f.allowAll && !f.knownFeatures.Has(enable) { + continue + } + newConfigValue = append(newConfigValue, formatEnabledFunc(enable)) + } + for _, disable := range disabledFeatures { + // only add whitelisted feature flags + if !f.allowAll && !f.knownFeatures.Has(disable) { + continue + } + 
newConfigValue = append(newConfigValue, formatDisabledFunc(disable)) + } + + return newConfigValue, nil +} + +func getFeaturesFromTheSpec(fg *configv1.FeatureGate) ([]string, []string, error) { + if fg.Spec.FeatureSet == configv1.CustomNoUpgrade { + if fg.Spec.FeatureGateSelection.CustomNoUpgrade != nil { + return fg.Spec.FeatureGateSelection.CustomNoUpgrade.Enabled, fg.Spec.FeatureGateSelection.CustomNoUpgrade.Disabled, nil + } + return []string{}, []string{}, nil + } + + featureSet, ok := configv1.FeatureSets[fg.Spec.FeatureSet] + if !ok { + return []string{}, []string{}, fmt.Errorf(".spec.featureSet %q not found", featureSet) + } + return featureSet.Enabled, featureSet.Disabled, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates_test.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates_test.go index 371550ca3..53aeb1a22 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates_test.go @@ -4,15 +4,15 @@ import ( "reflect" "testing" - "github.com/openshift/library-go/pkg/operator/resourcesynccontroller" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/cache" configv1 "github.com/openshift/api/config/v1" configlistersv1 "github.com/openshift/client-go/config/listers/config/v1" "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resourcesynccontroller" ) type testLister struct { @@ -37,8 +37,11 @@ func TestObserveFeatureFlags(t *testing.T) { tests := []struct { name string - configValue configv1.FeatureSet - expectedResult []string + configValue configv1.FeatureSet + expectedResult []string + expectError bool + 
customNoUpgrade *configv1.CustomFeatureGates + knownFeatures sets.String }{ { name: "default", @@ -57,10 +60,38 @@ func TestObserveFeatureFlags(t *testing.T) { "ExperimentalCriticalPodAnnotation=true", "RotateKubeletServerCertificate=true", "SupportPodPidsLimit=true", - "CSIBlockVolume=true", "LocalStorageCapacityIsolation=false", }, }, + { + name: "custom no upgrade and all allowed", + configValue: configv1.CustomNoUpgrade, + expectedResult: []string{ + "CustomFeatureEnabled=true", + "CustomFeatureDisabled=false", + }, + customNoUpgrade: &configv1.CustomFeatureGates{ + Enabled: []string{"CustomFeatureEnabled"}, + Disabled: []string{"CustomFeatureDisabled"}, + }, + }, + { + name: "custom no upgrade flag set and none upgrades were provided", + configValue: configv1.CustomNoUpgrade, + expectedResult: []string{}, + }, + { + name: "custom no upgrade and known features", + configValue: configv1.CustomNoUpgrade, + expectedResult: []string{ + "CustomFeatureEnabled=true", + }, + customNoUpgrade: &configv1.CustomFeatureGates{ + Enabled: []string{"CustomFeatureEnabled"}, + Disabled: []string{"CustomFeatureDisabled"}, + }, + knownFeatures: sets.NewString("CustomFeatureEnabled"), + }, } for _, tc := range tests { @@ -69,7 +100,10 @@ func TestObserveFeatureFlags(t *testing.T) { indexer.Add(&configv1.FeatureGate{ ObjectMeta: metav1.ObjectMeta{Name: "cluster"}, Spec: configv1.FeatureGateSpec{ - FeatureSet: tc.configValue, + FeatureGateSelection: configv1.FeatureGateSelection{ + FeatureSet: tc.configValue, + CustomNoUpgrade: tc.customNoUpgrade, + }, }, }) listers := testLister{ @@ -79,12 +113,15 @@ func TestObserveFeatureFlags(t *testing.T) { initialExistingConfig := map[string]interface{}{} - observeFn := NewObserveFeatureFlagsFunc(nil, configPath) + observeFn := NewObserveFeatureFlagsFunc(tc.knownFeatures, configPath) observed, errs := observeFn(listers, eventRecorder, initialExistingConfig) - if len(errs) != 0 { + if len(errs) != 0 && !tc.expectError { t.Fatal(errs) } + if 
len(errs) == 0 && tc.expectError { + t.Fatal("expected an error but got nothing") + } actual, _, err := unstructured.NestedStringSlice(observed, configPath...) if err != nil { t.Fatalf("unexpected error: %v", err) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/network/observe_network.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/network/observe_network.go index 5a3f937dd..8d4676558 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/network/observe_network.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/network/observe_network.go @@ -2,8 +2,9 @@ package network import ( "fmt" + "net" - configv1 "github.com/openshift/api/config" + configv1 "github.com/openshift/api/config/v1" configlistersv1 "github.com/openshift/client-go/config/listers/config/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -14,28 +15,28 @@ import ( func GetClusterCIDRs(lister configlistersv1.NetworkLister, recorder events.Recorder) ([]string, error) { network, err := lister.Get("cluster") if errors.IsNotFound(err) { - recorder.Warningf("ObserveRestrictedCIDRFailed", "Required networks.%s/cluster not found", configv1.GroupName) + recorder.Warningf("GetClusterCIDRsFailed", "Required networks.%s/cluster not found", configv1.GroupName) return nil, nil } if err != nil { - recorder.Warningf("ObserveRestrictedCIDRFailed", "error getting networks.%s/cluster: %v", configv1.GroupName, err) + recorder.Warningf("GetClusterCIDRsFailed", "error getting networks.%s/cluster: %v", configv1.GroupName, err) return nil, err } if len(network.Status.ClusterNetwork) == 0 { - recorder.Warningf("ObserveClusterCIDRFailed", "Required status.clusterNetwork field is not set in networks.%s/cluster", configv1.GroupName) + recorder.Warningf("GetClusterCIDRsFailed", "Required status.clusterNetwork field is not set in networks.%s/cluster", configv1.GroupName) return nil, fmt.Errorf("networks.%s/cluster: 
status.clusterNetwork not found", configv1.GroupName) } var clusterCIDRs []string for i, clusterNetwork := range network.Status.ClusterNetwork { if len(clusterNetwork.CIDR) == 0 { - recorder.Warningf("ObserveRestrictedCIDRFailed", "Required status.clusterNetwork[%d].cidr field is not set in networks.%s/cluster", i, configv1.GroupName) + recorder.Warningf("GetClusterCIDRsFailed", "Required status.clusterNetwork[%d].cidr field is not set in networks.%s/cluster", i, configv1.GroupName) return nil, fmt.Errorf("networks.%s/cluster: status.clusterNetwork[%d].cidr not found", configv1.GroupName, i) } clusterCIDRs = append(clusterCIDRs, clusterNetwork.CIDR) } - // TODO fallback to podCIDR? is that still a thing? + return clusterCIDRs, nil } @@ -43,17 +44,86 @@ func GetClusterCIDRs(lister configlistersv1.NetworkLister, recorder events.Recor func GetServiceCIDR(lister configlistersv1.NetworkLister, recorder events.Recorder) (string, error) { network, err := lister.Get("cluster") if errors.IsNotFound(err) { - recorder.Warningf("ObserveServiceClusterIPRangesFailed", "Required networks.%s/cluster not found", configv1.GroupName) + recorder.Warningf("GetServiceCIDRFailed", "Required networks.%s/cluster not found", configv1.GroupName) return "", nil } if err != nil { - recorder.Warningf("ObserveServiceClusterIPRangesFailed", "error getting networks.%s/cluster: %v", configv1.GroupName, err) + recorder.Warningf("GetServiceCIDRFailed", "error getting networks.%s/cluster: %v", configv1.GroupName, err) return "", err } if len(network.Status.ServiceNetwork) == 0 || len(network.Status.ServiceNetwork[0]) == 0 { - recorder.Warningf("ObserveServiceClusterIPRangesFailed", "Required status.serviceNetwork field is not set in networks.%s/cluster", configv1.GroupName) + recorder.Warningf("GetServiceCIDRFailed", "Required status.serviceNetwork field is not set in networks.%s/cluster", configv1.GroupName) return "", fmt.Errorf("networks.%s/cluster: status.serviceNetwork not found", 
configv1.GroupName) } + return network.Status.ServiceNetwork[0], nil } + +// GetExternalIPPolicy retrieves the ExternalIPPolicy for the cluster. +// The policy may be null. +func GetExternalIPPolicy(lister configlistersv1.NetworkLister, recorder events.Recorder) (*configv1.ExternalIPPolicy, error) { + network, err := lister.Get("cluster") + if errors.IsNotFound(err) { + recorder.Warningf("GetExternalIPPolicyFailed", "Required networks.%s/cluster not found", configv1.GroupName) + return nil, nil + } + if err != nil { + recorder.Warningf("GetExternalIPPolicyFailed", "error getting networks.%s/cluster: %v", configv1.GroupName, err) + return nil, err + } + + if network.Spec.ExternalIP == nil { + return nil, nil + } + + pol := network.Spec.ExternalIP.Policy + if pol != nil { + if err := validateCIDRs(pol.AllowedCIDRs); err != nil { + recorder.Warningf("GetExternalIPPolicyFailed", "error parsing networks.%s/cluster Spec.ExternalIP.Policy.AllowedCIDRs: invalid cidr: %v", configv1.GroupName, err) + return nil, err + } + if err := validateCIDRs(pol.RejectedCIDRs); err != nil { + recorder.Warningf("GetExternalIPPolicyFailed", "error parsing networks.%s/cluster Spec.ExternalIP.Policy.RejectedCIDRs: invalid cidr: %v", configv1.GroupName, err) + return nil, err + } + } + + return network.Spec.ExternalIP.Policy, nil +} + +// GetExternalIPAutoAssignCIDRs retrieves the ExternalIPAutoAssignCIDRs, if configured. 
+func GetExternalIPAutoAssignCIDRs(lister configlistersv1.NetworkLister, recorder events.Recorder) ([]string, error) { + network, err := lister.Get("cluster") + if errors.IsNotFound(err) { + recorder.Warningf("GetExternalIPAutoAssignCIDRsFailed", "Required networks.%s/cluster not found", configv1.GroupName) + return nil, nil + } + if err != nil { + recorder.Warningf("GetExternalIPAutoAssignCIDRsFailed", "error getting networks.%s/cluster: %v", configv1.GroupName, err) + return nil, err + } + + if network.Spec.ExternalIP == nil { + return nil, nil + } + + // ensure all ips are valid + if err := validateCIDRs(network.Spec.ExternalIP.AutoAssignCIDRs); err != nil { + recorder.Warningf("GetExternalIPAutoAssignCIDRsFailed", "error parsing networks.%s/cluster Spec.ExternalIP.AutoAssignCIDRs: invalid cidr: %v", configv1.GroupName, err) + return nil, err + } + + return network.Spec.ExternalIP.AutoAssignCIDRs, nil +} + +// validateCIDRs returns an err if any cidr in the list is invalid +func validateCIDRs(in []string) error { + for _, cidr := range in { + _, _, err := net.ParseCIDR(cidr) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/proxy/observe_proxy.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/proxy/observe_proxy.go new file mode 100644 index 000000000..53988d6b8 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/proxy/observe_proxy.go @@ -0,0 +1,88 @@ +package proxy + +import ( + "reflect" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + configv1 "github.com/openshift/api/config/v1" + configlistersv1 "github.com/openshift/client-go/config/listers/config/v1" + "github.com/openshift/library-go/pkg/operator/configobserver" + "github.com/openshift/library-go/pkg/operator/events" +) + +type ProxyLister interface { + ProxyLister() configlistersv1.ProxyLister +} + +func 
NewProxyObserveFunc(configPath []string) configobserver.ObserveConfigFunc { + return (&observeProxyFlags{ + configPath: configPath, + }).ObserveProxyConfig +} + +type observeProxyFlags struct { + configPath []string +} + +// ObserveProxyConfig observes the proxy.config.openshift.io/cluster object and writes +// its content to an unstructured object in a string map at the path from the constructor +func (f *observeProxyFlags) ObserveProxyConfig(genericListers configobserver.Listers, recorder events.Recorder, existingConfig map[string]interface{}) (map[string]interface{}, []error) { + proxyLister := genericListers.(ProxyLister) + + errs := []error{} + prevObservedProxyConfig := map[string]interface{}{} + + // grab the current Proxy config to later check whether it was updated + currentProxyMap, _, err := unstructured.NestedStringMap(existingConfig, f.configPath...) + if err != nil { + return prevObservedProxyConfig, append(errs, err) + } + + if len(currentProxyMap) > 0 { + unstructured.SetNestedStringMap(prevObservedProxyConfig, currentProxyMap, f.configPath...) 
+ } + + observedConfig := map[string]interface{}{} + proxyConfig, err := proxyLister.ProxyLister().Get("cluster") + if errors.IsNotFound(err) { + recorder.Warningf("ObserveProxyConfig", "proxy.%s/cluster not found", configv1.GroupName) + return observedConfig, errs + } + if err != nil { + errs = append(errs, err) + return existingConfig, errs + } + + newProxyMap := proxyToMap(proxyConfig) + if len(newProxyMap) > 0 { + if err := unstructured.SetNestedStringMap(observedConfig, newProxyMap, f.configPath...); err != nil { + errs = append(errs, err) + } + } + + if !reflect.DeepEqual(currentProxyMap, newProxyMap) { + recorder.Eventf("ObserveProxyConfig", "proxy changed to %q", newProxyMap) + } + + return observedConfig, errs +} + +func proxyToMap(proxy *configv1.Proxy) map[string]string { + proxyMap := map[string]string{} + + if noProxy := proxy.Spec.NoProxy; len(noProxy) > 0 { + proxyMap["NO_PROXY"] = noProxy + } + + if httpProxy := proxy.Spec.HTTPProxy; len(httpProxy) > 0 { + proxyMap["HTTP_PROXY"] = httpProxy + } + + if httpsProxy := proxy.Spec.HTTPSProxy; len(httpsProxy) > 0 { + proxyMap["HTTPS_PROXY"] = httpsProxy + } + + return proxyMap +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/proxy/observe_proxy_test.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/proxy/observe_proxy_test.go new file mode 100644 index 000000000..4909ea8fc --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/proxy/observe_proxy_test.go @@ -0,0 +1,91 @@ +package proxy + +import ( + "reflect" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/cache" + + configv1 "github.com/openshift/api/config/v1" + configlistersv1 "github.com/openshift/client-go/config/listers/config/v1" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resourcesynccontroller" +) + +type testLister struct { + lister configlistersv1.ProxyLister 
+} + +func (l testLister) ProxyLister() configlistersv1.ProxyLister { + return l.lister +} + +func (l testLister) ResourceSyncer() resourcesynccontroller.ResourceSyncer { + return nil +} + +func (l testLister) PreRunHasSynced() []cache.InformerSynced { + return nil +} + +func TestObserveProxyConfig(t *testing.T) { + configPath := []string{"openshift", "proxy"} + + tests := []struct { + name string + proxySpec configv1.ProxySpec + expected map[string]interface{} + expectedError []error + }{ + { + name: "all unset", + proxySpec: configv1.ProxySpec{}, + expected: map[string]interface{}{}, + expectedError: []error{}, + }, + { + name: "all set", + proxySpec: configv1.ProxySpec{ + HTTPProxy: "http://someplace.it", + HTTPSProxy: "https://someplace.it", + NoProxy: "127.0.0.1", + }, + expected: map[string]interface{}{ + "openshift": map[string]interface{}{ + "proxy": map[string]interface{}{ + "HTTP_PROXY": "http://someplace.it", + "HTTPS_PROXY": "https://someplace.it", + "NO_PROXY": "127.0.0.1", + }, + }, + }, + expectedError: []error{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + indexer.Add(&configv1.Proxy{ + ObjectMeta: metav1.ObjectMeta{Name: "cluster"}, + Spec: tt.proxySpec, + }) + listers := testLister{ + lister: configlistersv1.NewProxyLister(indexer), + } + eventRecorder := events.NewInMemoryRecorder("") + + initialExistingConfig := map[string]interface{}{} + + observeFn := NewProxyObserveFunc(configPath) + + got, errorsGot := observeFn(listers, eventRecorder, initialExistingConfig) + if !reflect.DeepEqual(got, tt.expected) { + t.Errorf("observeProxyFlags.ObserveProxyConfig() got = %v, want %v", got, tt.expected) + } + if !reflect.DeepEqual(errorsGot, tt.expectedError) { + t.Errorf("observeProxyFlags.ObserveProxyConfig() errorsGot = %v, want %v", errorsGot, tt.expectedError) + } + }) + } +} diff --git 
a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go index 40b7354e2..b64d9f6a9 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go @@ -38,7 +38,10 @@ func (r *inMemoryEventRecorder) ComponentName() string { } func (r *inMemoryEventRecorder) ForComponent(component string) Recorder { - return &inMemoryEventRecorder{events: []*corev1.Event{}, source: component} + r.Lock() + defer r.Unlock() + r.source = component + return r } func (r *inMemoryEventRecorder) WithComponentSuffix(suffix string) Recorder { diff --git a/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_operator_client.go b/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_operator_client.go new file mode 100644 index 000000000..e15a691a4 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_operator_client.go @@ -0,0 +1,190 @@ +package genericoperatorclient + +import ( + "time" + + "github.com/imdario/mergo" + + "k8s.io/apimachinery/pkg/runtime" + + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/v1helpers" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/dynamic/dynamicinformer" + "k8s.io/client-go/informers" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" +) + +const globalConfigName = "cluster" + +func NewClusterScopedOperatorClient(config *rest.Config, gvr schema.GroupVersionResource) (v1helpers.OperatorClient, dynamicinformer.DynamicSharedInformerFactory, error) { + dynamicClient, err := dynamic.NewForConfig(config) + if err != nil { + return nil, 
nil, err + } + client := dynamicClient.Resource(gvr) + + informers := dynamicinformer.NewDynamicSharedInformerFactory(dynamicClient, 12*time.Hour) + informer := informers.ForResource(gvr) + + return &dynamicOperatorClient{ + informer: informer, + client: client, + }, informers, nil +} + +type dynamicOperatorClient struct { + informer informers.GenericInformer + client dynamic.ResourceInterface +} + +func (c dynamicOperatorClient) Informer() cache.SharedIndexInformer { + return c.informer.Informer() +} + +func (c dynamicOperatorClient) GetOperatorState() (*operatorv1.OperatorSpec, *operatorv1.OperatorStatus, string, error) { + uncastInstance, err := c.informer.Lister().Get(globalConfigName) + if err != nil { + return nil, nil, "", err + } + instance := uncastInstance.(*unstructured.Unstructured) + + spec, err := getOperatorSpecFromUnstructured(instance.UnstructuredContent()) + if err != nil { + return nil, nil, "", err + } + status, err := getOperatorStatusFromUnstructured(instance.UnstructuredContent()) + if err != nil { + return nil, nil, "", err + } + + return spec, status, instance.GetResourceVersion(), nil +} + +func (c dynamicOperatorClient) UpdateOperatorSpec(resourceVersion string, spec *operatorv1.OperatorSpec) (*operatorv1.OperatorSpec, string, error) { + uncastOriginal, err := c.informer.Lister().Get(globalConfigName) + if err != nil { + return nil, "", err + } + original := uncastOriginal.(*unstructured.Unstructured) + + copy := original.DeepCopy() + copy.SetResourceVersion(resourceVersion) + if err := setOperatorSpecFromUnstructured(copy.UnstructuredContent(), spec); err != nil { + return nil, "", err + } + + ret, err := c.client.Update(copy, metav1.UpdateOptions{}) + if err != nil { + return nil, "", err + } + retSpec, err := getOperatorSpecFromUnstructured(ret.UnstructuredContent()) + if err != nil { + return nil, "", err + } + + return retSpec, ret.GetResourceVersion(), nil +} + +func (c dynamicOperatorClient) UpdateOperatorStatus(resourceVersion 
string, status *operatorv1.OperatorStatus) (*operatorv1.OperatorStatus, error) { + uncastOriginal, err := c.informer.Lister().Get(globalConfigName) + if err != nil { + return nil, err + } + original := uncastOriginal.(*unstructured.Unstructured) + + copy := original.DeepCopy() + copy.SetResourceVersion(resourceVersion) + if err := setOperatorStatusFromUnstructured(copy.UnstructuredContent(), status); err != nil { + return nil, err + } + + ret, err := c.client.UpdateStatus(copy, metav1.UpdateOptions{}) + if err != nil { + return nil, err + } + retStatus, err := getOperatorStatusFromUnstructured(ret.UnstructuredContent()) + if err != nil { + return nil, err + } + + return retStatus, nil +} + +func getOperatorSpecFromUnstructured(obj map[string]interface{}) (*operatorv1.OperatorSpec, error) { + uncastSpec, exists, err := unstructured.NestedMap(obj, "spec") + if !exists { + return &operatorv1.OperatorSpec{}, nil + } + if err != nil { + return nil, err + } + + ret := &operatorv1.OperatorSpec{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(uncastSpec, ret); err != nil { + return nil, err + } + return ret, nil +} + +func setOperatorSpecFromUnstructured(obj map[string]interface{}, spec *operatorv1.OperatorSpec) error { + // we cannot simply set the entire map because doing so would stomp unknown fields, like say a static pod operator spec when cast as an operator spec + newUnstructuredSpec, err := runtime.DefaultUnstructuredConverter.ToUnstructured(spec) + if err != nil { + return err + } + + originalUnstructuredSpec, exists, err := unstructured.NestedMap(obj, "spec") + if !exists { + return unstructured.SetNestedMap(obj, newUnstructuredSpec, "spec") + } + if err != nil { + return err + } + if err := mergo.Merge(&originalUnstructuredSpec, newUnstructuredSpec, mergo.WithOverride); err != nil { + return err + } + + return unstructured.SetNestedMap(obj, originalUnstructuredSpec, "spec") +} + +func getOperatorStatusFromUnstructured(obj 
map[string]interface{}) (*operatorv1.OperatorStatus, error) { + uncastStatus, exists, err := unstructured.NestedMap(obj, "status") + if !exists { + return &operatorv1.OperatorStatus{}, nil + } + if err != nil { + return nil, err + } + + ret := &operatorv1.OperatorStatus{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(uncastStatus, ret); err != nil { + return nil, err + } + return ret, nil +} + +func setOperatorStatusFromUnstructured(obj map[string]interface{}, spec *operatorv1.OperatorStatus) error { + // we cannot simply set the entire map because doing so would stomp unknown fields, like say a static pod operator status when cast as an operator status + newUnstructuredStatus, err := runtime.DefaultUnstructuredConverter.ToUnstructured(spec) + if err != nil { + return err + } + + originalUnstructuredStatus, exists, err := unstructured.NestedMap(obj, "status") + if !exists { + return unstructured.SetNestedMap(obj, newUnstructuredStatus, "status") + } + if err != nil { + return err + } + if err := mergo.Merge(&originalUnstructuredStatus, newUnstructuredStatus, mergo.WithOverride); err != nil { + return err + } + + return unstructured.SetNestedMap(obj, originalUnstructuredStatus, "status") +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_operator_client_test.go b/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_operator_client_test.go new file mode 100644 index 000000000..15e39e896 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_operator_client_test.go @@ -0,0 +1,138 @@ +package genericoperatorclient + +import ( + "reflect" + "testing" + + "k8s.io/utils/diff" + + operatorv1 "github.com/openshift/api/operator/v1" +) + +func TestSetOperatorSpecFromUnstructured(t *testing.T) { + tests := []struct { + name string + + in map[string]interface{} + spec *operatorv1.OperatorSpec + expected map[string]interface{} + }{ + { + 
name: "keep-unknown", + in: map[string]interface{}{ + "spec": map[string]interface{}{ + "non-standard-field": "value", + }, + }, + spec: &operatorv1.OperatorSpec{ + LogLevel: operatorv1.Trace, + }, + expected: map[string]interface{}{ + "spec": map[string]interface{}{ + "non-standard-field": "value", + "logLevel": "Trace", + "managementState": "", + "operatorLogLevel": "", + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := setOperatorSpecFromUnstructured(test.in, test.spec) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(test.in, test.expected) { + t.Errorf(diff.ObjectDiff(test.in, test.expected)) + } + }) + } +} + +func TestSetOperatorStatusFromUnstructured(t *testing.T) { + tests := []struct { + name string + + in map[string]interface{} + status *operatorv1.OperatorStatus + expected map[string]interface{} + }{ + { + name: "keep-unknown", + in: map[string]interface{}{ + "status": map[string]interface{}{ + "non-standard-field": "value", + }, + }, + status: &operatorv1.OperatorStatus{ + Conditions: []operatorv1.OperatorCondition{ + { + Type: "Degraded", + }, + }, + }, + expected: map[string]interface{}{ + "status": map[string]interface{}{ + "non-standard-field": "value", + "conditions": []interface{}{ + map[string]interface{}{ + "lastTransitionTime": nil, + "status": "", + "type": "Degraded", + }, + }, + "readyReplicas": int64(0), + }, + }, + }, + { + name: "replace-condition", + in: map[string]interface{}{ + "status": map[string]interface{}{ + "non-standard-field": "value", + "conditions": []interface{}{ + map[string]interface{}{ + "lastTransitionTime": nil, + "status": "", + "type": "overwriteme", + }, + }, + }, + }, + status: &operatorv1.OperatorStatus{ + Conditions: []operatorv1.OperatorCondition{ + { + Type: "Degraded", + }, + }, + }, + expected: map[string]interface{}{ + "status": map[string]interface{}{ + "non-standard-field": "value", + "conditions": []interface{}{ + map[string]interface{}{ + 
"lastTransitionTime": nil, + "status": "", + "type": "Degraded", + }, + }, + "readyReplicas": int64(0), + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := setOperatorStatusFromUnstructured(test.in, test.status) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(test.in, test.expected) { + t.Errorf(diff.ObjectGoPrintDiff(test.in, test.expected)) + } + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_staticpod_operator_client.go b/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_staticpod_operator_client.go new file mode 100644 index 000000000..35ae57d14 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/genericoperatorclient/dynamic_staticpod_operator_client.go @@ -0,0 +1,201 @@ +package genericoperatorclient + +import ( + "time" + + "github.com/imdario/mergo" + + "k8s.io/apimachinery/pkg/runtime" + + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/v1helpers" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/dynamic/dynamicinformer" + "k8s.io/client-go/rest" +) + +func NewStaticPodOperatorClient(config *rest.Config, gvr schema.GroupVersionResource) (v1helpers.StaticPodOperatorClient, dynamicinformer.DynamicSharedInformerFactory, error) { + dynamicClient, err := dynamic.NewForConfig(config) + if err != nil { + return nil, nil, err + } + client := dynamicClient.Resource(gvr) + + informers := dynamicinformer.NewDynamicSharedInformerFactory(dynamicClient, 12*time.Hour) + informer := informers.ForResource(gvr) + + return &dynamicStaticPodOperatorClient{ + dynamicOperatorClient: dynamicOperatorClient{ + informer: informer, + client: client, + }, + }, informers, nil +} + +type dynamicStaticPodOperatorClient struct { 
+ dynamicOperatorClient +} + +func (c dynamicStaticPodOperatorClient) GetStaticPodOperatorState() (*operatorv1.StaticPodOperatorSpec, *operatorv1.StaticPodOperatorStatus, string, error) { + uncastInstance, err := c.informer.Lister().Get("cluster") + if err != nil { + return nil, nil, "", err + } + instance := uncastInstance.(*unstructured.Unstructured) + + spec, err := getStaticPodOperatorSpecFromUnstructured(instance.UnstructuredContent()) + if err != nil { + return nil, nil, "", err + } + status, err := getStaticPodOperatorStatusFromUnstructured(instance.UnstructuredContent()) + if err != nil { + return nil, nil, "", err + } + + return spec, status, instance.GetResourceVersion(), nil +} + +func (c dynamicStaticPodOperatorClient) GetStaticPodOperatorStateWithQuorum() (*operatorv1.StaticPodOperatorSpec, *operatorv1.StaticPodOperatorStatus, string, error) { + instance, err := c.client.Get("cluster", metav1.GetOptions{}) + if err != nil { + return nil, nil, "", err + } + + spec, err := getStaticPodOperatorSpecFromUnstructured(instance.UnstructuredContent()) + if err != nil { + return nil, nil, "", err + } + status, err := getStaticPodOperatorStatusFromUnstructured(instance.UnstructuredContent()) + if err != nil { + return nil, nil, "", err + } + + return spec, status, instance.GetResourceVersion(), nil +} + +func (c dynamicStaticPodOperatorClient) UpdateStaticPodOperatorSpec(resourceVersion string, spec *operatorv1.StaticPodOperatorSpec) (*operatorv1.StaticPodOperatorSpec, string, error) { + uncastOriginal, err := c.informer.Lister().Get("cluster") + if err != nil { + return nil, "", err + } + original := uncastOriginal.(*unstructured.Unstructured) + + copy := original.DeepCopy() + copy.SetResourceVersion(resourceVersion) + if err := setStaticPodOperatorSpecFromUnstructured(copy.UnstructuredContent(), spec); err != nil { + return nil, "", err + } + + ret, err := c.client.Update(copy, metav1.UpdateOptions{}) + if err != nil { + return nil, "", err + } + retSpec, err 
:= getStaticPodOperatorSpecFromUnstructured(ret.UnstructuredContent()) + if err != nil { + return nil, "", err + } + + return retSpec, ret.GetResourceVersion(), nil +} + +func (c dynamicStaticPodOperatorClient) UpdateStaticPodOperatorStatus(resourceVersion string, status *operatorv1.StaticPodOperatorStatus) (*operatorv1.StaticPodOperatorStatus, error) { + uncastOriginal, err := c.informer.Lister().Get("cluster") + if err != nil { + return nil, err + } + original := uncastOriginal.(*unstructured.Unstructured) + + copy := original.DeepCopy() + copy.SetResourceVersion(resourceVersion) + if err := setStaticPodOperatorStatusFromUnstructured(copy.UnstructuredContent(), status); err != nil { + return nil, err + } + + ret, err := c.client.UpdateStatus(copy, metav1.UpdateOptions{}) + if err != nil { + return nil, err + } + retStatus, err := getStaticPodOperatorStatusFromUnstructured(ret.UnstructuredContent()) + if err != nil { + return nil, err + } + + return retStatus, nil +} + +func getStaticPodOperatorSpecFromUnstructured(obj map[string]interface{}) (*operatorv1.StaticPodOperatorSpec, error) { + uncastSpec, exists, err := unstructured.NestedMap(obj, "spec") + if !exists { + return &operatorv1.StaticPodOperatorSpec{}, nil + } + if err != nil { + return nil, err + } + + ret := &operatorv1.StaticPodOperatorSpec{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(uncastSpec, ret); err != nil { + return nil, err + } + return ret, nil +} + +func setStaticPodOperatorSpecFromUnstructured(obj map[string]interface{}, spec *operatorv1.StaticPodOperatorSpec) error { + // we cannot simply set the entire map because doing so would stomp unknown fields, like say a static pod operator spec when cast as an operator spec + newUnstructuredSpec, err := runtime.DefaultUnstructuredConverter.ToUnstructured(spec) + if err != nil { + return err + } + + originalUnstructuredSpec, exists, err := unstructured.NestedMap(obj, "spec") + if !exists { + return 
unstructured.SetNestedMap(obj, newUnstructuredSpec, "spec") + } + if err != nil { + return err + } + if err := mergo.Merge(&originalUnstructuredSpec, newUnstructuredSpec, mergo.WithOverride); err != nil { + return err + } + + return unstructured.SetNestedMap(obj, originalUnstructuredSpec, "spec") +} + +func getStaticPodOperatorStatusFromUnstructured(obj map[string]interface{}) (*operatorv1.StaticPodOperatorStatus, error) { + uncastStatus, exists, err := unstructured.NestedMap(obj, "status") + if !exists { + return &operatorv1.StaticPodOperatorStatus{}, nil + } + if err != nil { + return nil, err + } + + ret := &operatorv1.StaticPodOperatorStatus{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(uncastStatus, ret); err != nil { + return nil, err + } + return ret, nil +} + +func setStaticPodOperatorStatusFromUnstructured(obj map[string]interface{}, spec *operatorv1.StaticPodOperatorStatus) error { + // we cannot simply set the entire map because doing so would stomp unknown fields, like say a static pod operator spec when cast as an operator spec + newUnstructuredStatus, err := runtime.DefaultUnstructuredConverter.ToUnstructured(spec) + if err != nil { + return err + } + + originalUnstructuredStatus, exists, err := unstructured.NestedMap(obj, "status") + if !exists { + return unstructured.SetNestedMap(obj, newUnstructuredStatus, "status") + } + if err != nil { + return err + } + if err := mergo.Merge(&originalUnstructuredStatus, newUnstructuredStatus, mergo.WithOverride); err != nil { + return err + } + + return unstructured.SetNestedMap(obj, originalUnstructuredStatus, "status") +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/loglevel/logging_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/loglevel/logging_controller.go index fbfe7e33f..0ebcd57b1 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/loglevel/logging_controller.go +++ 
b/vendor/github.com/openshift/library-go/pkg/operator/loglevel/logging_controller.go @@ -10,6 +10,8 @@ import ( "k8s.io/client-go/util/workqueue" "k8s.io/klog" + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/events" operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers" ) @@ -51,23 +53,25 @@ func (c LogLevelController) sync() error { return err } - logLevel := fmt.Sprintf("%d", LogLevelToKlog(detailedSpec.OperatorLogLevel)) + currentLogLevel := CurrentLogLevel() + desiredLogLevel := detailedSpec.OperatorLogLevel - var level klog.Level + if len(desiredLogLevel) == 0 { + desiredLogLevel = operatorv1.Normal + } - oldLevel, ok := level.Get().(klog.Level) - if !ok { - oldLevel = level + // When the current loglevel is the desired one, do nothing + if currentLogLevel == desiredLogLevel { + return nil } - if err := level.Set(logLevel); err != nil { - c.eventRecorder.Warningf("LoglevelChangeFailed", "Unable to set loglevel level %v", err) + // Set the new loglevel if the operator spec changed + if err := SetVerbosityValue(desiredLogLevel); err != nil { + c.eventRecorder.Warningf("OperatorLoglevelChangeFailed", "Unable to change operator log level from %q to %q: %v", currentLogLevel, desiredLogLevel, err) return err } - if oldLevel.String() != logLevel { - c.eventRecorder.Eventf("LoglevelChange", "Changed loglevel level to %q", logLevel) - } + c.eventRecorder.Eventf("OperatorLoglevelChange", "Operator log level changed from %q to %q", currentLogLevel, desiredLogLevel) return nil } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/loglevel/logging_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/loglevel/logging_controller_test.go new file mode 100644 index 000000000..ff21148d4 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/loglevel/logging_controller_test.go @@ -0,0 +1,68 @@ +package loglevel + +import ( + "testing" + + operatorv1 
"github.com/openshift/api/operator/v1" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +func TestClusterOperatorLoggingController(t *testing.T) { + if err := SetVerbosityValue(operatorv1.Normal); err != nil { + t.Fatal(err) + } + + operatorSpec := &operatorv1.OperatorSpec{ + ManagementState: operatorv1.Managed, + } + + fakeStaticPodOperatorClient := v1helpers.NewFakeOperatorClient( + operatorSpec, + &operatorv1.OperatorStatus{}, + nil, + ) + + recorder := events.NewInMemoryRecorder("") + + controller := NewClusterOperatorLoggingController(fakeStaticPodOperatorClient, recorder) + + // no-op, desired == current + // When OperatorLogLevel is "" we assume the loglevel is Normal. + if err := controller.sync(); err != nil { + t.Fatal(err) + } + + if len(recorder.Events()) > 0 { + t.Fatalf("expected zero events, got %d", len(recorder.Events())) + } + + // change the log level to trace should 1 emit event + operatorSpec.OperatorLogLevel = operatorv1.Trace + if err := controller.sync(); err != nil { + t.Fatal(err) + } + + if operatorEvents := recorder.Events(); len(operatorEvents) == 1 { + expectedEventMessage := `Operator log level changed from "Normal" to "Trace"` + if message := operatorEvents[0].Message; message != expectedEventMessage { + t.Fatalf("expected event message %q, got %q", expectedEventMessage, message) + } + } else { + t.Fatalf("expected 1 event, got %d", len(operatorEvents)) + } + + // next sync should not produce any extra event + if err := controller.sync(); err != nil { + t.Fatal(err) + } + + if operatorEvents := recorder.Events(); len(operatorEvents) != 1 { + t.Fatalf("expected 1 event recorded, got %d", len(operatorEvents)) + } + + if current := CurrentLogLevel(); current != operatorv1.Trace { + t.Fatalf("expected log level 'Trace', got %v", current) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/loglevel/util.go 
b/vendor/github.com/openshift/library-go/pkg/operator/loglevel/util.go index d6b942797..91e4251f0 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/loglevel/util.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/loglevel/util.go @@ -1,7 +1,15 @@ package loglevel -import operatorv1 "github.com/openshift/api/operator/v1" +import ( + "flag" + "fmt" + "k8s.io/klog" + + operatorv1 "github.com/openshift/api/operator/v1" +) + +// LogLevelToKlog transforms operator log level to a klog numeric verbosity level. func LogLevelToKlog(logLevel operatorv1.LogLevel) int { switch logLevel { case operatorv1.Normal: @@ -16,3 +24,66 @@ func LogLevelToKlog(logLevel operatorv1.LogLevel) int { return 2 } } + +// CurrentLogLevel attempts to guess the current log level that is used by klog. +// We can use flags here as well, but this is less ugly and more programmatically correct than flags. +func CurrentLogLevel() operatorv1.LogLevel { + switch { + case klog.V(8) == true: + return operatorv1.TraceAll + case klog.V(6) == true: + return operatorv1.Trace + case klog.V(4) == true: + return operatorv1.Debug + case klog.V(2) == true: + return operatorv1.Normal + default: + return operatorv1.Normal + } +} + +// SetVerbosityValue is a nasty hack and attempt to manipulate the global flags as klog does not expose +// a way to dynamically change the loglevel at runtime. +func SetVerbosityValue(logLevel operatorv1.LogLevel) error { + if logLevel == CurrentLogLevel() { + return nil + } + + var level *klog.Level + + // Convert operator loglevel to klog numeric string + desiredLevelValue := fmt.Sprintf("%d", LogLevelToKlog(logLevel)) + + // First, if the '-v' was specified in command line, attempt to acquire the level pointer from it. 
+ if f := flag.CommandLine.Lookup("v"); f != nil { + if flagValue, ok := f.Value.(*klog.Level); ok { + level = flagValue + } + } + + // Second, if the '-v' was not set but is still present in flags defined for the command, attempt to acquire it + // by visiting all flags. + if level == nil { + flag.VisitAll(func(f *flag.Flag) { + if level != nil { + return + } + if levelFlag, ok := f.Value.(*klog.Level); ok { + level = levelFlag + } + }) + } + + if level != nil { + return level.Set(desiredLevelValue) + } + + // Third, if modifying the flag value (which is recommended by klog) fails, then fallback to modifying + // the internal state of klog using the empty new level. + var newLevel klog.Level + if err := newLevel.Set(desiredLevelValue); err != nil { + return fmt.Errorf("failed set klog.logging.verbosity %s: %v", desiredLevelValue, err) + } + + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/management/management_state_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/management/management_state_controller.go index 71e4f4cf7..40ba5c7ab 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/management/management_state_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/management/management_state_controller.go @@ -2,6 +2,7 @@ package management import ( "fmt" + "strings" "time" "k8s.io/klog" @@ -14,6 +15,7 @@ import ( operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/condition" "github.com/openshift/library-go/pkg/operator/events" "github.com/openshift/library-go/pkg/operator/v1helpers" operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers" @@ -43,7 +45,7 @@ func NewOperatorManagementStateController( operatorClient: operatorClient, eventRecorder: recorder, - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ManagementStateController-"+name), + queue: 
workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ManagementStateController_"+strings.Replace(name, "-", "_", -1)), } operatorClient.Informer().AddEventHandler(c.eventHandler()) @@ -61,7 +63,7 @@ func (c ManagementStateController) sync() error { } cond := operatorv1.OperatorCondition{ - Type: "ManagementStateDegraded", + Type: condition.ManagementStateDegradedConditionType, Status: operatorv1.ConditionFalse, } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/render/options/config.go b/vendor/github.com/openshift/library-go/pkg/operator/render/options/config.go index ef3ea461a..019b61ce4 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/render/options/config.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/render/options/config.go @@ -20,6 +20,9 @@ type ManifestConfig struct { // Image is the pull spec of the image to use for the controller manager. Image string + // OperatorImage is the pull spec of the image to use for the operator (optional). + OperatorImage string + // ImagePullPolicy specifies the image pull policy to use for the images. ImagePullPolicy string } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/render/options/manifest.go b/vendor/github.com/openshift/library-go/pkg/operator/render/options/manifest.go index e893edfaf..7c58c648b 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/render/options/manifest.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/render/options/manifest.go @@ -10,7 +10,7 @@ import ( // ManifestOptions contains the values that influence manifest contents. 
type ManifestOptions struct { Namespace string - Image string + Image, OperatorImage string ImagePullPolicy string ConfigHostPath string ConfigFileName string @@ -37,6 +37,8 @@ func (o *ManifestOptions) AddFlags(fs *pflag.FlagSet, humanReadableComponentName fmt.Sprintf("Target namespace for phase 3 %s pods.", humanReadableComponentName)) fs.StringVar(&o.Image, "manifest-image", o.Image, fmt.Sprintf("Image to use for the %s.", humanReadableComponentName)) + fs.StringVar(&o.OperatorImage, "manifest-operator-image", o.OperatorImage, + fmt.Sprintf("Operator image to use for the %s.", humanReadableComponentName)) fs.StringVar(&o.ImagePullPolicy, "manifest-image-pull-policy", o.ImagePullPolicy, fmt.Sprintf("Image pull policy to use for the %s.", humanReadableComponentName)) fs.StringVar(&o.ConfigHostPath, "manifest-config-host-path", o.ConfigHostPath, @@ -85,6 +87,7 @@ func (o *ManifestOptions) Validate() error { func (o *ManifestOptions) ApplyTo(cfg *ManifestConfig) error { cfg.Namespace = o.Namespace cfg.Image = o.Image + cfg.OperatorImage = o.OperatorImage cfg.ImagePullPolicy = o.ImagePullPolicy cfg.ConfigHostPath = o.ConfigHostPath cfg.ConfigFileName = o.ConfigFileName diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiregistration.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiregistration.go index 81a81a2d0..c10efe73f 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiregistration.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiregistration.go @@ -1,22 +1,23 @@ package resourceapply import ( - "k8s.io/klog" - "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" apiregistrationv1client 
"k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1" + "github.com/openshift/library-go/pkg/operator/events" "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" ) // ApplyAPIService merges objectmeta and requires apiservice coordinates. It does not touch CA bundles, which should be managed via service CA controller. -func ApplyAPIService(client apiregistrationv1client.APIServicesGetter, required *apiregistrationv1.APIService) (*apiregistrationv1.APIService, bool, error) { +func ApplyAPIService(client apiregistrationv1client.APIServicesGetter, recorder events.Recorder, required *apiregistrationv1.APIService) (*apiregistrationv1.APIService, bool, error) { existing, err := client.APIServices().Get(required.Name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { actual, err := client.APIServices().Create(required) + reportCreateEvent(recorder, required, err) return actual, true, err } if err != nil { @@ -41,5 +42,6 @@ func ApplyAPIService(client apiregistrationv1client.APIServicesGetter, required klog.Infof("APIService %q changes: %s", existing.Name, JSONPatch(existing, existingCopy)) } actual, err := client.APIServices().Update(existingCopy) + reportUpdateEvent(recorder, required, err) return actual, true, err } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go index 870b7ceb6..36777d6eb 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go @@ -196,6 +196,10 @@ func ApplyConfigMap(client coreclientv1.ConfigMapsGetter, recorder events.Record // ApplySecret merges objectmeta, requires data func ApplySecret(client coreclientv1.SecretsGetter, recorder events.Recorder, required *corev1.Secret) (*corev1.Secret, bool, error) { + if len(required.StringData) > 0 
{ + return nil, false, fmt.Errorf("Secret.stringData is not supported") + } + existing, err := client.Secrets(required.Namespace).Get(required.Name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { actual, err := client.Secrets(required.Namespace).Create(required) @@ -210,6 +214,7 @@ func ApplySecret(client coreclientv1.SecretsGetter, recorder events.Recorder, re existingCopy := existing.DeepCopy() resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + dataSame := equality.Semantic.DeepEqual(existingCopy.Data, required.Data) if dataSame && !*modified { return existingCopy, false, nil @@ -217,7 +222,23 @@ func ApplySecret(client coreclientv1.SecretsGetter, recorder events.Recorder, re existingCopy.Data = required.Data if klog.V(4) { - klog.Infof("Secret %q changes: %v", required.Namespace+"/"+required.Name, JSONPatch(existing, required)) + safeRequired := required.DeepCopy() + safeExisting := existing.DeepCopy() + + for s := range safeExisting.Data { + safeExisting.Data[s] = []byte("OLD") + } + for s := range safeRequired.Data { + if _, preexisting := existing.Data[s]; !preexisting { + safeRequired.Data[s] = []byte("NEW") + } else if !equality.Semantic.DeepEqual(existing.Data[s], safeRequired.Data[s]) { + safeRequired.Data[s] = []byte("MODIFIED") + } else { + safeRequired.Data[s] = []byte("OLD") + } + } + + klog.Infof("Secret %q changes: %v", required.Namespace+"/"+required.Name, JSONPatch(safeExisting, safeRequired)) } actual, err := client.Secrets(required.Namespace).Update(existingCopy) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/rbac.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/rbac.go index 43f413024..ff9388a95 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/rbac.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/rbac.go @@ -131,7 +131,7 @@ func ApplyRole(client 
rbacclientv1.RolesGetter, recorder events.Recorder, requir if klog.V(4) { klog.Infof("Role %q changes: %v", required.Namespace+"/"+required.Name, JSONPatch(existing, existingCopy)) } - actual, err := client.Roles(required.Namespace).Update(existing) + actual, err := client.Roles(required.Namespace).Update(existingCopy) reportUpdateEvent(recorder, required, err) return actual, true, err } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger.go index dc3a9db3d..b28c8770a 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger.go @@ -7,6 +7,7 @@ import ( "reflect" "k8s.io/klog" + "sigs.k8s.io/yaml" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -17,7 +18,14 @@ import ( // MergeConfigMap takes a configmap, the target key, special overlay funcs a list of config configs to overlay on top of each other // It returns the resultant configmap and a bool indicating if any changes were made to the configmap func MergeConfigMap(configMap *corev1.ConfigMap, configKey string, specialCases map[string]MergeFunc, configYAMLs ...[]byte) (*corev1.ConfigMap, bool, error) { - configBytes, err := MergeProcessConfig(specialCases, configYAMLs...) + return MergePrunedConfigMap(nil, configMap, configKey, specialCases, configYAMLs...) +} + +// MergePrunedConfigMap takes a configmap, the target key, special overlay funcs a list of config configs to overlay on top of each other +// It returns the resultant configmap and a bool indicating if any changes were made to the configmap. +// It roundtrips the config through the given schema. 
+func MergePrunedConfigMap(schema runtime.Object, configMap *corev1.ConfigMap, configKey string, specialCases map[string]MergeFunc, configYAMLs ...[]byte) (*corev1.ConfigMap, bool, error) { + configBytes, err := MergePrunedProcessConfig(schema, specialCases, configYAMLs...) if err != nil { return nil, false, err } @@ -85,6 +93,44 @@ func MergeProcessConfig(specialCases map[string]MergeFunc, configYAMLs ...[]byte return currentConfigYAML, nil } +// MergePrunedProcessConfig merges a series of config yaml files together with each later one overlaying all previous. +// The result is roundtripped through the given schema if it is non-nil. +func MergePrunedProcessConfig(schema runtime.Object, specialCases map[string]MergeFunc, configYAMLs ...[]byte) ([]byte, error) { + bs, err := MergeProcessConfig(specialCases, configYAMLs...) + if err != nil { + return nil, err + } + + if schema == nil { + return bs, nil + } + + // roundtrip through the schema + typed := schema.DeepCopyObject() + if err := yaml.Unmarshal(bs, typed); err != nil { + return nil, err + } + typedBytes, err := json.Marshal(typed) + if err != nil { + return nil, err + } + var untypedJSON map[string]interface{} + if err := json.Unmarshal(typedBytes, &untypedJSON); err != nil { + return nil, err + } + + // and intersect output with input because we cannot rely on omitempty in the schema + inputBytes, err := yaml.YAMLToJSON(bs) + if err != nil { + return nil, err + } + var inputJSON map[string]interface{} + if err := json.Unmarshal(inputBytes, &inputJSON); err != nil { + return nil, err + } + return json.Marshal(intersectJSON(inputJSON, untypedJSON)) +} + type MergeFunc func(dst, src interface{}, currentPath string) (interface{}, error) // mergeConfig overwrites entries in curr by additional. It modifies curr. 
@@ -132,3 +178,53 @@ func mergeConfig(curr, additional map[string]interface{}, currentPath string, sp return nil } + +// jsonIntersection returns the intersection of both JSON object, +// preferring the values of the first argument. +func intersectJSON(x1, x2 map[string]interface{}) map[string]interface{} { + if x1 == nil || x2 == nil { + return nil + } + ret := map[string]interface{}{} + for k, v1 := range x1 { + v2, ok := x2[k] + if !ok { + continue + } + ret[k] = intersectValue(v1, v2) + } + return ret +} + +func intersectArray(x1, x2 []interface{}) []interface{} { + if x1 == nil || x2 == nil { + return nil + } + ret := make([]interface{}, 0, len(x1)) + for i := range x1 { + if i >= len(x2) { + break + } + ret = append(ret, intersectValue(x1[i], x2[i])) + } + return ret +} + +func intersectValue(x1, x2 interface{}) interface{} { + switch x1 := x1.(type) { + case map[string]interface{}: + x2, ok := x2.(map[string]interface{}) + if !ok { + return x1 + } + return intersectJSON(x1, x2) + case []interface{}: + x2, ok := x2.([]interface{}) + if !ok { + return x1 + } + return intersectArray(x1, x2) + default: + return x1 + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger_test.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger_test.go index b44ad2048..efaff9d6b 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger_test.go @@ -6,6 +6,8 @@ import ( "testing" "k8s.io/apimachinery/pkg/util/diff" + + controlplanev1 "github.com/openshift/api/kubecontrolplane/v1" ) func TestMergeConfig(t *testing.T) { @@ -209,3 +211,63 @@ bravo: two }) } } + +func TestMergePrunedConfig(t *testing.T) { + tests := []struct { + name string + curr string + additional string + specialCases map[string]MergeFunc + + 
expected string + expectedErr string + }{ + { + name: "prune unknown values", + curr: ` +apiVersion: foo +kind: the-kind +alpha: first +`, + additional: ` +consolePublicURL: http://foo/bar +`, + expected: `{"apiVersion":"foo","consolePublicURL":"http://foo/bar","kind":"the-kind"}`, + }, + { + name: "prune unknown values with array", + curr: ` +apiVersion: foo +kind: the-kind +corsAllowedOrigins: +- (?i)//openshift(:|\z) +`, + additional: ` +consolePublicURL: http://foo/bar +`, + expected: `{"apiVersion":"foo","consolePublicURL":"http://foo/bar","corsAllowedOrigins":["(?i)//openshift(:|\\z)"],"kind":"the-kind"}`, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + actual, err := MergePrunedProcessConfig(&controlplanev1.KubeAPIServerConfig{}, test.specialCases, []byte(test.curr), []byte(test.additional)) + switch { + case err == nil && len(test.expectedErr) == 0: + case err == nil && len(test.expectedErr) != 0: + t.Fatalf("missing %q", test.expectedErr) + case err != nil && len(test.expectedErr) == 0: + t.Fatal(err) + case err != nil && len(test.expectedErr) != 0 && !strings.Contains(err.Error(), test.expectedErr): + t.Fatalf("expected %q, got %q", test.expectedErr, err) + } + if err != nil { + return + } + + if test.expected != string(actual) { + t.Error(diff.StringDiff(test.expected, string(actual))) + } + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller.go index 4d4cc4f12..03912f487 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller.go @@ -21,16 +21,14 @@ import ( operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/condition" 
"github.com/openshift/library-go/pkg/operator/events" "github.com/openshift/library-go/pkg/operator/management" "github.com/openshift/library-go/pkg/operator/resource/resourceapply" "github.com/openshift/library-go/pkg/operator/v1helpers" ) -const ( - operatorStatusResourceSyncControllerDegraded = "ResourceSyncControllerDegraded" - controllerWorkQueueKey = "key" -) +const controllerWorkQueueKey = "key" // ResourceSyncController is a controller that will copy source configmaps and secrets to their destinations. // It will also mirror deletions by deleting destinations. @@ -191,7 +189,7 @@ func (c *ResourceSyncController) sync() error { if len(errors) > 0 { cond := operatorv1.OperatorCondition{ - Type: operatorStatusResourceSyncControllerDegraded, + Type: condition.ResourceSyncControllerDegradedConditionType, Status: operatorv1.ConditionTrue, Reason: "Error", Message: v1helpers.NewMultiLineAggregate(errors).Error(), @@ -203,7 +201,7 @@ func (c *ResourceSyncController) sync() error { } cond := operatorv1.OperatorCondition{ - Type: operatorStatusResourceSyncControllerDegraded, + Type: condition.ResourceSyncControllerDegradedConditionType, Status: operatorv1.ConditionFalse, } if _, _, updateError := v1helpers.UpdateStatus(c.operatorConfigClient, v1helpers.UpdateConditionFn(cond)); updateError != nil { diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller_test.go index 6a7b081f2..fdc65191f 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller_test.go @@ -198,7 +198,7 @@ func TestSyncConfigMap(t *testing.T) { "operator": operatorInformers, }), v1helpers.CachedSecretGetter(kubeClient.CoreV1(), kubeInformersForNamespaces), - 
v1helpers.CachedConfigMapGetter(kubeClient.Core(), kubeInformersForNamespaces), + v1helpers.CachedConfigMapGetter(kubeClient.CoreV1(), kubeInformersForNamespaces), eventRecorder, ) c.configMapGetter = kubeClient.CoreV1() diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staleconditions/remove_stale_conditions.go b/vendor/github.com/openshift/library-go/pkg/operator/staleconditions/remove_stale_conditions.go new file mode 100644 index 000000000..f9b94d827 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staleconditions/remove_stale_conditions.go @@ -0,0 +1,116 @@ +package staleconditions + +import ( + "fmt" + "time" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" + + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +const workQueueKey = "key" + +type RemoveStaleConditions struct { + conditions []string + + operatorClient v1helpers.OperatorClient + cachesToSync []cache.InformerSynced + + eventRecorder events.Recorder + // queue only ever has one item, but it has nice error handling backoff/retry semantics + queue workqueue.RateLimitingInterface +} + +func NewRemoveStaleConditions( + conditions []string, + operatorClient v1helpers.OperatorClient, + eventRecorder events.Recorder, +) *RemoveStaleConditions { + c := &RemoveStaleConditions{ + conditions: conditions, + + operatorClient: operatorClient, + eventRecorder: eventRecorder, + cachesToSync: []cache.InformerSynced{operatorClient.Informer().HasSynced}, + + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "RemoveStaleConditions"), + } + + operatorClient.Informer().AddEventHandler(c.eventHandler()) + + return c +} + +func (c RemoveStaleConditions) sync() error { + removeStaleConditionsFn := 
func(status *operatorv1.OperatorStatus) error { + for _, condition := range c.conditions { + v1helpers.RemoveOperatorCondition(&status.Conditions, condition) + } + return nil + } + + if _, _, err := v1helpers.UpdateStatus(c.operatorClient, removeStaleConditionsFn); err != nil { + return err + } + + return nil +} + +// Run starts the kube-scheduler and blocks until stopCh is closed. +func (c *RemoveStaleConditions) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting RemoveStaleConditions") + defer klog.Infof("Shutting down RemoveStaleConditions") + + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) { + utilruntime.HandleError(fmt.Errorf("caches did not sync")) + return + } + + // doesn't matter what workers say, only start one. + go wait.Until(c.runWorker, time.Second, stopCh) + + <-stopCh +} + +func (c *RemoveStaleConditions) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *RemoveStaleConditions) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +// eventHandler queues the operator to check spec and status +func (c *RemoveStaleConditions) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(workQueueKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(workQueueKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(workQueueKey) }, + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_cmd.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_cmd.go index 00586222c..f62218ac3 100644 --- 
a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_cmd.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_cmd.go @@ -49,7 +49,7 @@ func NewCertSyncControllerCommand(configmaps, secrets []revision.RevisionResourc } cmd.Flags().StringVar(&o.DestinationDir, "destination-dir", o.DestinationDir, "Directory to write to") - cmd.Flags().StringVarP(&o.Namespace, "namespace", "n", o.Namespace, "Namespace to read from") + cmd.Flags().StringVarP(&o.Namespace, "namespace", "n", o.Namespace, "Namespace to read from (default to 'POD_NAMESPACE' environment variable)") cmd.Flags().StringVar(&o.KubeConfigFile, "kubeconfig", o.KubeConfigFile, "Location of the master configuration file to run from.") return cmd @@ -66,10 +66,8 @@ func (o *CertSyncControllerOptions) Run() error { observer.AddReactor(fileobserver.ExitOnChangeReactor, map[string][]byte{o.KubeConfigFile: initialContent}, o.KubeConfigFile) stopCh := make(chan struct{}) - go observer.Run(stopCh) kubeInformers := informers.NewSharedInformerFactoryWithOptions(o.kubeClient, 10*time.Minute, informers.WithNamespace(o.Namespace)) - go kubeInformers.Start(stopCh) eventRecorder := events.NewKubeRecorder(o.kubeClient.CoreV1().Events(o.Namespace), "cert-syncer", &corev1.ObjectReference{ @@ -84,13 +82,18 @@ func (o *CertSyncControllerOptions) Run() error { o.Namespace, o.configMaps, o.secrets, + o.kubeClient, kubeInformers, eventRecorder, ) if err != nil { return err } + + // start everything. Informers start after they have been requested. 
go controller.Run(1, stopCh) + go observer.Run(stopCh) + go kubeInformers.Start(stopCh) <-stopCh klog.Infof("Shutting down certificate syncer") @@ -104,6 +107,10 @@ func (o *CertSyncControllerOptions) Complete() error { return err } + if len(o.Namespace) == 0 && len(os.Getenv("POD_NAMESPACE")) > 0 { + o.Namespace = os.Getenv("POD_NAMESPACE") + } + protoKubeConfig := rest.CopyConfig(kubeConfig) protoKubeConfig.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json" protoKubeConfig.ContentType = "application/vnd.kubernetes.protobuf" diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_controller.go index 397adb0f6..e581a4418 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_controller.go @@ -8,16 +8,18 @@ import ( "reflect" "time" - "k8s.io/klog" - apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + corev1interface "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" + "k8s.io/klog" "github.com/openshift/library-go/pkg/operator/events" "github.com/openshift/library-go/pkg/operator/staticpod/controller/revision" @@ -29,7 +31,9 @@ type CertSyncController struct { configMaps []revision.RevisionResource secrets []revision.RevisionResource + configmapGetter corev1interface.ConfigMapInterface configMapLister v1.ConfigMapLister + secretGetter corev1interface.SecretInterface secretLister v1.SecretLister eventRecorder events.Recorder @@ -38,7 
+42,7 @@ type CertSyncController struct { preRunCaches []cache.InformerSynced } -func NewCertSyncController(targetDir, targetNamespace string, configmaps, secrets []revision.RevisionResource, informers informers.SharedInformerFactory, eventRecorder events.Recorder) (*CertSyncController, error) { +func NewCertSyncController(targetDir, targetNamespace string, configmaps, secrets []revision.RevisionResource, kubeClient kubernetes.Interface, informers informers.SharedInformerFactory, eventRecorder events.Recorder) (*CertSyncController, error) { c := &CertSyncController{ destinationDir: targetDir, namespace: targetNamespace, @@ -46,8 +50,10 @@ func NewCertSyncController(targetDir, targetNamespace string, configmaps, secret secrets: secrets, eventRecorder: eventRecorder.WithComponentSuffix("cert-sync-controller"), + configmapGetter: kubeClient.CoreV1().ConfigMaps(targetNamespace), configMapLister: informers.Core().V1().ConfigMaps().Lister(), secretLister: informers.Core().V1().Secrets().Lister(), + secretGetter: kubeClient.CoreV1().Secrets(targetNamespace), queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "CertSyncController"), preRunCaches: []cache.InformerSynced{ @@ -79,18 +85,69 @@ func (c *CertSyncController) sync() error { case apierrors.IsNotFound(err) && !cm.Optional: errors = append(errors, err) continue + case apierrors.IsNotFound(err) && cm.Optional: + // Check with the live call it is really missing + configMap, err = c.configmapGetter.Get(cm.Name, metav1.GetOptions{}) + if err == nil { + klog.Infof("Caches are stale. 
They don't see configmap '%s/%s', yet it is present", configMap.Namespace, configMap.Name) + // We will get re-queued when we observe the change + continue + } + if !apierrors.IsNotFound(err) { + errors = append(errors, err) + continue + } + // remove missing content if err := os.RemoveAll(getConfigMapDir(c.destinationDir, cm.Name)); err != nil { errors = append(errors, err) } continue + case err != nil: errors = append(errors, err) continue } contentDir := getConfigMapDir(c.destinationDir, cm.Name) + + data := map[string]string{} + for filename := range configMap.Data { + fullFilename := filepath.Join(contentDir, filename) + + existingContent, err := ioutil.ReadFile(fullFilename) + if err != nil { + if !os.IsNotExist(err) { + klog.Error(err) + } + continue + } + + data[filename] = string(existingContent) + } + + // Check if cached configmap differs + if reflect.DeepEqual(configMap.Data, data) { + continue + } + + klog.V(2).Infof("Syncing updated configmap '%s/%s'.", configMap.Namespace, configMap.Name) + + // We need to do a live get here so we don't overwrite a newer file with one from a stale cache + configMap, err = c.configmapGetter.Get(configMap.Name, metav1.GetOptions{}) + if err != nil { + // Even if the error is not exists we will act on it when caches catch up + errors = append(errors, err) + continue + } + + // Check if the live configmap differs + if reflect.DeepEqual(configMap.Data, data) { + klog.Infof("Caches are stale. 
The live configmap '%s/%s' is reflected on filesystem, but cached one differs", configMap.Namespace, configMap.Name) + continue + } + klog.Infof("Creating directory %q ...", contentDir) if err := os.MkdirAll(contentDir, 0755); err != nil && !os.IsExist(err) { errors = append(errors, err) @@ -99,7 +156,7 @@ func (c *CertSyncController) sync() error { for filename, content := range configMap.Data { fullFilename := filepath.Join(contentDir, filename) // if the existing is the same, do nothing - if existingContent, err := ioutil.ReadFile(fullFilename); err == nil && reflect.DeepEqual(existingContent, []byte(content)) { + if reflect.DeepEqual(data[fullFilename], content) { continue } @@ -117,18 +174,69 @@ func (c *CertSyncController) sync() error { case apierrors.IsNotFound(err) && !s.Optional: errors = append(errors, err) continue + case apierrors.IsNotFound(err) && s.Optional: + // Check with the live call it is really missing + secret, err = c.secretGetter.Get(s.Name, metav1.GetOptions{}) + if err == nil { + klog.Infof("Caches are stale. 
They don't see secret '%s/%s', yet it is present", secret.Namespace, secret.Name) + // We will get re-queued when we observe the change + continue + } + if !apierrors.IsNotFound(err) { + errors = append(errors, err) + continue + } + // remove missing content if err := os.RemoveAll(getSecretDir(c.destinationDir, s.Name)); err != nil { errors = append(errors, err) } continue + case err != nil: errors = append(errors, err) continue } contentDir := getSecretDir(c.destinationDir, s.Name) + + data := map[string][]byte{} + for filename := range secret.Data { + fullFilename := filepath.Join(contentDir, filename) + + existingContent, err := ioutil.ReadFile(fullFilename) + if err != nil { + if !os.IsNotExist(err) { + klog.Error(err) + } + continue + } + + data[filename] = existingContent + } + + // Check if cached secret differs + if reflect.DeepEqual(secret.Data, data) { + continue + } + + klog.V(2).Infof("Syncing updated secret '%s/%s'.", secret.Namespace, secret.Name) + + // We need to do a live get here so we don't overwrite a newer file with one from a stale cache + secret, err = c.secretGetter.Get(secret.Name, metav1.GetOptions{}) + if err != nil { + // Even if the error is not exists we will act on it when caches catch up + errors = append(errors, err) + continue + } + + // Check if the live secret differs + if reflect.DeepEqual(secret.Data, data) { + klog.Infof("Caches are stale. 
The live secret '%s/%s' is reflected on filesystem, but cached one differs", secret.Namespace, secret.Name) + continue + } + klog.Infof("Creating directory %q ...", contentDir) if err := os.MkdirAll(contentDir, 0755); err != nil && !os.IsExist(err) { errors = append(errors, err) @@ -138,7 +246,7 @@ func (c *CertSyncController) sync() error { // TODO fix permissions fullFilename := filepath.Join(contentDir, filename) // if the existing is the same, do nothing - if existingContent, err := ioutil.ReadFile(fullFilename); err == nil && reflect.DeepEqual(existingContent, content) { + if reflect.DeepEqual(data[fullFilename], content) { continue } @@ -161,6 +269,12 @@ func (c *CertSyncController) Run(workers int, stopCh <-chan struct{}) { klog.Infof("Starting CertSyncer") defer klog.Infof("Shutting down CertSyncer") + if !cache.WaitForCacheSync(stopCh, c.preRunCaches...) { + klog.Error("failed waiting for caches") + return + } + klog.V(2).Infof("CertSyncer caches synced") + // doesn't matter what workers say, only start one. 
go wait.Until(c.runWorker, time.Second, stopCh) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/backing_resource_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/backing_resource_controller.go index 9c55363df..192af0f20 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/backing_resource_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/backing_resource_controller.go @@ -5,6 +5,7 @@ import ( "path/filepath" "time" + "github.com/openshift/library-go/pkg/operator/condition" "github.com/openshift/library-go/pkg/operator/management" "github.com/openshift/library-go/pkg/operator/v1helpers" @@ -27,9 +28,8 @@ import ( ) const ( - operatorStatusBackingResourceControllerDegraded = "BackingResourceControllerDegraded" - controllerWorkQueueKey = "key" - manifestDir = "pkg/operator/staticpod/controller/backingresource" + controllerWorkQueueKey = "key" + manifestDir = "pkg/operator/staticpod/controller/backingresource" ) // BackingResourceController is a controller that watches the operator config and updates @@ -113,7 +113,7 @@ func (c BackingResourceController) sync() error { // update failing condition cond := operatorv1.OperatorCondition{ - Type: operatorStatusBackingResourceControllerDegraded, + Type: condition.BackingResourceControllerDegradedConditionType, Status: operatorv1.ConditionFalse, } if err != nil { diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/backing_resource_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/backing_resource_controller_test.go index 9f08361eb..faf25470e 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/backing_resource_controller_test.go +++ 
b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/backingresource/backing_resource_controller_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + "github.com/openshift/library-go/pkg/operator/condition" "github.com/openshift/library-go/pkg/operator/v1helpers" "k8s.io/api/core/v1" @@ -154,7 +155,7 @@ func TestBackingResourceController(t *testing.T) { ), expectSyncError: `test error`, validateStatus: func(t *testing.T, status *operatorv1.OperatorStatus) { - if status.Conditions[0].Type != operatorStatusBackingResourceControllerDegraded { + if status.Conditions[0].Type != condition.BackingResourceControllerDegradedConditionType { t.Errorf("expected status condition to be failing, got %v", status.Conditions[0].Type) } if status.Conditions[0].Reason != "Error" { diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/installer_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/installer_controller.go index 4e9a780ed..c8821a00e 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/installer_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/installer_controller.go @@ -10,21 +10,24 @@ import ( "time" "github.com/davecgh/go-spew/spew" - "k8s.io/klog" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/informers" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" + "k8s.io/klog" operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/condition" 
"github.com/openshift/library-go/pkg/operator/events" "github.com/openshift/library-go/pkg/operator/loglevel" "github.com/openshift/library-go/pkg/operator/management" @@ -36,11 +39,9 @@ import ( ) const ( - operatorStatusInstallerControllerDegraded = "InstallerControllerDegraded" - nodeInstallerDegraded = "NodeInstallerDegraded" - installerControllerWorkQueueKey = "key" - manifestDir = "pkg/operator/staticpod/controller/installer" - manifestInstallerPodPath = "manifests/installer-pod.yaml" + installerControllerWorkQueueKey = "key" + manifestDir = "pkg/operator/staticpod/controller/installer" + manifestInstallerPodPath = "manifests/installer-pod.yaml" hostResourceDirDir = "/etc/kubernetes/static-pod-resources" hostPodManifestDir = "/etc/kubernetes/manifests" @@ -69,6 +70,7 @@ type InstallerController struct { operatorClient v1helpers.StaticPodOperatorClient configMapsGetter corev1client.ConfigMapsGetter + secretsGetter corev1client.SecretsGetter podsGetter corev1client.PodsGetter cachesToSync []cache.InformerSynced @@ -86,16 +88,16 @@ type InstallerController struct { // InstallerPodMutationFunc is a function that has a chance at changing the installer pod before it is created type InstallerPodMutationFunc func(pod *corev1.Pod, nodeName string, operatorSpec *operatorv1.StaticPodOperatorSpec, revision int32) error -func (o *InstallerController) WithInstallerPodMutationFn(installerPodMutationFn InstallerPodMutationFunc) *InstallerController { - o.installerPodMutationFns = append(o.installerPodMutationFns, installerPodMutationFn) - return o +func (c *InstallerController) WithInstallerPodMutationFn(installerPodMutationFn InstallerPodMutationFunc) *InstallerController { + c.installerPodMutationFns = append(c.installerPodMutationFns, installerPodMutationFn) + return c } -func (o *InstallerController) WithCerts(certDir string, certConfigMaps, certSecrets []revision.RevisionResource) *InstallerController { - o.certDir = certDir - o.certConfigMaps = certConfigMaps - 
o.certSecrets = certSecrets - return o +func (c *InstallerController) WithCerts(certDir string, certConfigMaps, certSecrets []revision.RevisionResource) *InstallerController { + c.certDir = certDir + c.certConfigMaps = certConfigMaps + c.certSecrets = certSecrets + return c } // staticPodState is the status of a static pod that has been installed to a node. @@ -119,6 +121,7 @@ func NewInstallerController( kubeInformersForTargetNamespace informers.SharedInformerFactory, operatorClient v1helpers.StaticPodOperatorClient, configMapsGetter corev1client.ConfigMapsGetter, + secretsGetter corev1client.SecretsGetter, podsGetter corev1client.PodsGetter, eventRecorder events.Recorder, ) *InstallerController { @@ -131,6 +134,7 @@ func NewInstallerController( operatorClient: operatorClient, configMapsGetter: configMapsGetter, + secretsGetter: secretsGetter, podsGetter: podsGetter, eventRecorder: eventRecorder.WithComponentSuffix("installer-controller"), @@ -150,41 +154,40 @@ func NewInstallerController( return c } -func (c *InstallerController) getStaticPodState(nodeName string) (state staticPodState, revision string, errors []string, err error) { +func (c *InstallerController) getStaticPodState(nodeName string) (state staticPodState, revision, reason string, errors []string, err error) { pod, err := c.podsGetter.Pods(c.targetNamespace).Get(mirrorPodNameForNode(c.staticPodName, nodeName), metav1.GetOptions{}) if err != nil { - if apierrors.IsNotFound(err) { - return staticPodStatePending, "", nil, nil - } - return staticPodStatePending, "", nil, err + return staticPodStatePending, "", "", nil, err } switch pod.Status.Phase { case corev1.PodRunning, corev1.PodSucceeded: for _, c := range pod.Status.Conditions { if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue { - return staticPodStateReady, pod.Labels[revisionLabel], nil, nil + return staticPodStateReady, pod.Labels[revisionLabel], "static pod is ready", nil, nil } } + return staticPodStatePending, 
pod.Labels[revisionLabel], "static pod is not ready", nil, nil case corev1.PodFailed: - return staticPodStateFailed, pod.Labels[revisionLabel], []string{pod.Status.Message}, nil + return staticPodStateFailed, pod.Labels[revisionLabel], "static pod has failed", []string{pod.Status.Message}, nil } - return staticPodStatePending, "", nil, nil + return staticPodStatePending, pod.Labels[revisionLabel], fmt.Sprintf("static pod has unknown phase: %v", pod.Status.Phase), nil, nil } // nodeToStartRevisionWith returns a node index i and guarantees for every node < i that it is // - not updating // - ready // - at the revision claimed in CurrentRevision. -func nodeToStartRevisionWith(getStaticPodState func(nodeName string) (state staticPodState, revision string, errors []string, err error), nodes []operatorv1.NodeStatus) (int, error) { +func nodeToStartRevisionWith(getStaticPodState func(nodeName string) (state staticPodState, revision, reason string, errors []string, err error), nodes []operatorv1.NodeStatus) (int, string, error) { if len(nodes) == 0 { - return 0, fmt.Errorf("nodes array cannot be empty") + return 0, "", fmt.Errorf("nodes array cannot be empty") } // find upgrading node as this will be the first to start new revision (to minimize number of down nodes) for i := range nodes { if nodes[i].TargetRevision != 0 { - return i, nil + reason := fmt.Sprintf("node %s is progressing towards %d", nodes[i].NodeName, nodes[i].TargetRevision) + return i, reason, nil } } @@ -193,16 +196,17 @@ func nodeToStartRevisionWith(getStaticPodState func(nodeName string) (state stat oldestNotReadyRevision := math.MaxInt32 for i := range nodes { currNodeState := &nodes[i] - state, revision, _, err := getStaticPodState(currNodeState.NodeName) + state, runningRevision, _, _, err := getStaticPodState(currNodeState.NodeName) if err != nil && apierrors.IsNotFound(err) { - return i, nil + return i, fmt.Sprintf("node %s static pod not found", currNodeState.NodeName), nil } if err != nil { - 
return 0, err + return 0, "", err } - revisionNum, err := strconv.Atoi(revision) + revisionNum, err := strconv.Atoi(runningRevision) if err != nil { - return i, nil + reason := fmt.Sprintf("node %s has an invalid current revision %q", currNodeState.NodeName, runningRevision) + return i, reason, nil } if state != staticPodStateReady && revisionNum < oldestNotReadyRevision { oldestNotReadyRevisionNode = i @@ -210,21 +214,26 @@ func nodeToStartRevisionWith(getStaticPodState func(nodeName string) (state stat } } if oldestNotReadyRevisionNode >= 0 { - return oldestNotReadyRevisionNode, nil + reason := fmt.Sprintf("node %s with revision %d is the oldest not ready", nodes[oldestNotReadyRevisionNode].NodeName, oldestNotReadyRevision) + return oldestNotReadyRevisionNode, reason, nil } - // find a node that is has the wrong revision. Take the oldest one. + // find a node that has the wrong revision. Take the oldest one. oldestPodRevisionNode := -1 oldestPodRevision := math.MaxInt32 for i := range nodes { currNodeState := &nodes[i] - _, revision, _, err := getStaticPodState(currNodeState.NodeName) + _, runningRevision, _, _, err := getStaticPodState(currNodeState.NodeName) + if err != nil && apierrors.IsNotFound(err) { + return i, fmt.Sprintf("node %s static pod not found", currNodeState.NodeName), nil + } if err != nil { - return 0, err + return 0, "", err } - revisionNum, err := strconv.Atoi(revision) + revisionNum, err := strconv.Atoi(runningRevision) if err != nil { - return i, nil + reason := fmt.Sprintf("node %s has an invalid current revision %q", currNodeState.NodeName, runningRevision) + return i, reason, nil } if revisionNum != int(currNodeState.CurrentRevision) && revisionNum < oldestPodRevision { oldestPodRevisionNode = i @@ -232,7 +241,8 @@ func nodeToStartRevisionWith(getStaticPodState func(nodeName string) (state stat } } if oldestPodRevisionNode >= 0 { - return oldestPodRevisionNode, nil + reason := fmt.Sprintf("node %s with revision %d is the oldest not 
matching its expected revision %d", nodes[oldestPodRevisionNode].NodeName, oldestPodRevision, nodes[oldestPodRevisionNode].CurrentRevision) + return oldestPodRevisionNode, reason, nil } // last but not least, choose the one with the older current revision. This will imply that failed installer pods will be retried. @@ -246,10 +256,12 @@ func nodeToStartRevisionWith(getStaticPodState func(nodeName string) (state stat } } if oldestCurrentRevisionNode >= 0 { - return oldestCurrentRevisionNode, nil + reason := fmt.Sprintf("node %s with revision %d is the oldest", nodes[oldestCurrentRevisionNode].NodeName, oldestCurrentRevision) + return oldestCurrentRevisionNode, reason, nil } - return 0, nil + reason := fmt.Sprintf("node %s of revision %d is no worse than any other node, but comes first", nodes[0].NodeName, oldestCurrentRevision) + return 0, reason, nil } // manageInstallationPods takes care of creating content for the static pods to install. @@ -269,7 +281,7 @@ func (c *InstallerController) manageInstallationPods(operatorSpec *operatorv1.St } // start with node which is in worst state (instead of terminating healthy pods first) - startNode, err := nodeToStartRevisionWith(c.getStaticPodState, operatorStatus.NodeStatuses) + startNode, nodeChoiceReason, err := nodeToStartRevisionWith(c.getStaticPodState, operatorStatus.NodeStatuses) if err != nil { return true, err } @@ -283,6 +295,7 @@ func (c *InstallerController) manageInstallationPods(operatorSpec *operatorv1.St if l > 0 { prev := (startNode + l - 1) % len(operatorStatus.NodeStatuses) prevNodeState = &operatorStatus.NodeStatuses[prev] + nodeChoiceReason = fmt.Sprintf("node %s is the next node in the line", currNodeState.NodeName) } // if we are in a transition, check to see whether our installer pod completed @@ -294,7 +307,7 @@ func (c *InstallerController) manageInstallationPods(operatorSpec *operatorv1.St } pendingNewRevision := operatorStatus.LatestAvailableRevision > currNodeState.TargetRevision -
newCurrNodeState, oom, err := c.newNodeStateForInstallInProgress(currNodeState, pendingNewRevision) + newCurrNodeState, installerPodFailed, reason, err := c.newNodeStateForInstallInProgress(currNodeState, pendingNewRevision) if err != nil { return true, err } @@ -302,27 +315,27 @@ func (c *InstallerController) manageInstallationPods(operatorSpec *operatorv1.St // if we make a change to this status, we want to write it out to the API before we commence work on the next node. // it's an extra write/read, but it makes the state debuggable from outside this process if !equality.Semantic.DeepEqual(newCurrNodeState, currNodeState) { - klog.Infof("%q moving to %v", currNodeState.NodeName, spew.Sdump(*newCurrNodeState)) + klog.Infof("%q moving to %v because %s", currNodeState.NodeName, spew.Sdump(*newCurrNodeState), reason) newOperatorStatus, updated, updateError := v1helpers.UpdateStaticPodStatus(c.operatorClient, setNodeStatusFn(newCurrNodeState), setAvailableProgressingNodeInstallerFailingConditions) if updateError != nil { return false, updateError } else if updated && currNodeState.CurrentRevision != newCurrNodeState.CurrentRevision { - c.eventRecorder.Eventf("NodeCurrentRevisionChanged", "Updated node %q from revision %d to %d", currNodeState.NodeName, - currNodeState.CurrentRevision, newCurrNodeState.CurrentRevision) + c.eventRecorder.Eventf("NodeCurrentRevisionChanged", "Updated node %q from revision %d to %d because %s", currNodeState.NodeName, + currNodeState.CurrentRevision, newCurrNodeState.CurrentRevision, reason) } if err := c.updateRevisionStatus(newOperatorStatus); err != nil { klog.Errorf("error updating revision status configmap: %v", err) } return false, nil } else { - klog.V(2).Infof("%q is in transition to %d, but has not made progress", currNodeState.NodeName, currNodeState.TargetRevision) + klog.V(2).Infof("%q is in transition to %d, but has not made progress because %s", currNodeState.NodeName, currNodeState.TargetRevision, reason) } - if !oom { + + 
// We want to retry the installer pod by deleting and then rekicking. Also we don't set LastFailedRevision. + if !installerPodFailed { break } - - // OOM is special. We want to retry the installer pod by falling through here. Also we don't set LastFailedRevision. - klog.V(2).Infof("Retrying %q for revision %d because it was OOM killed", currNodeState.NodeName, currNodeState.TargetRevision) + klog.Infof("Retrying %q for revision %d because %s", currNodeState.NodeName, currNodeState.TargetRevision, reason) installerPodName := getInstallerPodName(currNodeState.TargetRevision, currNodeState.NodeName) if err := c.podsGetter.Pods(c.targetNamespace).Delete(installerPodName, &metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { return true, err @@ -331,10 +344,10 @@ func (c *InstallerController) manageInstallationPods(operatorSpec *operatorv1.St revisionToStart := c.getRevisionToStart(currNodeState, prevNodeState, operatorStatus) if revisionToStart == 0 { - klog.V(4).Infof("%q does not need update", currNodeState.NodeName) + klog.V(4).Infof("%s, but node %s does not need update", nodeChoiceReason, currNodeState.NodeName) continue } - klog.Infof("%q needs new revision %d", currNodeState.NodeName, revisionToStart) + klog.Infof("%s and needs new revision %d", nodeChoiceReason, revisionToStart) newCurrNodeState := currNodeState.DeepCopy() newCurrNodeState.TargetRevision = revisionToStart @@ -347,8 +360,8 @@ func (c *InstallerController) manageInstallationPods(operatorSpec *operatorv1.St if _, updated, updateError := v1helpers.UpdateStaticPodStatus(c.operatorClient, setNodeStatusFn(newCurrNodeState), setAvailableProgressingNodeInstallerFailingConditions); updateError != nil { return false, updateError } else if updated && currNodeState.TargetRevision != newCurrNodeState.TargetRevision && newCurrNodeState.TargetRevision != 0 { - c.eventRecorder.Eventf("NodeTargetRevisionChanged", "Updating node %q from revision %d to %d", currNodeState.NodeName, - 
currNodeState.CurrentRevision, newCurrNodeState.TargetRevision) + c.eventRecorder.Eventf("NodeTargetRevisionChanged", "Updating node %q from revision %d to %d because %s", currNodeState.NodeName, + currNodeState.CurrentRevision, newCurrNodeState.TargetRevision, nodeChoiceReason) } return false, nil @@ -419,20 +432,17 @@ func setAvailableProgressingNodeInstallerFailingConditions(newStatus *operatorv1 failingCount := map[int32]int{} failing := map[int32][]string{} for _, currNodeStatus := range newStatus.NodeStatuses { + counts[currNodeStatus.CurrentRevision] = counts[currNodeStatus.CurrentRevision] + 1 if currNodeStatus.CurrentRevision != 0 { numAvailable++ } // keep track of failures so that we can report failing status if currNodeStatus.LastFailedRevision != 0 { - existing := failingCount[currNodeStatus.CurrentRevision] - failingCount[currNodeStatus.CurrentRevision] = existing + 1 + failingCount[currNodeStatus.LastFailedRevision] = failingCount[currNodeStatus.LastFailedRevision] + 1 failing[currNodeStatus.LastFailedRevision] = append(failing[currNodeStatus.LastFailedRevision], currNodeStatus.LastFailedRevisionErrors...) 
} - existing := counts[currNodeStatus.CurrentRevision] - counts[currNodeStatus.CurrentRevision] = existing + 1 - if newStatus.LatestAvailableRevision == currNodeStatus.CurrentRevision { numAtLatestRevision += 1 } else { @@ -441,8 +451,13 @@ func setAvailableProgressingNodeInstallerFailingConditions(newStatus *operatorv1 } revisionStrings := []string{} - for revision, count := range counts { - revisionStrings = append(revisionStrings, fmt.Sprintf("%d nodes are at revision %d", count, revision)) + for _, currentRevision := range Int32KeySet(counts).List() { + count := counts[currentRevision] + revisionStrings = append(revisionStrings, fmt.Sprintf("%d nodes are at revision %d", count, currentRevision)) + } + // if we are progressing and no nodes have achieved that level, we should indicate + if numProgressing > 0 && counts[newStatus.LatestAvailableRevision] == 0 { + revisionStrings = append(revisionStrings, fmt.Sprintf("%d nodes have achieved new revision %d", 0, newStatus.LatestAvailableRevision)) } revisionDescription := strings.Join(revisionStrings, "; ") @@ -479,24 +494,21 @@ func setAvailableProgressingNodeInstallerFailingConditions(newStatus *operatorv1 if len(failing) > 0 { failingStrings := []string{} - for failingRevision, errorStrings := range failing { - // Do not report failing for nodes that are actually not failing. 
- if failingCount[failingRevision] == 0 { - continue - } + for _, failingRevision := range Int32KeySet(failing).List() { + errorStrings := failing[failingRevision] failingStrings = append(failingStrings, fmt.Sprintf("%d nodes are failing on revision %d:\n%v", failingCount[failingRevision], failingRevision, strings.Join(errorStrings, "\n"))) } failingDescription := strings.Join(failingStrings, "; ") v1helpers.SetOperatorCondition(&newStatus.Conditions, operatorv1.OperatorCondition{ - Type: nodeInstallerDegraded, + Type: condition.NodeInstallerDegradedConditionType, Status: operatorv1.ConditionTrue, Reason: "InstallerPodFailed", Message: failingDescription, }) } else { v1helpers.SetOperatorCondition(&newStatus.Conditions, operatorv1.OperatorCondition{ - Type: nodeInstallerDegraded, + Type: condition.NodeInstallerDegradedConditionType, Status: operatorv1.ConditionFalse, }) } @@ -505,63 +517,81 @@ func setAvailableProgressingNodeInstallerFailingConditions(newStatus *operatorv1 } // newNodeStateForInstallInProgress returns the new NodeState, whether it was killed by OOM or an error -func (c *InstallerController) newNodeStateForInstallInProgress(currNodeState *operatorv1.NodeStatus, newRevisionPending bool) (status *operatorv1.NodeStatus, oom bool, err error) { +func (c *InstallerController) newNodeStateForInstallInProgress(currNodeState *operatorv1.NodeStatus, newRevisionPending bool) (status *operatorv1.NodeStatus, installerPodFailed bool, reason string, err error) { ret := currNodeState.DeepCopy() installerPod, err := c.podsGetter.Pods(c.targetNamespace).Get(getInstallerPodName(currNodeState.TargetRevision, currNodeState.NodeName), metav1.GetOptions{}) if apierrors.IsNotFound(err) { ret.LastFailedRevision = currNodeState.TargetRevision ret.TargetRevision = currNodeState.CurrentRevision ret.LastFailedRevisionErrors = []string{err.Error()} - return ret, false, nil + return ret, false, "installer pod was not found", nil } if err != nil { - return nil, false, err + return 
nil, false, "", err } failed := false errors := []string{} + reason = "" switch installerPod.Status.Phase { case corev1.PodSucceeded: if newRevisionPending { // stop early, don't wait for ready static pod because a new revision is waiting - failed = true - errors = append(errors, "static pod has been installed, but is not ready while new revision is pending") - break + ret.LastFailedRevision = currNodeState.TargetRevision + ret.TargetRevision = 0 + ret.LastFailedRevisionErrors = []string{"static pod has been installed, but is not ready while new revision is pending"} + return ret, false, "new revision pending", nil } - state, revision, failedErrors, err := c.getStaticPodState(currNodeState.NodeName) + state, currentRevision, staticPodReason, failedErrors, err := c.getStaticPodState(currNodeState.NodeName) + if err != nil && apierrors.IsNotFound(err) { + // pod not launched yet + // TODO: have a timeout here and retry the installer + reason = "static pod is pending" + break + } if err != nil { - return nil, false, err + return nil, false, "", err } - if revision != strconv.Itoa(int(currNodeState.TargetRevision)) { + if currentRevision != strconv.Itoa(int(currNodeState.TargetRevision)) { // new updated pod to be launched + if len(currentRevision) == 0 { + reason = fmt.Sprintf("waiting for static pod of revision %d", currNodeState.TargetRevision) + } else { + reason = fmt.Sprintf("waiting for static pod of revision %d, found %s", currNodeState.TargetRevision, currentRevision) + } break } switch state { case staticPodStateFailed: failed = true + reason = staticPodReason errors = failedErrors case staticPodStateReady: - ret.CurrentRevision = currNodeState.TargetRevision + if currNodeState.TargetRevision > ret.CurrentRevision { + ret.CurrentRevision = currNodeState.TargetRevision + } ret.TargetRevision = 0 ret.LastFailedRevision = 0 ret.LastFailedRevisionErrors = nil - return ret, false, nil + return ret, false, staticPodReason, nil + default: + reason =
"static pod is pending" } case corev1.PodFailed: failed = true + reason = "installer pod failed" for _, containerStatus := range installerPod.Status.ContainerStatuses { if containerStatus.State.Terminated != nil && len(containerStatus.State.Terminated.Message) > 0 { errors = append(errors, fmt.Sprintf("%s: %s", containerStatus.Name, containerStatus.State.Terminated.Message)) - if containerStatus.State.Terminated.Reason == "OOMKilled" { - // do not set LastFailedRevision - return currNodeState, true, nil - } + c.eventRecorder.Warningf("InstallerPodFailed", "installer errors: %v", strings.Join(errors, "\n")) + // do not set LastFailedRevision + return currNodeState, true, fmt.Sprintf("installer pod failed: %v", strings.Join(errors, "\n")), nil } } } @@ -573,10 +603,10 @@ func (c *InstallerController) newNodeStateForInstallInProgress(currNodeState *op errors = append(errors, "no detailed termination message, see `oc get -n %q pods/%q -oyaml`", installerPod.Namespace, installerPod.Name) } ret.LastFailedRevisionErrors = errors - return ret, false, nil + return ret, false, "installer pod failed", nil } - return ret, false, nil + return ret, false, reason, nil } // getRevisionToStart returns the revision we need to start or zero if none @@ -700,6 +730,76 @@ func getInstallerPodImageFromEnv() string { return os.Getenv("OPERATOR_IMAGE") } +func (c InstallerController) ensureSecretRevisionResourcesExists(secrets []revision.RevisionResource, hasRevisionSuffix bool, latestRevisionNumber int32) error { + missing := sets.NewString() + for _, secret := range secrets { + if secret.Optional { + continue + } + name := secret.Name + if !hasRevisionSuffix { + name = fmt.Sprintf("%s-%d", name, latestRevisionNumber) + } + _, err := c.secretsGetter.Secrets(c.targetNamespace).Get(name, metav1.GetOptions{}) + if err == nil { + continue + } + if apierrors.IsNotFound(err) { + missing.Insert(name) + } + } + if missing.Len() == 0 { + return nil + } + return fmt.Errorf("secrets: %s", 
strings.Join(missing.List(), ",")) +} + +func (c InstallerController) ensureConfigMapRevisionResourcesExists(configs []revision.RevisionResource, hasRevisionSuffix bool, latestRevisionNumber int32) error { + missing := sets.NewString() + for _, config := range configs { + if config.Optional { + continue + } + name := config.Name + if !hasRevisionSuffix { + name = fmt.Sprintf("%s-%d", name, latestRevisionNumber) + } + _, err := c.configMapsGetter.ConfigMaps(c.targetNamespace).Get(name, metav1.GetOptions{}) + if err == nil { + continue + } + if apierrors.IsNotFound(err) { + missing.Insert(name) + } + } + if missing.Len() == 0 { + return nil + } + return fmt.Errorf("configmaps: %s", strings.Join(missing.List(), ",")) +} + +// ensureRequiredResourcesExist makes sure that all non-optional resources are ready or it will return an error to trigger a requeue so that we try again. +func (c InstallerController) ensureRequiredResourcesExist(revisionNumber int32) error { + errs := []error{} + + errs = append(errs, c.ensureConfigMapRevisionResourcesExists(c.certConfigMaps, true, revisionNumber)) + errs = append(errs, c.ensureConfigMapRevisionResourcesExists(c.configMaps, false, revisionNumber)) + errs = append(errs, c.ensureSecretRevisionResourcesExists(c.certSecrets, true, revisionNumber)) + errs = append(errs, c.ensureSecretRevisionResourcesExists(c.secrets, false, revisionNumber)) + + aggregatedErr := utilerrors.NewAggregate(errs) + if aggregatedErr == nil { + return nil + } + + eventMessages := []string{} + for _, err := range aggregatedErr.Errors() { + eventMessages = append(eventMessages, err.Error()) + } + c.eventRecorder.Warningf("RequiredInstallerResourcesMissing", strings.Join(eventMessages, ", ")) + return fmt.Errorf("missing required resources: %v", aggregatedErr) +} + func (c InstallerController) sync() error { operatorSpec, originalOperatorStatus, resourceVersion, err := c.operatorClient.GetStaticPodOperatorState() if err != nil { @@ -711,15 +811,21 @@ func (c 
InstallerController) sync() error { return nil } - requeue, syncErr := c.manageInstallationPods(operatorSpec, operatorStatus, resourceVersion) - if requeue && syncErr == nil { - return fmt.Errorf("synthetic requeue request") + err = c.ensureRequiredResourcesExist(originalOperatorStatus.LatestAvailableRevision) + + // Only manage installation pods when all required certs are present. + if err == nil { + requeue, syncErr := c.manageInstallationPods(operatorSpec, operatorStatus, resourceVersion) + if requeue && syncErr == nil { + return fmt.Errorf("synthetic requeue request") + } + err = syncErr } - err = syncErr - // update failing condition + // Update failing condition + // If required certs are missing, this will report degraded as we can't create installer pods because of this pre-condition. cond := operatorv1.OperatorCondition{ - Type: operatorStatusInstallerControllerDegraded, + Type: condition.InstallerControllerDegradedConditionType, Status: operatorv1.ConditionFalse, } if err != nil { diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/installer_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/installer_controller_test.go index 0d65b4811..74e4ea4ae 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/installer_controller_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/installer_controller_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -20,21 +20,30 @@ import ( "k8s.io/client-go/util/workqueue" operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/condition" "github.com/openshift/library-go/pkg/operator/events" + 
"github.com/openshift/library-go/pkg/operator/events/eventstesting" "github.com/openshift/library-go/pkg/operator/staticpod/controller/revision" "github.com/openshift/library-go/pkg/operator/v1helpers" ) func TestNewNodeStateForInstallInProgress(t *testing.T) { - kubeClient := fake.NewSimpleClientset() + kubeClient := fake.NewSimpleClientset( + &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: "test-config"}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: "test-secret"}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: fmt.Sprintf("%s-%d", "test-secret", 1)}}, + &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: fmt.Sprintf("%s-%d", "test-config", 1)}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: fmt.Sprintf("%s-%d", "test-secret", 2)}}, + &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: fmt.Sprintf("%s-%d", "test-config", 2)}}, + ) - var installerPod *v1.Pod + var installerPod *corev1.Pod kubeClient.PrependReactor("create", "pods", func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { if installerPod != nil { return true, nil, errors.NewAlreadyExists(schema.GroupResource{Resource: "pods"}, installerPod.Name) } - installerPod = action.(ktesting.CreateAction).GetObject().(*v1.Pod) + installerPod = action.(ktesting.CreateAction).GetObject().(*corev1.Pod) kubeClient.PrependReactor("get", "pods", getPodsReactor(installerPod)) return true, installerPod, nil }) @@ -57,9 +66,10 @@ func TestNewNodeStateForInstallInProgress(t *testing.T) { }, }, nil, + nil, ) - eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &v1.ObjectReference{}) + eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &corev1.ObjectReference{}) podCommand := []string{"/bin/true", "--foo=test", "--bar"} c := NewInstallerController( "test", "test-pod", @@ -70,6 +80,7 
@@ func TestNewNodeStateForInstallInProgress(t *testing.T) { fakeStaticPodOperatorClient, kubeClient.CoreV1(), kubeClient.CoreV1(), + kubeClient.CoreV1(), eventRecorder, ) c.ownerRefsFn = func(revision int32) ([]metav1.OwnerReference, error) { @@ -136,7 +147,7 @@ func TestNewNodeStateForInstallInProgress(t *testing.T) { } t.Log("installer succeeded") - installerPod.Status.Phase = v1.PodSucceeded + installerPod.Status.Phase = corev1.PodSucceeded if err := c.sync(); err != nil { t.Fatal(err) @@ -148,21 +159,21 @@ func TestNewNodeStateForInstallInProgress(t *testing.T) { } t.Log("static pod launched, but is not ready") - staticPod := &v1.Pod{ + staticPod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "test-pod-test-node-1", Namespace: "test", Labels: map[string]string{"revision": "1"}, }, - Spec: v1.PodSpec{}, - Status: v1.PodStatus{ - Conditions: []v1.PodCondition{ + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + Conditions: []corev1.PodCondition{ { - Status: v1.ConditionFalse, - Type: v1.PodReady, + Status: corev1.ConditionFalse, + Type: corev1.PodReady, }, }, - Phase: v1.PodRunning, + Phase: corev1.PodRunning, }, } kubeClient.PrependReactor("get", "pods", getPodsReactor(staticPod)) @@ -177,7 +188,7 @@ func TestNewNodeStateForInstallInProgress(t *testing.T) { } t.Log("static pod is ready") - staticPod.Status.Conditions[0].Status = v1.ConditionTrue + staticPod.Status.Conditions[0].Status = corev1.ConditionTrue if err := c.sync(); err != nil { t.Fatal(err) @@ -195,12 +206,12 @@ func TestNewNodeStateForInstallInProgress(t *testing.T) { fakeStaticPodOperatorClient.UpdateStaticPodOperatorStatus("1", currStatus) installerPod.Name = "installer-2-test-node-1" - installerPod.Status.Phase = v1.PodFailed - installerPod.Status.ContainerStatuses = []v1.ContainerStatus{ + installerPod.Status.Phase = corev1.PodFailed + installerPod.Status.ContainerStatuses = []corev1.ContainerStatus{ { Name: "installer", - State: v1.ContainerState{ - Terminated: 
&v1.ContainerStateTerminated{Message: "fake death"}, + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{Message: "fake death"}, }, }, } @@ -209,16 +220,13 @@ func TestNewNodeStateForInstallInProgress(t *testing.T) { } _, currStatus, _, _ = fakeStaticPodOperatorClient.GetStaticPodOperatorState() - if generation := currStatus.NodeStatuses[0].LastFailedRevision; generation != 2 { - t.Errorf("expected last failed revision generation for node to be 2, got %d", generation) + if generation := currStatus.NodeStatuses[0].LastFailedRevision; generation != 0 { + t.Errorf("expected last failed revision generation for node to be 0, got %d", generation) } - if errors := currStatus.NodeStatuses[0].LastFailedRevisionErrors; len(errors) > 0 { - if errors[0] != "installer: fake death" { - t.Errorf("expected the error to be set to 'fake death', got %#v", errors) - } - } else { - t.Errorf("expected errors to be not empty") + // installer pod failures are suppressed + if errors := currStatus.NodeStatuses[0].LastFailedRevisionErrors; len(errors) != 0 { + t.Error(errors) } if v1helpers.FindOperatorCondition(currStatus.Conditions, operatorv1.OperatorStatusTypeProgressing) == nil { @@ -229,7 +237,7 @@ func TestNewNodeStateForInstallInProgress(t *testing.T) { } } -func getPodsReactor(pods ...*v1.Pod) ktesting.ReactionFunc { +func getPodsReactor(pods ...*corev1.Pod) ktesting.ReactionFunc { return func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { podName := action.(ktesting.GetAction).GetName() for _, p := range pods { @@ -242,11 +250,16 @@ func getPodsReactor(pods ...*v1.Pod) ktesting.ReactionFunc { } func TestCreateInstallerPod(t *testing.T) { - kubeClient := fake.NewSimpleClientset() + kubeClient := fake.NewSimpleClientset( + &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: "test-config"}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: "test-secret"}}, + &corev1.Secret{ObjectMeta: 
metav1.ObjectMeta{Namespace: "test", Name: fmt.Sprintf("%s-%d", "test-secret", 1)}}, + &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "test", Name: fmt.Sprintf("%s-%d", "test-config", 1)}}, + ) - var installerPod *v1.Pod + var installerPod *corev1.Pod kubeClient.PrependReactor("create", "pods", func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { - installerPod = action.(ktesting.CreateAction).GetObject().(*v1.Pod) + installerPod = action.(ktesting.CreateAction).GetObject().(*corev1.Pod) return false, nil, nil }) kubeInformers := informers.NewSharedInformerFactoryWithOptions(kubeClient, 1*time.Minute, informers.WithNamespace("test")) @@ -268,8 +281,9 @@ func TestCreateInstallerPod(t *testing.T) { }, }, nil, + nil, ) - eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &v1.ObjectReference{}) + eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &corev1.ObjectReference{}) c := NewInstallerController( "test", "test-pod", @@ -280,6 +294,7 @@ func TestCreateInstallerPod(t *testing.T) { fakeStaticPodOperatorClient, kubeClient.CoreV1(), kubeClient.CoreV1(), + kubeClient.CoreV1(), eventRecorder, ) c.ownerRefsFn = func(revision int32) ([]metav1.OwnerReference, error) { @@ -409,9 +424,9 @@ func TestEnsureInstallerPod(t *testing.T) { t.Run(tt.name, func(t *testing.T) { kubeClient := fake.NewSimpleClientset() - var installerPod *v1.Pod + var installerPod *corev1.Pod kubeClient.PrependReactor("create", "pods", func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { - installerPod = action.(ktesting.CreateAction).GetObject().(*v1.Pod) + installerPod = action.(ktesting.CreateAction).GetObject().(*corev1.Pod) return false, nil, nil }) kubeInformers := informers.NewSharedInformerFactoryWithOptions(kubeClient, 1*time.Minute, informers.WithNamespace("test")) @@ -433,8 +448,9 @@ func TestEnsureInstallerPod(t *testing.T) { }, }, nil, + nil, ) - 
eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &v1.ObjectReference{}) + eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &corev1.ObjectReference{}) c := NewInstallerController( "test", "test-pod", @@ -445,6 +461,7 @@ func TestEnsureInstallerPod(t *testing.T) { fakeStaticPodOperatorClient, kubeClient.CoreV1(), kubeClient.CoreV1(), + kubeClient.CoreV1(), eventRecorder, ) c.ownerRefsFn = func(revision int32) ([]metav1.OwnerReference, error) { @@ -480,23 +497,23 @@ func TestEnsureInstallerPod(t *testing.T) { } func TestCreateInstallerPodMultiNode(t *testing.T) { - newStaticPod := func(name string, revision int, phase v1.PodPhase, ready bool) *v1.Pod { - condStatus := v1.ConditionTrue + newStaticPod := func(name string, revision int, phase corev1.PodPhase, ready bool) *corev1.Pod { + condStatus := corev1.ConditionTrue if !ready { - condStatus = v1.ConditionFalse + condStatus = corev1.ConditionFalse } - return &v1.Pod{ + return &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: "test", Labels: map[string]string{"revision": strconv.Itoa(revision)}, }, - Spec: v1.PodSpec{}, - Status: v1.PodStatus{ - Conditions: []v1.PodCondition{ + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{ + Conditions: []corev1.PodCondition{ { Status: condStatus, - Type: v1.PodReady, + Type: corev1.PodReady, }, }, Phase: phase, @@ -507,7 +524,7 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { tests := []struct { name string nodeStatuses []operatorv1.NodeStatus - staticPods []*v1.Pod + staticPods []*corev1.Pod latestAvailableRevision int32 expectedUpgradeOrder []int expectedSyncError []bool @@ -547,10 +564,10 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { CurrentRevision: 1, }, }, - staticPods: []*v1.Pod{ - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, v1.PodRunning, true), - 
newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, v1.PodRunning, true), + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodRunning, true), }, expectedUpgradeOrder: []int{0, 1, 2}, }, @@ -572,10 +589,10 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { CurrentRevision: 1, }, }, - staticPods: []*v1.Pod{ - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, v1.PodRunning, true), + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodRunning, true), }, expectedUpgradeOrder: []int{1, 0, 2}, }, @@ -597,10 +614,10 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { CurrentRevision: 1, }, }, - staticPods: []*v1.Pod{ - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 2, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, v1.PodRunning, true), + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 2, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodRunning, true), }, expectedUpgradeOrder: []int{1, 0, 2}, }, @@ -622,10 +639,10 @@ func TestCreateInstallerPodMultiNode(t 
*testing.T) { CurrentRevision: 1, }, }, - staticPods: []*v1.Pod{ - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 2, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, v1.PodRunning, true), + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 2, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodRunning, true), }, expectedUpgradeOrder: []int{}, }, @@ -647,10 +664,10 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { CurrentRevision: 2, }, }, - staticPods: []*v1.Pod{ - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 2, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 2, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 2, v1.PodRunning, true), + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 2, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 2, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 2, corev1.PodRunning, true), }, expectedUpgradeOrder: []int{0, 1, 2}, }, @@ -671,10 +688,10 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { CurrentRevision: 1, }, }, - staticPods: []*v1.Pod{ - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, v1.PodRunning, false), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, v1.PodRunning, true), + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", 
"test-node-2"), 1, corev1.PodRunning, false), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodRunning, true), }, expectedUpgradeOrder: []int{1, 0, 2}, }, @@ -695,10 +712,10 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { CurrentRevision: 1, }, }, - staticPods: []*v1.Pod{ - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 2, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, v1.PodRunning, true), + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 2, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodRunning, true), }, // we call sync 2*3 times: // 1. notice update of node 1 @@ -727,10 +744,10 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { CurrentRevision: 1, }, }, - staticPods: []*v1.Pod{ - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, v1.PodRunning, false), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, v1.PodRunning, false), + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, corev1.PodRunning, false), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodRunning, false), }, expectedUpgradeOrder: []int{1, 2, 0}, }, @@ -751,10 +768,10 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { CurrentRevision: 1, }, }, - staticPods: []*v1.Pod{ - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 2, v1.PodRunning, false), - 
newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, v1.PodRunning, true), + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 2, corev1.PodRunning, false), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodRunning, true), }, expectedUpgradeOrder: []int{1, 0, 2}, }, @@ -775,10 +792,10 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { CurrentRevision: 1, }, }, - staticPods: []*v1.Pod{ - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 4, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, v1.PodRunning, true), + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 4, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodRunning, true), }, expectedUpgradeOrder: []int{1, 2, 0}, }, @@ -799,10 +816,10 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { CurrentRevision: 1, }, }, - staticPods: []*v1.Pod{ - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 3, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, v1.PodSucceeded, true), + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 3, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodSucceeded, true), }, expectedUpgradeOrder: []int{1, 2}, }, @@ -823,10 +840,10 @@ func TestCreateInstallerPodMultiNode(t 
*testing.T) { CurrentRevision: 1, }, }, - staticPods: []*v1.Pod{ - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 3, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 2, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, v1.PodSucceeded, true), + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 3, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 2, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 1, corev1.PodSucceeded, true), }, expectedUpgradeOrder: []int{2, 1}, }, @@ -847,10 +864,10 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { CurrentRevision: 2, }, }, - staticPods: []*v1.Pod{ - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 2, v1.PodRunning, true), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, v1.PodRunning, false), - newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 2, v1.PodRunning, false), + staticPods: []*corev1.Pod{ + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-1"), 2, corev1.PodRunning, true), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-2"), 1, corev1.PodRunning, false), + newStaticPod(mirrorPodNameForNode("test-pod", "test-node-3"), 2, corev1.PodRunning, false), }, expectedUpgradeOrder: []int{1, 2, 0}, }, @@ -870,9 +887,11 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { for i, test := range tests { t.Run(test.name, func(t *testing.T) { - createdInstallerPods := []*v1.Pod{} - installerPods := map[string]*v1.Pod{} - updatedStaticPods := map[string]*v1.Pod{} + createdInstallerPods := []*corev1.Pod{} + installerPods := map[string]*corev1.Pod{} + updatedStaticPods := map[string]*corev1.Pod{} + + namespace := fmt.Sprintf("test-%d", i) installerNodeAndID := func(installerName string) (string, int) { ss := strings.SplitN(strings.TrimPrefix(installerName, 
"installer-"), "-", 2) @@ -883,23 +902,28 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { return ss[1], id } - kubeClient := fake.NewSimpleClientset() + kubeClient := fake.NewSimpleClientset( + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: "test-secret"}}, + &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: "test-config"}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: fmt.Sprintf("%s-%d", "test-secret", test.latestAvailableRevision)}}, + &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: fmt.Sprintf("%s-%d", "test-config", test.latestAvailableRevision)}}, + ) kubeClient.PrependReactor("create", "pods", func(action ktesting.Action) (handled bool, ret runtime.Object, err error) { - createdPod := action.(ktesting.CreateAction).GetObject().(*v1.Pod) + createdPod := action.(ktesting.CreateAction).GetObject().(*corev1.Pod) createdInstallerPods = append(createdInstallerPods, createdPod) if _, found := installerPods[createdPod.Name]; found { - return false, nil, errors.NewAlreadyExists(v1.SchemeGroupVersion.WithResource("pods").GroupResource(), createdPod.Name) + return false, nil, errors.NewAlreadyExists(corev1.SchemeGroupVersion.WithResource("pods").GroupResource(), createdPod.Name) } installerPods[createdPod.Name] = createdPod if test.numOfInstallersOOM > 0 { test.numOfInstallersOOM-- - createdPod.Status.Phase = v1.PodFailed - createdPod.Status.ContainerStatuses = []v1.ContainerStatus{ + createdPod.Status.Phase = corev1.PodFailed + createdPod.Status.ContainerStatuses = []corev1.ContainerStatus{ { Name: "container", - State: v1.ContainerState{ - Terminated: &v1.ContainerStateTerminated{ + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ ExitCode: 1, Reason: "OOMKilled", Message: "killed by OOM", @@ -912,12 +936,12 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { // Once the installer pod is created, set its status to 
succeeded. // Note that in reality, this will probably take couple sync cycles to happen, however it is useful to do this fast // to rule out timing bugs. - createdPod.Status.Phase = v1.PodSucceeded + createdPod.Status.Phase = corev1.PodSucceeded nodeName, id := installerNodeAndID(createdPod.Name) staticPodName := mirrorPodNameForNode("test-pod", nodeName) - updatedStaticPods[staticPodName] = newStaticPod(staticPodName, id, v1.PodRunning, true) + updatedStaticPods[staticPodName] = newStaticPod(staticPodName, id, corev1.PodRunning, true) } return true, nil, nil @@ -972,12 +996,13 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { NodeStatuses: test.nodeStatuses, }, statusUpdateErrorFunc, + nil, ) - eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &v1.ObjectReference{}) + eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &corev1.ObjectReference{}) c := NewInstallerController( - fmt.Sprintf("test-%d", i), "test-pod", + namespace, "test-pod", []revision.RevisionResource{{Name: "test-config"}}, []revision.RevisionResource{{Name: "test-secret"}}, []string{"/bin/true"}, @@ -985,6 +1010,7 @@ func TestCreateInstallerPodMultiNode(t *testing.T) { fakeStaticPodOperatorClient, kubeClient.CoreV1(), kubeClient.CoreV1(), + kubeClient.CoreV1(), eventRecorder, ) c.ownerRefsFn = func(revision int32) ([]metav1.OwnerReference, error) { @@ -1223,15 +1249,15 @@ func TestNodeToStartRevisionWith(t *testing.T) { }, } { t.Run(test.name, func(t *testing.T) { - fakeGetStaticPodState := func(nodeName string) (state staticPodState, revision string, errs []string, err error) { + fakeGetStaticPodState := func(nodeName string) (state staticPodState, revision, reason string, errs []string, err error) { for _, p := range test.pods { if p.name == nodeName { - return p.state, strconv.Itoa(int(p.revision)), nil, nil + return p.state, strconv.Itoa(int(p.revision)), "", nil, nil } } - return staticPodStatePending, "", nil, 
errors.NewNotFound(schema.GroupResource{Resource: "pods"}, nodeName) + return staticPodStatePending, "", "", nil, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, nodeName) } - i, err := nodeToStartRevisionWith(fakeGetStaticPodState, test.nodes) + i, _, err := nodeToStartRevisionWith(fakeGetStaticPodState, test.nodes) if err == nil && test.expectedErr { t.Fatalf("expected error, got none") } @@ -1306,7 +1332,7 @@ func TestSetConditions(t *testing.T) { t.Errorf("Progressing condition: expected status %v, actual status %v", tc.expectedProgressingStatus, pendingCondition.Status) } - failingCondition := v1helpers.FindOperatorCondition(status.Conditions, nodeInstallerDegraded) + failingCondition := v1helpers.FindOperatorCondition(status.Conditions, condition.NodeInstallerDegradedConditionType) if failingCondition == nil { t.Error("Failing condition: not found") } else if failingCondition.Status != tc.expectedFailingStatus { @@ -1316,3 +1342,106 @@ func TestSetConditions(t *testing.T) { } } + +func TestEnsureRequiredResources(t *testing.T) { + tests := []struct { + name string + certConfigMaps []revision.RevisionResource + certSecrets []revision.RevisionResource + + revisionNumber int32 + configMaps []revision.RevisionResource + secrets []revision.RevisionResource + + startingResources []runtime.Object + expectedErr string + }{ + { + name: "none", + }, + { + name: "skip-optional", + certConfigMaps: []revision.RevisionResource{ + {Name: "foo-cm", Optional: true}, + }, + certSecrets: []revision.RevisionResource{ + {Name: "foo-s", Optional: true}, + }, + }, + { + name: "wait-required", + configMaps: []revision.RevisionResource{ + {Name: "foo-cm"}, + }, + secrets: []revision.RevisionResource{ + {Name: "foo-s"}, + }, + expectedErr: "missing required resources: [configmaps: foo-cm-0, secrets: foo-s-0]", + }, + { + name: "found-required", + configMaps: []revision.RevisionResource{ + {Name: "foo-cm"}, + }, + secrets: []revision.RevisionResource{ + {Name: "foo-s"}, + }, 
+ startingResources: []runtime.Object{ + &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "foo-cm-0"}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "foo-s-0"}}, + }, + }, + { + name: "wait-required-certs", + certConfigMaps: []revision.RevisionResource{ + {Name: "foo-cm"}, + }, + certSecrets: []revision.RevisionResource{ + {Name: "foo-s"}, + }, + expectedErr: "missing required resources: [configmaps: foo-cm, secrets: foo-s]", + }, + { + name: "found-required-certs", + certConfigMaps: []revision.RevisionResource{ + {Name: "foo-cm"}, + }, + certSecrets: []revision.RevisionResource{ + {Name: "foo-s"}, + }, + startingResources: []runtime.Object{ + &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "foo-cm"}}, + &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "foo-s"}}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + client := fake.NewSimpleClientset(test.startingResources...) 
+ c := &InstallerController{ + targetNamespace: "ns", + certConfigMaps: test.certConfigMaps, + certSecrets: test.certSecrets, + configMaps: test.configMaps, + secrets: test.secrets, + eventRecorder: eventstesting.NewTestingEventRecorder(t), + + configMapsGetter: client.CoreV1(), + secretsGetter: client.CoreV1(), + } + + actual := c.ensureRequiredResourcesExist(test.revisionNumber) + switch { + case len(test.expectedErr) == 0 && actual == nil: + case len(test.expectedErr) == 0 && actual != nil: + t.Fatal(actual) + case len(test.expectedErr) != 0 && actual == nil: + t.Fatal(actual) + case len(test.expectedErr) != 0 && actual != nil && !strings.Contains(actual.Error(), test.expectedErr): + t.Fatalf("actual error: %q does not match expected: %q", actual.Error(), test.expectedErr) + } + + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/int32.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/int32.go new file mode 100644 index 000000000..87256fe20 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installer/int32.go @@ -0,0 +1,187 @@ +package installer + +import ( + "reflect" + "sort" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// sets.Int32 is a set of int32s, implemented via map[int32]struct{} for minimal memory consumption. +type Int32 map[int32]sets.Empty + +// NewInt32 creates a Int32 from a list of values. +func NewInt32(items ...int32) Int32 { + ss := Int32{} + ss.Insert(items...) + return ss +} + +// Int32KeySet creates a Int32 from a keys of a map[int32](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func Int32KeySet(theMap interface{}) Int32 { + v := reflect.ValueOf(theMap) + ret := Int32{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(int32)) + } + return ret +} + +// Insert adds items to the set. 
+func (s Int32) Insert(items ...int32) { + for _, item := range items { + s[item] = sets.Empty{} + } +} + +// Delete removes all items from the set. +func (s Int32) Delete(items ...int32) { + for _, item := range items { + delete(s, item) + } +} + +// Has returns true if and only if item is contained in the set. +func (s Int32) Has(item int32) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Int32) HasAll(items ...int32) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s Int32) HasAny(items ...int32) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s Int32) Difference(s2 Int32) Int32 { + result := NewInt32() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. 
+// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Int32) Union(s2 Int32) Int32 { + result := NewInt32() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Int32) Intersection(s2 Int32) Int32 { + var walk, other Int32 + result := NewInt32() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Int32) IsSuperset(s2 Int32) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 Int32) Equal(s2 Int32) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfInt32 []int32 + +func (s sortableSliceOfInt32) Len() int { return len(s) } +func (s sortableSliceOfInt32) Less(i, j int) bool { return lessInt32(s[i], s[j]) } +func (s sortableSliceOfInt32) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted int32 slice. +func (s Int32) List() []int32 { + res := make(sortableSliceOfInt32, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []int32(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s Int32) UnsortedList() []int32 { + res := make([]int32, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. 
+func (s Int32) PopAny() (int32, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue int32 + return zeroValue, false +} + +// Len returns the size of the set. +func (s Int32) Len() int { + return len(s) +} + +func lessInt32(lhs, rhs int32) bool { + return lhs < rhs +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/monitoring_resource_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/monitoring_resource_controller.go index 77b59f137..99ed89e2a 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/monitoring_resource_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/monitoring_resource_controller.go @@ -5,11 +5,9 @@ import ( "path/filepath" "time" - "github.com/openshift/library-go/pkg/operator/management" - "github.com/openshift/library-go/pkg/operator/v1helpers" - "k8s.io/klog" + "k8s.io/apimachinery/pkg/api/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/dynamic" @@ -20,18 +18,23 @@ import ( "k8s.io/client-go/util/workqueue" operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/assets" + "github.com/openshift/library-go/pkg/operator/condition" "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/management" "github.com/openshift/library-go/pkg/operator/resource/resourceapply" "github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/bindata" + "github.com/openshift/library-go/pkg/operator/v1helpers" ) const ( - operatorStatusMonitoringResourceControllerDegraded = "MonitoringResourceControllerDegraded" - controllerWorkQueueKey = "key" - manifestDir = "pkg/operator/staticpod/controller/monitoring" + controllerWorkQueueKey = "key" + manifestDir = 
"pkg/operator/staticpod/controller/monitoring" ) +var syntheticRequeueError = fmt.Errorf("synthetic requeue request") + type MonitoringResourceController struct { targetNamespace string serviceMonitorName string @@ -119,14 +122,21 @@ func (c MonitoringResourceController) sync() error { errs = append(errs, fmt.Errorf("manifests/service-monitor.yaml: %v", err)) } else { _, serviceMonitorErr := resourceapply.ApplyServiceMonitor(c.dynamicClient, c.eventRecorder, serviceMonitorBytes) - errs = append(errs, serviceMonitorErr) + // This is to handle 'the server could not find the requested resource' which occurs when the CRD is not available + // yet (the CRD is provided by prometheus operator). This produce noise and plenty of events. + if errors.IsNotFound(serviceMonitorErr) { + klog.V(4).Infof("Unable to apply service monitor: %v", err) + return syntheticRequeueError + } else if serviceMonitorErr != nil { + errs = append(errs, serviceMonitorErr) + } } err = v1helpers.NewMultiLineAggregate(errs) // NOTE: Failing to create the monitoring resources should not lead to operator failed state. 
cond := operatorv1.OperatorCondition{ - Type: operatorStatusMonitoringResourceControllerDegraded, + Type: condition.MonitoringResourceControllerDegradedConditionType, Status: operatorv1.ConditionFalse, } if err != nil { @@ -179,7 +189,10 @@ func (c *MonitoringResourceController) processNextWorkItem() bool { return true } - utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + if err != syntheticRequeueError { + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + } + c.queue.AddRateLimited(dsKey) return true diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/monitoring_resource_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/monitoring_resource_controller_test.go index 377e22492..7b62524ef 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/monitoring_resource_controller_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/monitoring/monitoring_resource_controller_test.go @@ -65,6 +65,7 @@ func TestNewMonitoringResourcesController(t *testing.T) { }, &operatorv1.StaticPodOperatorStatus{}, nil, + nil, ), validateActions: func(t *testing.T, actions []clienttesting.Action) { if len(actions) != 4 { @@ -97,6 +98,7 @@ func TestNewMonitoringResourcesController(t *testing.T) { }, &operatorv1.StaticPodOperatorStatus{}, nil, + nil, ), startingDynamicObjects: []runtime.Object{mustAssetServiceMonitor("target-namespace")}, validateActions: func(t *testing.T, actions []clienttesting.Action) {}, diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/node/node_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/node/node_controller.go index 85fb82965..ef21b3fd8 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/node/node_controller.go +++ 
b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/node/node_controller.go @@ -2,13 +2,10 @@ package node import ( "fmt" + "strings" "time" - "github.com/openshift/library-go/pkg/operator/v1helpers" - - "k8s.io/klog" - - "k8s.io/apimachinery/pkg/api/equality" + coreapiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/selection" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -17,9 +14,12 @@ import ( corelisterv1 "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" + "k8s.io/klog" operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/condition" "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/v1helpers" ) const nodeControllerWorkQueueKey = "key" @@ -59,11 +59,10 @@ func NewNodeController( } func (c NodeController) sync() error { - _, originalOperatorStatus, resourceVersion, err := c.operatorClient.GetStaticPodOperatorState() + _, originalOperatorStatus, _, err := c.operatorClient.GetStaticPodOperatorState() if err != nil { return err } - operatorStatus := originalOperatorStatus.DeepCopy() selector, err := labels.NewRequirement("node-role.kubernetes.io/master", selection.Equals, []string{""}) if err != nil { @@ -106,13 +105,52 @@ func (c NodeController) sync() error { newTargetNodeStates = append(newTargetNodeStates, operatorv1.NodeStatus{NodeName: node.Name}) } - operatorStatus.NodeStatuses = newTargetNodeStates - if !equality.Semantic.DeepEqual(originalOperatorStatus, operatorStatus) { - if _, updateError := c.operatorClient.UpdateStaticPodOperatorStatus(resourceVersion, operatorStatus); updateError != nil { - return updateError + // detect and report master nodes that are not ready + notReadyNodes := []string{} + for _, node := range nodes { + for _, con := range node.Status.Conditions { + if con.Type == coreapiv1.NodeReady && con.Status != 
coreapiv1.ConditionTrue { + notReadyNodes = append(notReadyNodes, node.Name) + } } } + newCondition := operatorv1.OperatorCondition{ + Type: condition.NodeControllerDegradedConditionType, + } + if len(notReadyNodes) > 0 { + newCondition.Status = operatorv1.ConditionTrue + newCondition.Reason = "MasterNodesReady" + newCondition.Message = fmt.Sprintf("The master node(s) %q not ready", strings.Join(notReadyNodes, ",")) + } else { + newCondition.Status = operatorv1.ConditionFalse + newCondition.Reason = "MasterNodesReady" + newCondition.Message = "All master node(s) are ready" + } + + oldStatus := &operatorv1.StaticPodOperatorStatus{} + _, updated, updateError := v1helpers.UpdateStaticPodStatus(c.operatorClient, v1helpers.UpdateStaticPodConditionFn(newCondition), func(status *operatorv1.StaticPodOperatorStatus) error { + status.NodeStatuses = newTargetNodeStates + return nil + }, func(status *operatorv1.StaticPodOperatorStatus) error { + //a hack for storing the old status (before the update) + oldStatus = status + return nil + }) + + if updateError != nil { + return updateError + } + if !updated { + return nil + } + + for _, oldCondition := range oldStatus.Conditions { + if oldCondition.Type == condition.NodeControllerDegradedConditionType && oldCondition.Message != newCondition.Message { + c.eventRecorder.Eventf("MasterNodesReadyChanged", newCondition.Message) + break + } + } return nil } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/node/node_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/node/node_controller_test.go index 1170bc87d..153cf0b48 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/node/node_controller_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/node/node_controller_test.go @@ -5,19 +5,19 @@ import ( "testing" "time" - "github.com/openshift/library-go/pkg/operator/v1helpers" - - 
"k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes/fake" operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/condition" "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/v1helpers" ) -func fakeMasterNode(name string) *v1.Node { - n := &v1.Node{} +func fakeMasterNode(name string) *corev1.Node { + n := &corev1.Node{} n.Name = name n.Labels = map[string]string{ "node-role.kubernetes.io/master": "", @@ -26,6 +26,139 @@ func fakeMasterNode(name string) *v1.Node { return n } +func makeNodeNotReady(node *corev1.Node) *corev1.Node { + con := corev1.NodeCondition{} + con.Type = corev1.NodeReady + con.Status = corev1.ConditionFalse + node.Status.Conditions = append(node.Status.Conditions, con) + return node +} + +func validateCommonNodeControllerDegradedCondtion(con operatorv1.OperatorCondition) error { + if con.Type != condition.NodeControllerDegradedConditionType { + return fmt.Errorf("incorrect condition.type, expected NodeControllerDegraded, got %s", con.Type) + } + if con.Reason != "MasterNodesReady" { + return fmt.Errorf("incorrect condition.reason, expected MasterNodesReady, got %s", con.Reason) + } + return nil +} + +func TestNodeControllerDegradedConditionType(t *testing.T) { + scenarios := []struct { + name string + masterNodes []runtime.Object + evaluateNodeStatus func([]operatorv1.OperatorCondition) error + }{ + // scenario 1 + { + name: "scenario 1: one unhealthy master node is reported", + masterNodes: []runtime.Object{makeNodeNotReady(fakeMasterNode("test-node-1")), fakeMasterNode("test-node-2")}, + evaluateNodeStatus: func(conditions []operatorv1.OperatorCondition) error { + if len(conditions) != 1 { + return fmt.Errorf("expected exaclty 1 condition, got %d", len(conditions)) + } + + con := conditions[0] + if err := validateCommonNodeControllerDegradedCondtion(con); err != 
nil { + return err + } + if con.Status != operatorv1.ConditionTrue { + return fmt.Errorf("incorrect condition.status, expected %v, got %v", operatorv1.ConditionTrue, con.Status) + } + expectedMsg := "The master node(s) \"test-node-1\" not ready" + if con.Message != expectedMsg { + return fmt.Errorf("incorrect condition.message, expected %s, got %s", expectedMsg, con.Message) + } + return nil + }, + }, + + // scenario 2 + { + name: "scenario 2: all master nodes are healthy", + masterNodes: []runtime.Object{fakeMasterNode("test-node-1"), fakeMasterNode("test-node-2")}, + evaluateNodeStatus: func(conditions []operatorv1.OperatorCondition) error { + if len(conditions) != 1 { + return fmt.Errorf("expected exaclty 1 condition, got %d", len(conditions)) + } + + con := conditions[0] + if err := validateCommonNodeControllerDegradedCondtion(con); err != nil { + return err + } + if con.Status != operatorv1.ConditionFalse { + return fmt.Errorf("incorrect condition.status, expected %v, got %v", operatorv1.ConditionFalse, con.Status) + } + expectedMsg := "All master node(s) are ready" + if con.Message != expectedMsg { + return fmt.Errorf("incorrect condition.message, expected %s, got %s", expectedMsg, con.Message) + } + return nil + }, + }, + + // scenario 3 + { + name: "scenario 3: multiple master nodes are unhealthy", + masterNodes: []runtime.Object{makeNodeNotReady(fakeMasterNode("test-node-1")), fakeMasterNode("test-node-2"), makeNodeNotReady(fakeMasterNode("test-node-3"))}, + evaluateNodeStatus: func(conditions []operatorv1.OperatorCondition) error { + if len(conditions) != 1 { + return fmt.Errorf("expected exaclty 1 condition, got %d", len(conditions)) + } + + con := conditions[0] + if err := validateCommonNodeControllerDegradedCondtion(con); err != nil { + return err + } + if con.Status != operatorv1.ConditionTrue { + return fmt.Errorf("incorrect condition.status, expected %v, got %v", operatorv1.ConditionTrue, con.Status) + } + expectedMsg := "The master node(s) 
\"test-node-1,test-node-3\" not ready" + if con.Message != expectedMsg { + return fmt.Errorf("incorrect condition.message, expected %s, got %s", expectedMsg, con.Message) + } + return nil + }, + }, + } + for _, scenario := range scenarios { + t.Run(scenario.name, func(t *testing.T) { + kubeClient := fake.NewSimpleClientset(scenario.masterNodes...) + fakeLister := v1helpers.NewFakeNodeLister(kubeClient) + kubeInformers := informers.NewSharedInformerFactory(kubeClient, 1*time.Minute) + fakeStaticPodOperatorClient := v1helpers.NewFakeStaticPodOperatorClient( + &operatorv1.StaticPodOperatorSpec{ + OperatorSpec: operatorv1.OperatorSpec{ + ManagementState: operatorv1.Managed, + }, + }, + &operatorv1.StaticPodOperatorStatus{ + LatestAvailableRevision: 1, + }, + nil, + nil, + ) + + eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &corev1.ObjectReference{}) + + c := NewNodeController(fakeStaticPodOperatorClient, kubeInformers, eventRecorder) + // override the lister so we don't have to run the informer to list nodes + c.nodeLister = fakeLister + if err := c.sync(); err != nil { + t.Fatal(err) + } + + _, status, _, _ := fakeStaticPodOperatorClient.GetStaticPodOperatorState() + + if err := scenario.evaluateNodeStatus(status.OperatorStatus.Conditions); err != nil { + t.Errorf("%s: failed to evaluate operator conditions: %v", scenario.name, err) + } + }) + + } +} + func TestNewNodeController(t *testing.T) { tests := []struct { name string @@ -115,9 +248,10 @@ func TestNewNodeController(t *testing.T) { NodeStatuses: test.startNodeStatus, }, nil, + nil, ) - eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &v1.ObjectReference{}) + eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &corev1.ObjectReference{}) c := NewNodeController(fakeStaticPodOperatorClient, kubeInformers, eventRecorder) // override the lister so we don't have to run the informer to list nodes diff 
--git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/prune_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/prune_controller_test.go index aef89accf..58fec73ba 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/prune_controller_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/prune_controller_test.go @@ -196,6 +196,7 @@ func TestPruneAPIResources(t *testing.T) { }, }, nil, + nil, ) eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &v1.ObjectReference{}) @@ -408,6 +409,7 @@ func TestPruneDiskResources(t *testing.T) { }, }, nil, + nil, ) eventRecorder := events.NewRecorder(kubeClient.CoreV1().Events("test"), "test-operator", &v1.ObjectReference{}) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/revision_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/revision_controller.go index 31b9eb20e..6071d035e 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/revision_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/revision_controller.go @@ -2,6 +2,7 @@ package revision import ( "fmt" + "strconv" "strings" "time" @@ -19,13 +20,14 @@ import ( "k8s.io/client-go/util/workqueue" operatorv1 "github.com/openshift/api/operator/v1" + + "github.com/openshift/library-go/pkg/operator/condition" "github.com/openshift/library-go/pkg/operator/events" "github.com/openshift/library-go/pkg/operator/management" "github.com/openshift/library-go/pkg/operator/resource/resourceapply" "github.com/openshift/library-go/pkg/operator/v1helpers" ) -const operatorStatusRevisionControllerDegraded = "RevisionControllerDegraded" const revisionControllerWorkQueueKey = "key" // RevisionController is a controller 
that watches a set of configmaps and secrets and them against a revision snapshot @@ -248,6 +250,32 @@ func (c RevisionController) createNewRevision(revision int32) error { return nil } +// getLatestAvailableRevision returns the latest known revision to the operator +// This is either the LatestAvailableRevision in the status or by checking revision status configmaps +func (c RevisionController) getLatestAvailableRevision(operatorStatus *operatorv1.StaticPodOperatorStatus) (int32, error) { + configMaps, err := c.configMapGetter.ConfigMaps(c.targetNamespace).List(metav1.ListOptions{}) + if err != nil { + return 0, err + } + var latestRevision int32 + for _, configMap := range configMaps.Items { + if !strings.HasPrefix(configMap.Name, "revision-status-") { + continue + } + if revision, ok := configMap.Data["revision"]; ok { + revisionNumber, err := strconv.Atoi(revision) + if err != nil { + return 0, err + } + if int32(revisionNumber) > latestRevision { + latestRevision = int32(revisionNumber) + } + } + } + // If there are no configmaps, then this should actually be revision 0 + return latestRevision, nil +} + func (c RevisionController) sync() error { operatorSpec, originalOperatorStatus, resourceVersion, err := c.operatorClient.GetStaticPodOperatorStateWithQuorum() if err != nil { @@ -259,6 +287,25 @@ func (c RevisionController) sync() error { return nil } + // If the operator status has 0 as its latest available revision, this is either the first revision + // or possibly the operator resource was deleted and reset back to 0, which is not what we want so check configmaps + if operatorStatus.LatestAvailableRevision == 0 { + // Check to see if current revision is accurate and if not, search through configmaps for latest revision + latestRevision, err := c.getLatestAvailableRevision(operatorStatus) + if err != nil { + return err + } + if latestRevision != 0 { + // Then make sure that revision number is what's in the operator status + _, _, err = 
v1helpers.UpdateStaticPodStatus(c.operatorClient, func(status *operatorv1.StaticPodOperatorStatus) error { + status.LatestAvailableRevision = latestRevision + return nil + }) + // If we made a change return and requeue with the correct status + return fmt.Errorf("synthetic requeue request (err: %v)", err) + } + } + requeue, syncErr := c.createRevisionIfNeeded(operatorSpec, operatorStatus, resourceVersion) if requeue && syncErr == nil { return fmt.Errorf("synthetic requeue request (err: %v)", syncErr) @@ -267,7 +314,7 @@ func (c RevisionController) sync() error { // update failing condition cond := operatorv1.OperatorCondition{ - Type: operatorStatusRevisionControllerDegraded, + Type: condition.RevisionControllerDegradedConditionType, Status: operatorv1.ConditionFalse, } if err != nil { diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/revision_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/revision_controller_test.go index 7fb928cd0..db1de1139 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/revision_controller_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/revision_controller_test.go @@ -48,6 +48,49 @@ func TestRevisionController(t *testing.T) { validateStatus func(t *testing.T, status *operatorv1.StaticPodOperatorStatus) expectSyncError string }{ + { + testName: "set-latest-revision-by-configmap", + targetNamespace: targetNamespace, + staticPodOperatorClient: v1helpers.NewFakeStaticPodOperatorClient( + &operatorv1.StaticPodOperatorSpec{ + OperatorSpec: operatorv1.OperatorSpec{ + ManagementState: operatorv1.Managed, + }, + }, + &operatorv1.StaticPodOperatorStatus{ + LatestAvailableRevision: 0, + NodeStatuses: []operatorv1.NodeStatus{ + { + NodeName: "test-node-1", + CurrentRevision: 0, + TargetRevision: 0, + }, + }, + }, + nil, + nil, + ), + testConfigs: 
[]RevisionResource{{Name: "test-config"}}, + testSecrets: []RevisionResource{{Name: "test-secret"}}, + startingObjects: []runtime.Object{ + &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "test-secret", Namespace: targetNamespace}}, + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-config", Namespace: targetNamespace}}, + &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "revision-status", Namespace: targetNamespace}}, + &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "revision-status-1", Namespace: targetNamespace}, + Data: map[string]string{"revision": "1"}, + }, + &v1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: "revision-status-2", Namespace: targetNamespace}, + Data: map[string]string{"revision": "2"}, + }, + }, + validateStatus: func(t *testing.T, status *operatorv1.StaticPodOperatorStatus) { + if status.LatestAvailableRevision != 2 { + t.Errorf("expected status LatestAvailableRevision to be 2, got %v", status.LatestAvailableRevision) + } + }, + }, { testName: "operator-unmanaged", targetNamespace: targetNamespace, @@ -59,6 +102,7 @@ func TestRevisionController(t *testing.T) { }, &operatorv1.StaticPodOperatorStatus{}, nil, + nil, ), validateActions: func(t *testing.T, actions []clienttesting.Action) { createdObjects := filterCreateActions(actions) @@ -87,6 +131,7 @@ func TestRevisionController(t *testing.T) { }, }, nil, + nil, ), testConfigs: []RevisionResource{{Name: "test-config"}}, testSecrets: []RevisionResource{{Name: "test-secret"}}, @@ -123,6 +168,7 @@ func TestRevisionController(t *testing.T) { }, }, nil, + nil, ), startingObjects: []runtime.Object{ &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "test-secret", Namespace: targetNamespace}}, @@ -189,6 +235,7 @@ func TestRevisionController(t *testing.T) { }, }, nil, + nil, ), startingObjects: []runtime.Object{ &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "test-secret", Namespace: targetNamespace}}, @@ -267,6 +314,7 @@ func TestRevisionController(t *testing.T) { }, }, nil, + nil, ), 
startingObjects: []runtime.Object{ &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "test-secret", Namespace: targetNamespace}}, @@ -327,6 +375,7 @@ func TestRevisionController(t *testing.T) { }, }, nil, + nil, ), startingObjects: []runtime.Object{ &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "test-secret", Namespace: targetNamespace}}, @@ -364,6 +413,7 @@ func TestRevisionController(t *testing.T) { }, }, nil, + nil, ), startingObjects: []runtime.Object{ &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "test-secret", Namespace: targetNamespace}}, diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/staticpodstate/staticpodstate_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/staticpodstate/staticpodstate_controller.go index 34e46b98b..925a27c47 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/staticpodstate/staticpodstate_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/staticpodstate/staticpodstate_controller.go @@ -17,6 +17,7 @@ import ( "k8s.io/client-go/util/workqueue" operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/condition" "github.com/openshift/library-go/pkg/operator/events" "github.com/openshift/library-go/pkg/operator/management" "github.com/openshift/library-go/pkg/operator/status" @@ -24,7 +25,6 @@ import ( ) var ( - staticPodStateControllerDegraded = "StaticPodsDegraded" staticPodStateControllerWorkQueueKey = "key" ) @@ -135,11 +135,15 @@ func (c *StaticPodStateController) sync() error { c.operandName, status.VersionForOperandFromEnv(), ) + c.versionRecorder.SetVersion( + "operator", + status.VersionForOperatorFromEnv(), + ) } // update failing condition cond := operatorv1.OperatorCondition{ - Type: staticPodStateControllerDegraded, + Type: condition.StaticPodsDegradedConditionType, Status: operatorv1.ConditionFalse, } // Failing errors @@ -154,9 
+158,7 @@ func (c *StaticPodStateController) sync() error { cond.Message = v1helpers.NewMultiLineAggregate(errs).Error() } if _, _, updateError := v1helpers.UpdateStaticPodStatus(c.operatorClient, v1helpers.UpdateStaticPodConditionFn(cond), v1helpers.UpdateStaticPodConditionFn(cond)); updateError != nil { - if err == nil { - return updateError - } + return updateError } return err diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controllers.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controllers.go index a2e444fe7..f17b19871 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controllers.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controllers.go @@ -164,6 +164,7 @@ func (b *staticPodOperatorControllerBuilder) ToControllers() (*staticPodOperator operandInformers, b.staticPodOperatorClient, configMapClient, + secretClient, podClient, eventRecorder, ).WithCerts( diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/installerpod/cmd.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/installerpod/cmd.go index 2738ba2b9..11b5216b2 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/installerpod/cmd.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/installerpod/cmd.go @@ -215,7 +215,7 @@ func (o *InstallOptions) copySecretsAndConfigMaps(ctx context.Context, resourceD for filename, content := range secret.Data { // TODO fix permissions klog.Infof("Writing secret manifest %q ...", path.Join(contentDir, filename)) - if err := ioutil.WriteFile(path.Join(contentDir, filename), content, 0644); err != nil { + if err := ioutil.WriteFile(path.Join(contentDir, filename), content, 0600); err != nil { return err } } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/status/condition.go b/vendor/github.com/openshift/library-go/pkg/operator/status/condition.go index 60979a379..efc539bf2 100644 
--- a/vendor/github.com/openshift/library-go/pkg/operator/status/condition.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/status/condition.go @@ -55,8 +55,10 @@ func internalUnionCondition(conditionType string, defaultConditionStatus operato return OperatorConditionToClusterOperatorCondition(unionedCondition) } - oneMinuteAgo := time.Now().Add(-1 * time.Minute) - earliestBadConditionNotOldEnough := earliestTransitionTime(badConditions).Time.After(oneMinuteAgo) + // This timeout needs to be longer than the delay in kube-apiserver after setting not ready and before we stop serving. + // That delay use to be 30 seconds, but we switched it to 70 seconds to reflect the reality on AWS. + twoMinutesAgo := time.Now().Add(-2 * time.Minute) + earliestBadConditionNotOldEnough := earliestTransitionTime(badConditions).Time.After(twoMinutesAgo) if len(badConditions) == 0 || (hasInertia && earliestBadConditionNotOldEnough) { unionedCondition.Status = defaultConditionStatus unionedCondition.Message = unionMessage(interestingConditions) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller.go index 06f304644..0c59076ff 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller.go @@ -2,6 +2,7 @@ package status import ( "fmt" + "strings" "time" "k8s.io/klog" @@ -70,7 +71,7 @@ func NewClusterOperatorStatusController( operatorClient: operatorClient, eventRecorder: recorder.WithComponentSuffix("status-controller"), - queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "StatusSyncer-"+name), + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "StatusSyncer_"+strings.Replace(name, "-", "_", -1)), } operatorClient.Informer().AddEventHandler(c.eventHandler()) @@ -87,7 +88,7 @@ 
func NewClusterOperatorStatusController( func (c StatusSyncer) sync() error { detailedSpec, currentDetailedStatus, _, err := c.operatorClient.GetOperatorState() if apierrors.IsNotFound(err) { - c.eventRecorder.Warningf("StatusNotFound", "Unable to determine current operator status for %s", c.clusterOperatorName) + c.eventRecorder.Warningf("StatusNotFound", "Unable to determine current operator status for clusteroperator/%s", c.clusterOperatorName) if err := c.clusterOperatorClient.ClusterOperators().Delete(c.clusterOperatorName, nil); err != nil && !apierrors.IsNotFound(err) { return err } @@ -99,7 +100,7 @@ func (c StatusSyncer) sync() error { originalClusterOperatorObj, err := c.clusterOperatorLister.Get(c.clusterOperatorName) if err != nil && !apierrors.IsNotFound(err) { - c.eventRecorder.Warningf("StatusFailed", "Unable to get current operator status for %s: %v", c.clusterOperatorName, err) + c.eventRecorder.Warningf("StatusFailed", "Unable to get current operator status for clusteroperator/%s: %v", c.clusterOperatorName, err) return err } @@ -150,7 +151,11 @@ func (c StatusSyncer) sync() error { // TODO work out removal. We don't always know the existing value, so removing early seems like a bad idea. Perhaps a remove flag. 
versions := c.versionGetter.GetVersions() for operand, version := range versions { - operatorv1helpers.SetOperandVersion(&clusterOperatorObj.Status.Versions, configv1.OperandVersion{Name: operand, Version: version}) + previousVersion := operatorv1helpers.SetOperandVersion(&clusterOperatorObj.Status.Versions, configv1.OperandVersion{Name: operand, Version: version}) + if previousVersion != version { + // having this message will give us a marker in events when the operator updated compared to when the operand is updated + c.eventRecorder.Eventf("OperatorVersionChanged", "clusteroperator/%s version %q changed from %q to %q", c.clusterOperatorName, operand, previousVersion, version) + } } // if we have no diff, just return @@ -162,7 +167,7 @@ func (c StatusSyncer) sync() error { if _, updateErr := c.clusterOperatorClient.ClusterOperators().UpdateStatus(clusterOperatorObj); err != nil { return updateErr } - c.eventRecorder.Eventf("OperatorStatusChanged", "Status for operator %s changed: %s", c.clusterOperatorName, configv1helpers.GetStatusDiff(originalClusterOperatorObj.Status, clusterOperatorObj.Status)) + c.eventRecorder.Eventf("OperatorStatusChanged", "Status for clusteroperator/%s changed: %s", c.clusterOperatorName, configv1helpers.GetStatusDiff(originalClusterOperatorObj.Status, clusterOperatorObj.Status)) return nil } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller_test.go index 05b94854d..578e7ed38 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller_test.go @@ -21,7 +21,7 @@ import ( func TestDegraded(t *testing.T) { - twoMinutesAgo := metav1.NewTime(time.Now().Add(-2 * time.Minute)) + threeMinutesAgo := metav1.NewTime(time.Now().Add(-3 * time.Minute)) fiveSecondsAgo := metav1.NewTime(time.Now().Add(-2 * 
time.Second)) yesterday := metav1.NewTime(time.Now().Add(-24 * time.Hour)) @@ -53,7 +53,7 @@ func TestDegraded(t *testing.T) { { name: "one not failing/beyond threshold", conditions: []operatorv1.OperatorCondition{ - {Type: "TypeADegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: twoMinutesAgo, Message: "a message from type a"}, + {Type: "TypeADegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: threeMinutesAgo, Message: "a message from type a"}, }, expectedStatus: configv1.ConditionFalse, expectedReason: "AsExpected", @@ -75,7 +75,7 @@ func TestDegraded(t *testing.T) { { name: "one failing/beyond threshold", conditions: []operatorv1.OperatorCondition{ - {Type: "TypeADegraded", Status: operatorv1.ConditionTrue, Message: "a message from type a", LastTransitionTime: twoMinutesAgo}, + {Type: "TypeADegraded", Status: operatorv1.ConditionTrue, Message: "a message from type a", LastTransitionTime: threeMinutesAgo}, }, expectedStatus: configv1.ConditionTrue, expectedReason: "TypeADegraded", @@ -98,7 +98,7 @@ func TestDegraded(t *testing.T) { { name: "two present/one failing/beyond threshold", conditions: []operatorv1.OperatorCondition{ - {Type: "TypeADegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: twoMinutesAgo, Message: "a message from type a"}, + {Type: "TypeADegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: threeMinutesAgo, Message: "a message from type a"}, {Type: "TypeBDegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: yesterday}, }, expectedStatus: configv1.ConditionTrue, @@ -123,7 +123,7 @@ func TestDegraded(t *testing.T) { name: "two present/second one failing/beyond threshold", conditions: []operatorv1.OperatorCondition{ {Type: "TypeADegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: yesterday}, - {Type: "TypeBDegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: twoMinutesAgo, Message: "a message from type b"}, + {Type: "TypeBDegraded", Status: 
operatorv1.ConditionTrue, LastTransitionTime: threeMinutesAgo, Message: "a message from type b"}, }, expectedStatus: configv1.ConditionTrue, expectedReason: "TypeBDegraded", @@ -136,7 +136,7 @@ func TestDegraded(t *testing.T) { conditions: []operatorv1.OperatorCondition{ {Type: "TypeADegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: yesterday}, {Type: "TypeBDegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: fiveSecondsAgo, Message: "a message from type b\nanother message from type b"}, - {Type: "TypeCDegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: twoMinutesAgo, Message: "a message from type c"}, + {Type: "TypeCDegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: threeMinutesAgo, Message: "a message from type c"}, {Type: "TypeDDegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: fiveSecondsAgo, Message: "a message from type d"}, }, expectedStatus: configv1.ConditionFalse, @@ -153,8 +153,8 @@ func TestDegraded(t *testing.T) { conditions: []operatorv1.OperatorCondition{ {Type: "TypeADegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: yesterday}, {Type: "TypeBDegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: fiveSecondsAgo, Message: "a message from type b\nanother message from type b"}, - {Type: "TypeCDegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: twoMinutesAgo, Message: "a message from type c"}, - {Type: "TypeDDegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: twoMinutesAgo, Message: "a message from type d"}, + {Type: "TypeCDegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: threeMinutesAgo, Message: "a message from type c"}, + {Type: "TypeDDegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: threeMinutesAgo, Message: "a message from type d"}, }, expectedStatus: configv1.ConditionTrue, expectedReason: "MultipleConditionsMatching", @@ -168,9 +168,9 @@ func TestDegraded(t *testing.T) { name: "many present/some 
failing/all beyond threshold", conditions: []operatorv1.OperatorCondition{ {Type: "TypeADegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: yesterday}, - {Type: "TypeBDegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: twoMinutesAgo, Message: "a message from type b\nanother message from type b"}, - {Type: "TypeCDegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: twoMinutesAgo, Message: "a message from type c"}, - {Type: "TypeDDegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: twoMinutesAgo, Message: "a message from type d"}, + {Type: "TypeBDegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: threeMinutesAgo, Message: "a message from type b\nanother message from type b"}, + {Type: "TypeCDegraded", Status: operatorv1.ConditionFalse, LastTransitionTime: threeMinutesAgo, Message: "a message from type c"}, + {Type: "TypeDDegraded", Status: operatorv1.ConditionTrue, LastTransitionTime: threeMinutesAgo, Message: "a message from type d"}, }, expectedStatus: configv1.ConditionTrue, expectedReason: "MultipleConditionsMatching", diff --git a/vendor/github.com/openshift/library-go/pkg/operator/status/version.go b/vendor/github.com/openshift/library-go/pkg/operator/status/version.go index 5543a602d..3f3fcec94 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/status/version.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/status/version.go @@ -18,7 +18,8 @@ type versionGetter struct { } const ( - operandImageVersionEnvVarName = "OPERAND_IMAGE_VERSION" + operandImageVersionEnvVarName = "OPERAND_IMAGE_VERSION" + operatorImageVersionEnvVarName = "OPERATOR_IMAGE_VERSION" ) func NewVersionGetter() VersionGetter { @@ -66,6 +67,10 @@ func VersionForOperandFromEnv() string { return os.Getenv(operandImageVersionEnvVarName) } +func VersionForOperatorFromEnv() string { + return os.Getenv(operatorImageVersionEnvVarName) +} + func VersionForOperand(namespace, imagePullSpec string, configMapGetter 
corev1client.ConfigMapsGetter, eventRecorder events.Recorder) string { versionMap := map[string]string{} versionMapping, err := configMapGetter.ConfigMaps(namespace).Get("version-mapping", metav1.GetOptions{}) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/unsupportedconfigoverridescontroller/unsupportedconfigoverrides_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/unsupportedconfigoverridescontroller/unsupportedconfigoverrides_controller.go index 2f659617b..179542a4e 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/unsupportedconfigoverridescontroller/unsupportedconfigoverrides_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/unsupportedconfigoverridescontroller/unsupportedconfigoverrides_controller.go @@ -17,14 +17,14 @@ import ( "k8s.io/client-go/util/workqueue" operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/condition" "github.com/openshift/library-go/pkg/operator/events" "github.com/openshift/library-go/pkg/operator/management" "github.com/openshift/library-go/pkg/operator/v1helpers" ) const ( - unsupportedConfigOverridesControllerUpgradeable = "UnsupportedConfigOverridesUpgradeable" - controllerWorkQueueKey = "key" + controllerWorkQueueKey = "key" ) // UnsupportedConfigOverridesController is a controller that will copy source configmaps and secrets to their destinations. 
@@ -67,7 +67,7 @@ func (c *UnsupportedConfigOverridesController) sync() error { } cond := operatorv1.OperatorCondition{ - Type: unsupportedConfigOverridesControllerUpgradeable, + Type: condition.UnsupportedConfigOverridesUpgradeableConditionType, Status: operatorv1.ConditionTrue, Reason: "NoUnsupportedConfigOverrides", } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go index 2755b640c..0c0d62da0 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go @@ -1,36 +1,34 @@ package v1helpers import ( - "fmt" "strings" "time" - "github.com/ghodss/yaml" - "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/client-go/dynamic" "k8s.io/client-go/util/retry" configv1 "github.com/openshift/api/config/v1" operatorv1 "github.com/openshift/api/operator/v1" ) -func SetOperandVersion(versions *[]configv1.OperandVersion, operandVersion configv1.OperandVersion) { +// SetOperandVersion sets the new version and returns the previous value. 
+func SetOperandVersion(versions *[]configv1.OperandVersion, operandVersion configv1.OperandVersion) string { if versions == nil { versions = &[]configv1.OperandVersion{} } existingVersion := FindOperandVersion(*versions, operandVersion.Name) if existingVersion == nil { *versions = append(*versions, operandVersion) - return + return "" } + + previous := existingVersion.Version existingVersion.Version = operandVersion.Version + return previous } func FindOperandVersion(versions []configv1.OperandVersion, name string) *configv1.OperandVersion { @@ -106,33 +104,6 @@ func IsOperatorConditionPresentAndEqual(conditions []operatorv1.OperatorConditio return false } -func EnsureOperatorConfigExists(client dynamic.Interface, operatorConfigBytes []byte, gvr schema.GroupVersionResource) { - configJson, err := yaml.YAMLToJSON(operatorConfigBytes) - if err != nil { - panic(err) - } - operatorConfigObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, configJson) - if err != nil { - panic(err) - } - - requiredOperatorConfig, ok := operatorConfigObj.(*unstructured.Unstructured) - if !ok { - panic(fmt.Sprintf("unexpected object in %t", operatorConfigObj)) - } - - _, err = client.Resource(gvr).Get(requiredOperatorConfig.GetName(), metav1.GetOptions{}) - if errors.IsNotFound(err) { - if _, err := client.Resource(gvr).Create(requiredOperatorConfig, metav1.CreateOptions{}); err != nil { - panic(err) - } - return - } - if err != nil { - panic(err) - } -} - // UpdateOperatorSpecFunc is a func that mutates an operator spec. type UpdateOperatorSpecFunc func(spec *operatorv1.OperatorSpec) error @@ -194,6 +165,8 @@ func UpdateStatus(client OperatorClient, updateFuncs ...UpdateStatusFunc) (*oper } if equality.Semantic.DeepEqual(oldStatus, newStatus) { + // We return the newStatus which is a deep copy of oldStatus but with all update funcs applied. 
+ updatedOperatorStatus = newStatus return nil } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go index 2bb7cc635..4afb23a61 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go @@ -9,9 +9,9 @@ type OperatorClient interface { Informer() cache.SharedIndexInformer // GetOperatorState returns the operator spec, status and the resource version, potentially from a lister. GetOperatorState() (spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, resourceVersion string, err error) - // UpdateOperatorSpec updates the spec of the operator, assuming the given resource verison. + // UpdateOperatorSpec updates the spec of the operator, assuming the given resource version. UpdateOperatorSpec(oldResourceVersion string, in *operatorv1.OperatorSpec) (out *operatorv1.OperatorSpec, newResourceVersion string, err error) - // UpdateOperatorStatus updates the status of the operator, assuming the given resource verison. + // UpdateOperatorStatus updates the status of the operator, assuming the given resource version. UpdateOperatorStatus(oldResourceVersion string, in *operatorv1.OperatorStatus) (out *operatorv1.OperatorStatus, err error) } @@ -25,4 +25,6 @@ type StaticPodOperatorClient interface { GetStaticPodOperatorStateWithQuorum() (spec *operatorv1.StaticPodOperatorSpec, status *operatorv1.StaticPodOperatorStatus, resourceVersion string, err error) // UpdateStaticPodOperatorStatus updates the status, assuming the given resource version. UpdateStaticPodOperatorStatus(resourceVersion string, in *operatorv1.StaticPodOperatorStatus) (out *operatorv1.StaticPodOperatorStatus, err error) + // UpdateStaticPodOperatorSpec updates the spec, assuming the given resource version. 
+ UpdateStaticPodOperatorSpec(resourceVersion string, in *operatorv1.StaticPodOperatorSpec) (out *operatorv1.StaticPodOperatorSpec, newResourceVersion string, err error) } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go index 1669c5a18..014585c55 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go @@ -61,12 +61,14 @@ func (fakeSharedIndexInformer) GetIndexer() cache.Indexer { // NewFakeStaticPodOperatorClient returns a fake operator client suitable to use in static pod controller unit tests. func NewFakeStaticPodOperatorClient( staticPodSpec *operatorv1.StaticPodOperatorSpec, staticPodStatus *operatorv1.StaticPodOperatorStatus, - triggerErr func(rv string, status *operatorv1.StaticPodOperatorStatus) error) StaticPodOperatorClient { + triggerStatusErr func(rv string, status *operatorv1.StaticPodOperatorStatus) error, + triggerSpecErr func(rv string, spec *operatorv1.StaticPodOperatorSpec) error) StaticPodOperatorClient { return &fakeStaticPodOperatorClient{ fakeStaticPodOperatorSpec: staticPodSpec, fakeStaticPodOperatorStatus: staticPodStatus, resourceVersion: "0", - triggerStatusUpdateError: triggerErr, + triggerStatusUpdateError: triggerStatusErr, + triggerSpecUpdateError: triggerSpecErr, } } @@ -76,6 +78,7 @@ type fakeStaticPodOperatorClient struct { fakeStaticPodOperatorStatus *operatorv1.StaticPodOperatorStatus resourceVersion string triggerStatusUpdateError func(rv string, status *operatorv1.StaticPodOperatorStatus) error + triggerSpecUpdateError func(rv string, status *operatorv1.StaticPodOperatorSpec) error } func (c *fakeStaticPodOperatorClient) Informer() cache.SharedIndexInformer { @@ -108,6 +111,24 @@ func (c *fakeStaticPodOperatorClient) UpdateStaticPodOperatorStatus(resourceVers return 
c.fakeStaticPodOperatorStatus, nil } +func (c *fakeStaticPodOperatorClient) UpdateStaticPodOperatorSpec(resourceVersion string, spec *operatorv1.StaticPodOperatorSpec) (*operatorv1.StaticPodOperatorSpec, string, error) { + if c.resourceVersion != resourceVersion { + return nil, "", errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion")) + } + rv, err := strconv.Atoi(resourceVersion) + if err != nil { + return nil, "", err + } + c.resourceVersion = strconv.Itoa(rv + 1) + if c.triggerSpecUpdateError != nil { + if err := c.triggerSpecUpdateError(resourceVersion, spec); err != nil { + return nil, "", err + } + } + c.fakeStaticPodOperatorSpec = spec + return c.fakeStaticPodOperatorSpec, c.resourceVersion, nil +} + func (c *fakeStaticPodOperatorClient) GetOperatorState() (*operatorv1.OperatorSpec, *operatorv1.OperatorStatus, string, error) { return &c.fakeStaticPodOperatorSpec.OperatorSpec, &c.fakeStaticPodOperatorStatus.OperatorStatus, "", nil } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/watchdog/cmd.go b/vendor/github.com/openshift/library-go/pkg/operator/watchdog/cmd.go new file mode 100644 index 000000000..c0d64b7b2 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/watchdog/cmd.go @@ -0,0 +1,345 @@ +package watchdog + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "syscall" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apiserver/pkg/server" + "k8s.io/client-go/kubernetes" + "k8s.io/klog" + + "github.com/openshift/library-go/pkg/config/client" + "github.com/openshift/library-go/pkg/controller/fileobserver" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/retry" +) + +type FileWatcherOptions struct { + // 
ProcessName is the name of the process we will send SIGTERM + ProcessName string + + // Files lists all files we want to monitor for changes + Files []string + KubeConfig string + + // Namespace to report events to + Namespace string + recorder events.Recorder + + // Interval specifies how aggressive we want to be in file checks + Interval time.Duration + + // Time to give the process to terminate gracefully + TerminationGracePeriod time.Duration + + // for unit-test to mock getting the process PID (unit-test) + findPidByNameFn func(name string) (int, bool, error) + + // processExistsFn to mock checking the process PID (unit-test) + processExistsFn func(int) (bool, error) + + // for unit-test to mock sending UNIX signals + handleTerminationFn func(pid int) error + + handleKillFn func(pid int) error + + // for unit-test to mock prefixing files (/proc/PID/root) + addProcPrefixToFilesFn func([]string, int) []string + + // lastTerminatedPid is used track the value of a PID that we already terminated + lastTerminatedPid int +} + +func NewFileWatcherOptions() *FileWatcherOptions { + return &FileWatcherOptions{ + findPidByNameFn: FindProcessByName, + processExistsFn: ProcessExists, + addProcPrefixToFilesFn: addProcPrefixToFiles, + handleTerminationFn: func(pid int) error { + return syscall.Kill(pid, syscall.SIGTERM) + }, + handleKillFn: func(pid int) error { + return syscall.Kill(pid, syscall.SIGKILL) + }, + } +} + +// NewFileWatcherWatchdog return the file watcher watchdog command. +// This command should be used as a side-car to a container which will react to file changes in the main container +// and terminate the main container process in case a change is observed. +// TODO: If the main container start before the watchdog side-car container (image pull) there might be a case +// the watchdog won't react to a changed file (simply because it is not running yet). In that case the main process +// will not be reloaded. 
However, the operator image should be pulled on master node and therefore chances to hit this +// case are minimal. +func NewFileWatcherWatchdog() *cobra.Command { + o := NewFileWatcherOptions() + + cmd := &cobra.Command{ + Use: "file-watcher-watchdog", + Short: "Watch files on the disk and terminate the specified process on change", + Run: func(cmd *cobra.Command, args []string) { + klog.V(1).Info(cmd.Flags()) + klog.V(1).Info(spew.Sdump(o)) + + // Handle shutdown + termHandler := server.SetupSignalHandler() + ctx, shutdown := context.WithCancel(context.TODO()) + go func() { + defer shutdown() + <-termHandler + }() + + if err := o.Complete(); err != nil { + klog.Fatal(err) + } + if err := o.Validate(); err != nil { + klog.Fatal(err) + } + + if err := o.Run(ctx); err != nil { + klog.Fatal(err) + } + }, + } + + o.AddFlags(cmd.Flags()) + + return cmd +} + +func (o *FileWatcherOptions) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&o.ProcessName, "process-name", "", "name of the process to send TERM signal to on file change (eg. 
'hyperkube').") + fs.StringSliceVar(&o.Files, "files", o.Files, "comma separated list of file names to monitor for changes") + fs.StringVar(&o.KubeConfig, "kubeconfig", o.KubeConfig, "kubeconfig file or empty") + fs.StringVar(&o.Namespace, "namespace", o.Namespace, "namespace to report the watchdog events") + fs.DurationVar(&o.Interval, "interval", 5*time.Second, "interval specifying how aggressive the file checks should be") + fs.DurationVar(&o.TerminationGracePeriod, "termination-grace-period", 30*time.Second, "interval specifying how long to wait until sending KILL signal to the process") +} + +func (o *FileWatcherOptions) Complete() error { + clientConfig, err := client.GetKubeConfigOrInClusterConfig(o.KubeConfig, nil) + if err != nil { + return err + } + kubeClient, err := kubernetes.NewForConfig(clientConfig) + if err != nil { + return err + } + + ctx, cancel := context.WithTimeout(context.TODO(), 10*time.Second) + defer cancel() + + // Get event recorder. + // Retry on connection errors for 10s, but don't error out, instead fallback to the namespace. 
+ var eventTarget *v1.ObjectReference + err = retry.RetryOnConnectionErrors(ctx, func(context.Context) (bool, error) { + var clientErr error + eventTarget, clientErr = events.GetControllerReferenceForCurrentPod(kubeClient, o.Namespace, nil) + if clientErr != nil { + return false, clientErr + } + return true, nil + }) + if err != nil { + klog.Warningf("unable to get owner reference (falling back to namespace): %v", err) + } + o.recorder = events.NewRecorder(kubeClient.CoreV1().Events(o.Namespace), "file-change-watchdog", eventTarget) + + return nil +} + +func (o *FileWatcherOptions) Validate() error { + if len(o.ProcessName) == 0 { + return fmt.Errorf("process name must be specified") + } + if len(o.Files) == 0 { + return fmt.Errorf("at least one file to observe must be specified") + } + if len(o.Namespace) == 0 && len(os.Getenv("POD_NAMESPACE")) == 0 { + return fmt.Errorf("either namespace flag or POD_NAMESPACE environment variable must be specified") + } + return nil +} + +// runPidObserver runs a loop that observes changes to the PID of the process we send signals after change is detected. +func (o *FileWatcherOptions) runPidObserver(ctx context.Context, pidObservedCh chan int) { + defer close(pidObservedCh) + currentPID := 0 + retries := 0 + pollErr := wait.PollImmediateUntil(1*time.Second, func() (done bool, err error) { + retries++ + // attempt to find the PID by process name via /proc + observedPID, found, err := o.findPidByNameFn(o.ProcessName) + if !found || err != nil { + klog.Warningf("Unable to determine PID for %q (retry: %d, err: %v)", o.ProcessName, retries, err) + return false, nil + } + + if currentPID == 0 { + currentPID = observedPID + // notify runWatchdog when the PID is initially observed (we need the PID to mutate file paths). + pidObservedCh <- observedPID + } + + // watch for PID changes, when observed restart the observer and wait for the new PID to appear. 
+ if currentPID != observedPID { + return true, nil + } + + return false, nil + }, ctx.Done()) + + // These are not fatal errors, but we still want to log them out + if pollErr != nil && pollErr != wait.ErrWaitTimeout { + klog.Warningf("Unexpected error: %v", pollErr) + } +} + +// readInitialFileContent reads the content of files specified. +// This is needed by file observer. +func readInitialFileContent(files []string) (map[string][]byte, error) { + initialContent := map[string][]byte{} + for _, name := range files { + // skip files that does not exists (yet) + if _, err := os.Stat(name); os.IsNotExist(err) { + continue + } + content, err := ioutil.ReadFile(name) + if err != nil { + return nil, err + } + initialContent[name] = content + } + return initialContent, nil +} + +// addProcPrefixToFiles mutates the file list and prefix every file with /proc/PID/root. +// With shared pid namespace, we are able to access the target container filesystem via /proc. +func addProcPrefixToFiles(oldFiles []string, pid int) []string { + files := []string{} + for _, file := range oldFiles { + files = append(files, filepath.Join("/proc", fmt.Sprintf("%d", pid), "root", file)) + } + return files +} + +// Run the main watchdog loop. 
+func (o *FileWatcherOptions) Run(ctx context.Context) error { + for { + { + o.lastTerminatedPid = 0 + instanceCtx, shutdown := context.WithCancel(ctx) + defer shutdown() + select { + case <-ctx.Done(): + // exit(0) + shutdown() + return nil + default: + } + if err := o.runWatchdog(instanceCtx); err != nil { + return err + } + } + } +} + +func (o *FileWatcherOptions) terminateGracefully(pid int) error { + // Send SIGTERM to the process + klog.Infof("Sending TERM signal to %d ...", pid) + if err := o.handleTerminationFn(pid); err != nil { + return err + } + // Wait TerminationGracePeriodSeconds or until the process is not removed from /proc + pollErr := wait.PollImmediate(500*time.Millisecond, o.TerminationGracePeriod, func() (done bool, err error) { + if exists, err := o.processExistsFn(pid); !exists && err == nil { + return true, nil + } else if err != nil { + return true, err + } + return false, nil + }) + // If the process still exists and the TerminationGracePeriodSeconds passed, send kill signal and return + if pollErr == wait.ErrWaitTimeout { + klog.Infof("Sending KILL signal to %d ...", pid) + return o.handleKillFn(pid) + } + return pollErr +} + +// runWatchdog run single instance of watchdog. +func (o *FileWatcherOptions) runWatchdog(ctx context.Context) error { + watchdogCtx, shutdown := context.WithCancel(ctx) + defer shutdown() + + // Handle watchdog shutdown + go func() { + defer shutdown() + <-ctx.Done() + }() + + pidObservedCh := make(chan int) + go o.runPidObserver(watchdogCtx, pidObservedCh) + + // Wait while we get the initial PID for the process + klog.Infof("Waiting for process %q PID ...", o.ProcessName) + currentPID := <-pidObservedCh + + // Mutate path for specified files as '/proc/PID/root/' + // This means side-car container don't have to duplicate the mounts from main container. + // This require shared PID namespace feature. 
+ filesToWatch := o.addProcPrefixToFilesFn(o.Files, currentPID) + klog.Infof("Watching for changes in: %s", spew.Sdump(filesToWatch)) + + // Read initial file content. If shared PID namespace does not work, this will error. + initialContent, err := readInitialFileContent(filesToWatch) + if err != nil { + // TODO: remove this once we get aggregated logging + o.recorder.Warningf("FileChangeWatchdogFailed", "Reading initial file content failed: %v", err) + return fmt.Errorf("unable to read initial file content: %v", err) + } + + o.recorder.Eventf("FileChangeWatchdogStarted", "Started watching files for process %s[%d]", o.ProcessName, currentPID) + + observer, err := fileobserver.NewObserver(o.Interval) + if err != nil { + o.recorder.Warningf("ObserverFailed", "Failed to start to file observer: %v", err) + return fmt.Errorf("unable to start file observer: %v", err) + } + + observer.AddReactor(func(file string, action fileobserver.ActionType) error { + // We already signalled this PID to terminate and the process is being gracefully terminated now. + // Do not duplicate termination process for PID we already terminated, but wait for the new PID to appear. + if currentPID == o.lastTerminatedPid { + return nil + } + + o.lastTerminatedPid = currentPID + defer shutdown() + + o.recorder.Eventf("FileChangeObserved", "Observed change in file %q, gracefully terminating process %s[%d]", file, o.ProcessName, currentPID) + + if err := o.terminateGracefully(currentPID); err != nil { + o.recorder.Warningf("SignalFailed", "Failed to terminate process %s[%d] gracefully: %v", o.ProcessName, currentPID, err) + return err + } + + return nil + }, initialContent, filesToWatch...) 
+ + go observer.Run(watchdogCtx.Done()) + + <-watchdogCtx.Done() + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/watchdog/cmd_test.go b/vendor/github.com/openshift/library-go/pkg/operator/watchdog/cmd_test.go new file mode 100644 index 000000000..8821e6457 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/watchdog/cmd_test.go @@ -0,0 +1,152 @@ +package watchdog + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + "sync" + "testing" + "time" + + "github.com/openshift/library-go/pkg/operator/events/eventstesting" +) + +func TestPidObserver(t *testing.T) { + var currentPIDMutex = sync.Mutex{} + currentPID := 1 + + getProcessPIDByName := func(name string) (int, bool, error) { + currentPIDMutex.Lock() + defer currentPIDMutex.Unlock() + return currentPID, true, nil + } + + watcher := &FileWatcherOptions{ + findPidByNameFn: getProcessPIDByName, + } + + pidObservedCh := make(chan int) + monitorTerminated := make(chan struct{}) + + go func() { + defer close(monitorTerminated) + watcher.runPidObserver(context.TODO(), pidObservedCh) + }() + + // We should receive the initial PID + select { + case pid := <-pidObservedCh: + if pid != 1 { + t.Fatalf("expected PID 1, got %d", pid) + } + t.Log("initial PID observed") + case <-time.After(10 * time.Second): + t.Fatal("timeout (observing initial PID)") + } + + // We changed the PID, the monitor should gracefully terminate + currentPIDMutex.Lock() + currentPID = 10 + currentPIDMutex.Unlock() + + select { + case <-monitorTerminated: + t.Log("monitor successfully terminated") + case <-time.After(10 * time.Second): + t.Fatal("timeout (terminating monitor)") + } +} + +func TestWatchdogRun(t *testing.T) { + signalTermRecv := make(chan int) + signalKillRecv := make(chan int) + + // Make temporary file we are going to watch and write changes + testDir, err := ioutil.TempDir("", "test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(testDir) + if err := 
ioutil.WriteFile(filepath.Join(testDir, "testfile"), []byte("starting"), os.ModePerm); err != nil { + t.Fatal(err) + } + + opts := &FileWatcherOptions{ + ProcessName: "test", + Files: []string{filepath.Join(testDir, "testfile")}, + handleTerminationFn: func(pid int) error { + signalTermRecv <- pid + return nil + }, + handleKillFn: func(pid int) error { + signalKillRecv <- pid + return nil + }, + findPidByNameFn: func(name string) (int, bool, error) { + return 10, true, nil + }, + processExistsFn: func(int) (bool, error) { + return true, nil + }, + addProcPrefixToFilesFn: func(files []string, i int) []string { + return files + }, + Interval: 200 * time.Millisecond, + TerminationGracePeriod: 1 * time.Second, + recorder: eventstesting.NewTestingEventRecorder(t), + } + + // commandCtx is context used for the Run() method + commandCtx, shutdown := context.WithTimeout(context.TODO(), 1*time.Minute) + defer shutdown() + + commandTerminatedCh := make(chan struct{}) + go func() { + defer close(commandTerminatedCh) + if err := opts.Run(commandCtx); err != nil { + t.Fatal(err) + } + }() + + // Give file watcher time to observe the file + time.Sleep(1 * time.Second) + + // Modify the monitored file + if err := ioutil.WriteFile(filepath.Join(testDir, "testfile"), []byte("changed"), os.ModePerm); err != nil { + t.Fatal(err) + } + + select { + case pid := <-signalTermRecv: + if pid != 10 { + t.Errorf("expected received PID to be 10, got %d", pid) + } + case <-time.After(20 * time.Second): + t.Fatal("timeout (waiting for PID)") + } + + select { + case pid := <-signalKillRecv: + if pid != 10 { + t.Errorf("expected received PID to be 10, got %d", pid) + } + case <-time.After(20 * time.Second): + t.Fatal("timeout (waiting for PID)") + } + + select { + case <-commandTerminatedCh: + t.Fatal("run command is not expected to terminate") + default: + } + + // Test the shutdown sequence + shutdown() + select { + case <-commandTerminatedCh: + case <-time.After(20 * time.Second): + 
t.Fatal("run command failed to terminate") + } + +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/watchdog/proc.go b/vendor/github.com/openshift/library-go/pkg/operator/watchdog/proc.go new file mode 100644 index 000000000..f497b41b9 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/watchdog/proc.go @@ -0,0 +1,76 @@ +package watchdog + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "sort" + "strconv" + "syscall" +) + +// FindProcessByName find the process name specified by name and return the PID of that process. +// If the process is not found, the bool is false. +// NOTE: This require container with shared process namespace (if run as side-car). +func FindProcessByName(name string) (int, bool, error) { + files, err := ioutil.ReadDir("/proc") + if err != nil { + return 0, false, err + } + // sort means we start with the directories with numbers + sort.Slice(files, func(i, j int) bool { + return files[i].Name() < files[j].Name() + }) + for _, file := range files { + if !file.IsDir() { + continue + } + // only scan process directories (eg. /proc/1234) + pid, err := strconv.Atoi(file.Name()) + if err != nil { + continue + } + // read the /proc/123/exe symlink that points to a process + linkTarget := readlink(filepath.Join("/proc", file.Name(), "exe")) + if path.Base(linkTarget) != name { + continue + } + return pid, true, nil + } + return 0, false, nil +} + +// ProcessExists checks if the process specified by a PID exists in the /proc filesystem. +// Error is returned when the stat on the /proc dir fail (permission issue). 
+func ProcessExists(pid int) (bool, error) { + procDir, err := os.Stat(fmt.Sprintf("/proc/%d", pid)) + if os.IsNotExist(err) { + return false, nil + } + if err != nil { + return false, err + } + if procDir.IsDir() { + return true, nil + } else { + return false, fmt.Errorf("unexpected error: /proc/%d is file, not directory", pid) + } +} + +// readlink is copied from the os.Readlink() but does not return error when the target path does not exists. +// This is used to read broken links as in case of share PID namespace, the /proc/1/exe points to a binary +// that does not exists from the source container. +func readlink(name string) string { + for l := 128; ; l *= 2 { + b := make([]byte, l) + n, _ := syscall.Readlink(name, b) + if n < 0 { + n = 0 + } + if n < l { + return string(b[0:n]) + } + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/watchdog/proc_test.go b/vendor/github.com/openshift/library-go/pkg/operator/watchdog/proc_test.go new file mode 100644 index 000000000..47b9e4bbc --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/watchdog/proc_test.go @@ -0,0 +1,96 @@ +package watchdog + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func TestReadLink(t *testing.T) { + tests := []struct { + name string + evalResult func(string, string, string, *testing.T) + preRun func(t *testing.T) (target, linkPath, dirName string) + postRun func(linkPath, dirName string, t *testing.T) + }{ + { + name: "target exists", + evalResult: func(target, link, result string, t *testing.T) { + if result != target { + t.Errorf("expected %q to match %q", result, target) + } + }, + preRun: func(t *testing.T) (string, string, string) { + tmpDir, err := ioutil.TempDir("", "existing") + if err != nil { + t.Fatalf("unable to create temp dir: %v", err) + } + if err := ioutil.WriteFile(filepath.Join(tmpDir, "testfile"), []byte{1}, os.ModePerm); err != nil { + t.Fatalf("unable to write file: %v", err) + } + if err := 
os.Symlink(filepath.Join(tmpDir, "testfile"), filepath.Join(tmpDir, "newfile")); err != nil { + t.Fatalf("unable to make symlink: %v", err) + } + return filepath.Join(tmpDir, "testfile"), filepath.Join(tmpDir, "newfile"), tmpDir + }, + postRun: func(linkPath, dirName string, t *testing.T) { + if err := os.RemoveAll(dirName); err != nil { + t.Fatalf("unable to remove %q: %v", dirName, err) + } + }, + }, + { + name: "target does not exists", + evalResult: func(target, link, result string, t *testing.T) { + if result != target { + t.Errorf("expected %q to match %q", result, target) + } + }, + preRun: func(t *testing.T) (string, string, string) { + tmpDir, err := ioutil.TempDir("", "broken") + if err != nil { + t.Fatalf("unable to create temp dir: %v", err) + } + if err := os.Symlink(filepath.Join(tmpDir, "testfile"), filepath.Join(tmpDir, "newfile")); err != nil { + t.Fatalf("unable to make symlink: %v", err) + } + return filepath.Join(tmpDir, "testfile"), filepath.Join(tmpDir, "newfile"), tmpDir + }, + postRun: func(linkPath, dirName string, t *testing.T) { + if err := os.RemoveAll(dirName); err != nil { + t.Fatalf("unable to remove %q: %v", dirName, err) + } + }, + }, + { + name: "source does not exists", + evalResult: func(target, link, result string, t *testing.T) { + if len(result) > 0 { + t.Errorf("expected result be empty, got: %q", result) + } + }, + preRun: func(t *testing.T) (string, string, string) { + tmpDir, err := ioutil.TempDir("", "broken-source") + if err != nil { + t.Fatalf("unable to create temp dir: %v", err) + } + return filepath.Join(tmpDir, "testfile"), filepath.Join(tmpDir, "newfile"), tmpDir + }, + postRun: func(linkPath, dirName string, t *testing.T) { + if err := os.RemoveAll(dirName); err != nil { + t.Fatalf("unable to remove %q: %v", dirName, err) + } + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + target, link, tempDir := test.preRun(t) + result := readlink(link) + test.evalResult(target, link, 
result, t) + test.postRun(link, tempDir, t) + }) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/clusterquotamapping.go b/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/clusterquotamapping.go new file mode 100644 index 000000000..712f9b8bc --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/clusterquotamapping.go @@ -0,0 +1,409 @@ +package clusterquotamapping + +import ( + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + kapierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + corev1informers "k8s.io/client-go/informers/core/v1" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" + + quotav1 "github.com/openshift/api/quota/v1" + quotainformer "github.com/openshift/client-go/quota/informers/externalversions/quota/v1" + quotalister "github.com/openshift/client-go/quota/listers/quota/v1" +) + +// Look out, here there be dragons! +// There is a race when dealing with the DeltaFifo compression used to back a reflector for a controller that uses two +// SharedInformers for both their watch events AND their caches. The scenario looks like this +// +// 1. Add, Delete a namespace really fast, *before* the add is observed by the controller using the reflector. +// 2. Add or Update a quota that matches the Add namespace +// 3. The cache had the intermediate state for the namespace for some period of time. This makes the quota update the mapping indicating a match. +// 4. The ns Delete is compressed out and never delivered to the controller, so the improper match is never cleared. +// +// This sounds pretty bad, however, we fail in the "safe" direction and the consequences are detectable. 
+// When going from quota to namespace, you can get back a namespace that doesn't exist. There are no resource in a non-existence +// namespace, so you know to clear all referenced resources. In addition, this add/delete has to happen so fast +// that it would be nearly impossible for any resources to be created. If you do create resources, then we must be observing +// their deletes. When quota is replenished, we'll see that we need to clear any charges. +// +// When going from namespace to quota, you can get back a quota that doesn't exist. Since the cache is shared, +// we know that a missing quota means that there isn't anything for us to bill against, so we can skip it. +// +// If the mapping cache is wrong and a previously deleted quota or namespace is created, this controller +// correctly adds the items back to the list and clears out all previous mappings. +// +// In addition to those constraints, the timing threshold for actually hitting this problem is really tight. It's +// basically a script that is creating and deleting things as fast as it possibly can. Sub-millisecond in the fuzz +// test where I caught the problem. 
+ +// NewClusterQuotaMappingController builds a mapping between namespaces and clusterresourcequotas +func NewClusterQuotaMappingController(namespaceInformer corev1informers.NamespaceInformer, quotaInformer quotainformer.ClusterResourceQuotaInformer) *ClusterQuotaMappingController { + c := newClusterQuotaMappingController(namespaceInformer.Informer(), quotaInformer) + c.namespaceLister = v1NamespaceLister{lister: namespaceInformer.Lister()} + return c +} + +type namespaceLister interface { + Each(label labels.Selector, fn func(metav1.Object) bool) error + Get(name string) (metav1.Object, error) +} + +type v1NamespaceLister struct { + lister corev1listers.NamespaceLister +} + +func (l v1NamespaceLister) Each(label labels.Selector, fn func(metav1.Object) bool) error { + results, err := l.lister.List(label) + if err != nil { + return err + } + for i := range results { + if !fn(results[i]) { + return nil + } + } + return nil +} +func (l v1NamespaceLister) Get(name string) (metav1.Object, error) { + return l.lister.Get(name) +} + +func newClusterQuotaMappingController(namespaceInformer cache.SharedIndexInformer, quotaInformer quotainformer.ClusterResourceQuotaInformer) *ClusterQuotaMappingController { + c := &ClusterQuotaMappingController{ + namespaceQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "controller_clusterquotamappingcontroller_namespaces"), + quotaQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "controller_clusterquotamappingcontroller_clusterquotas"), + clusterQuotaMapper: NewClusterQuotaMapper(), + } + namespaceInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: c.addNamespace, + UpdateFunc: c.updateNamespace, + DeleteFunc: c.deleteNamespace, + }) + c.namespacesSynced = namespaceInformer.HasSynced + + quotaInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: c.addQuota, + UpdateFunc: c.updateQuota, + DeleteFunc: c.deleteQuota, + }) + 
c.quotaLister = quotaInformer.Lister() + c.quotasSynced = quotaInformer.Informer().HasSynced + + return c +} + +type ClusterQuotaMappingController struct { + namespaceQueue workqueue.RateLimitingInterface + namespaceLister namespaceLister + namespacesSynced func() bool + + quotaQueue workqueue.RateLimitingInterface + quotaLister quotalister.ClusterResourceQuotaLister + quotasSynced func() bool + + clusterQuotaMapper *clusterQuotaMapper +} + +func (c *ClusterQuotaMappingController) GetClusterQuotaMapper() ClusterQuotaMapper { + return c.clusterQuotaMapper +} + +func (c *ClusterQuotaMappingController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.namespaceQueue.ShutDown() + defer c.quotaQueue.ShutDown() + + klog.Infof("Starting ClusterQuotaMappingController controller") + defer klog.Infof("Shutting down ClusterQuotaMappingController controller") + + if !cache.WaitForCacheSync(stopCh, c.namespacesSynced, c.quotasSynced) { + utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync")) + return + } + + klog.V(4).Infof("Starting workers for quota mapping controller workers") + for i := 0; i < workers; i++ { + go wait.Until(c.namespaceWorker, time.Second, stopCh) + go wait.Until(c.quotaWorker, time.Second, stopCh) + } + + <-stopCh +} + +func (c *ClusterQuotaMappingController) syncQuota(quota *quotav1.ClusterResourceQuota) error { + matcherFunc, err := GetObjectMatcher(quota.Spec.Selector) + if err != nil { + return err + } + + if err := c.namespaceLister.Each(labels.Everything(), func(obj metav1.Object) bool { + // attempt to set the mapping. 
The quotas never collide with each other (same quota is never processed twice in parallel) + // so this means that the project we have is out of date, pull a more recent copy from the cache and retest + for { + matches, err := matcherFunc(obj) + if err != nil { + utilruntime.HandleError(err) + break + } + success, quotaMatches, _ := c.clusterQuotaMapper.setMapping(quota, obj, !matches) + if success { + break + } + + // if the quota is mismatched, then someone has updated the quota or has deleted the entry entirely. + // if we've been updated, we'll be rekicked, if we've been deleted we should stop. Either way, this + // execution is finished + if !quotaMatches { + return false + } + newer, err := c.namespaceLister.Get(obj.GetName()) + if kapierrors.IsNotFound(err) { + // if the namespace is gone, then the deleteNamespace path will be called, just continue + break + } + if err != nil { + utilruntime.HandleError(err) + break + } + obj = newer + } + return true + }); err != nil { + return err + } + + c.clusterQuotaMapper.completeQuota(quota) + return nil +} + +func (c *ClusterQuotaMappingController) syncNamespace(namespace metav1.Object) error { + allQuotas, err1 := c.quotaLister.List(labels.Everything()) + if err1 != nil { + return err1 + } + for i := range allQuotas { + quota := allQuotas[i] + + for { + matcherFunc, err := GetObjectMatcher(quota.Spec.Selector) + if err != nil { + utilruntime.HandleError(err) + break + } + + // attempt to set the mapping. 
The namespaces never collide with each other (same namespace is never processed twice in parallel) + // so this means that the quota we have is out of date, pull a more recent copy from the cache and retest + matches, err := matcherFunc(namespace) + if err != nil { + utilruntime.HandleError(err) + break + } + success, _, namespaceMatches := c.clusterQuotaMapper.setMapping(quota, namespace, !matches) + if success { + break + } + + // if the namespace is mismatched, then someone has updated the namespace or has deleted the entry entirely. + // if we've been updated, we'll be rekicked, if we've been deleted we should stop. Either way, this + // execution is finished + if !namespaceMatches { + return nil + } + + quota, err = c.quotaLister.Get(quota.Name) + if kapierrors.IsNotFound(err) { + // if the quota is gone, then the deleteQuota path will be called, just continue + break + } + if err != nil { + utilruntime.HandleError(err) + break + } + } + } + + c.clusterQuotaMapper.completeNamespace(namespace) + return nil +} + +func (c *ClusterQuotaMappingController) quotaWork() bool { + key, quit := c.quotaQueue.Get() + if quit { + return true + } + defer c.quotaQueue.Done(key) + + quota, err := c.quotaLister.Get(key.(string)) + if err != nil { + if errors.IsNotFound(err) { + c.quotaQueue.Forget(key) + return false + } + utilruntime.HandleError(err) + return false + } + + err = c.syncQuota(quota) + outOfRetries := c.quotaQueue.NumRequeues(key) > 5 + switch { + case err != nil && outOfRetries: + utilruntime.HandleError(err) + c.quotaQueue.Forget(key) + + case err != nil && !outOfRetries: + c.quotaQueue.AddRateLimited(key) + + default: + c.quotaQueue.Forget(key) + } + + return false +} + +func (c *ClusterQuotaMappingController) quotaWorker() { + for { + if quit := c.quotaWork(); quit { + return + } + } +} + +func (c *ClusterQuotaMappingController) namespaceWork() bool { + key, quit := c.namespaceQueue.Get() + if quit { + return true + } + defer c.namespaceQueue.Done(key) + + 
namespace, err := c.namespaceLister.Get(key.(string)) + if kapierrors.IsNotFound(err) { + c.namespaceQueue.Forget(key) + return false + } + if err != nil { + utilruntime.HandleError(err) + return false + } + + err = c.syncNamespace(namespace) + outOfRetries := c.namespaceQueue.NumRequeues(key) > 5 + switch { + case err != nil && outOfRetries: + utilruntime.HandleError(err) + c.namespaceQueue.Forget(key) + + case err != nil && !outOfRetries: + c.namespaceQueue.AddRateLimited(key) + + default: + c.namespaceQueue.Forget(key) + } + + return false +} + +func (c *ClusterQuotaMappingController) namespaceWorker() { + for { + if quit := c.namespaceWork(); quit { + return + } + } +} + +func (c *ClusterQuotaMappingController) deleteNamespace(obj interface{}) { + var name string + switch ns := obj.(type) { + case cache.DeletedFinalStateUnknown: + switch nested := ns.Obj.(type) { + case *corev1.Namespace: + name = nested.Name + default: + utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a Namespace %T", ns.Obj)) + return + } + case *corev1.Namespace: + name = ns.Name + default: + utilruntime.HandleError(fmt.Errorf("not a Namespace %v", obj)) + return + } + c.clusterQuotaMapper.removeNamespace(name) +} + +func (c *ClusterQuotaMappingController) addNamespace(cur interface{}) { + c.enqueueNamespace(cur) +} +func (c *ClusterQuotaMappingController) updateNamespace(old, cur interface{}) { + c.enqueueNamespace(cur) +} +func (c *ClusterQuotaMappingController) enqueueNamespace(obj interface{}) { + switch ns := obj.(type) { + case *corev1.Namespace: + if !c.clusterQuotaMapper.requireNamespace(ns) { + return + } + default: + utilruntime.HandleError(fmt.Errorf("not a Namespace %v", obj)) + return + } + + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + utilruntime.HandleError(err) + return + } + c.namespaceQueue.Add(key) +} + +func (c *ClusterQuotaMappingController) deleteQuota(obj interface{}) { + quota, ok1 := 
obj.(*quotav1.ClusterResourceQuota) + if !ok1 { + tombstone, ok := obj.(cache.DeletedFinalStateUnknown) + if !ok { + utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %v", obj)) + return + } + quota, ok = tombstone.Obj.(*quotav1.ClusterResourceQuota) + if !ok { + utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a Quota %v", obj)) + return + } + } + + c.clusterQuotaMapper.removeQuota(quota.Name) +} + +func (c *ClusterQuotaMappingController) addQuota(cur interface{}) { + c.enqueueQuota(cur) +} +func (c *ClusterQuotaMappingController) updateQuota(old, cur interface{}) { + c.enqueueQuota(cur) +} +func (c *ClusterQuotaMappingController) enqueueQuota(obj interface{}) { + quota, ok := obj.(*quotav1.ClusterResourceQuota) + if !ok { + utilruntime.HandleError(fmt.Errorf("not a Quota %v", obj)) + return + } + if !c.clusterQuotaMapper.requireQuota(quota) { + return + } + + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(quota) + if err != nil { + utilruntime.HandleError(err) + return + } + c.quotaQueue.Add(key) +} diff --git a/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/clusterquotamapping_test.go b/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/clusterquotamapping_test.go new file mode 100644 index 000000000..cffcb10f6 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/clusterquotamapping_test.go @@ -0,0 +1,319 @@ +package clusterquotamapping + +import ( + "fmt" + "math/rand" + "reflect" + "strings" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/watch" + kexternalinformers "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes/fake" + clientgotesting "k8s.io/client-go/testing" + + quotav1 "github.com/openshift/api/quota/v1" + quotaclient 
"github.com/openshift/client-go/quota/clientset/versioned/fake" + quotainformer "github.com/openshift/client-go/quota/informers/externalversions" +) + +var ( + keys = []string{"different", "used", "important", "every", "large"} + values = []string{"time", "person"} + annotationKeys = []string{"different", "used", "important", "every", "large", "foo.bar.baz/key", "whitespace key"} + annotationValues = []string{"Person", "time and place", "Thing", "me@example.com", "system:admin"} + namespaceNames = []string{ + "tokillamockingbird", "harrypotter", "1984", "prideandprejudice", "thediaryofayounggirl", "animalfarm", "thehobbit", + "thelittleprince", "thegreatgatsby", "thecatcherintherye", "lordoftherings", "janeeyre", "romeoandjuliet", "thechroniclesofnarnia", + "lordoftheflies", "thegivingtree", "charlottesweb", "greeneggsandham", "alicesadventuresinwonderland", "littlewomen", + "ofmiceandmend", "wutheringheights", "thehungergames", "gonewiththewind", "thepictureofdoriangray", "theadventuresofhuckleberryfinn", + "fahrenheit451", "hamlet", "thehitchhikersguidetothegalaxy", "bravenewworld", "lesmiserables", "crimeandpunishment", "memoirsofageisha", + } + quotaNames = []string{"emma", "olivia", "sophia", "ava", "isabella", "mia", "abigail", "emily", "charlotte", "harper"} + + maxSelectorKeys = 2 + maxLabels = 5 +) + +func TestClusterQuotaFuzzer(t *testing.T) { + for j := 0; j < 100; j++ { + t.Logf("attempt %d", (j + 1)) + runFuzzer(t) + } +} + +func runFuzzer(t *testing.T) { + stopCh := make(chan struct{}) + defer close(stopCh) + + startingNamespaces := CreateStartingNamespaces() + kubeClient := fake.NewSimpleClientset(startingNamespaces...) 
+ nsWatch := watch.NewFake() + kubeClient.PrependWatchReactor("namespaces", clientgotesting.DefaultWatchReactor(nsWatch, nil)) + + kubeInformerFactory := kexternalinformers.NewSharedInformerFactory(kubeClient, 10*time.Minute) + + startingQuotas := CreateStartingQuotas() + quotaWatch := watch.NewFake() + quotaClient := quotaclient.NewSimpleClientset(startingQuotas...) + quotaClient.PrependWatchReactor("clusterresourcequotas", clientgotesting.DefaultWatchReactor(quotaWatch, nil)) + quotaFactory := quotainformer.NewSharedInformerFactory(quotaClient, 0) + + controller := NewClusterQuotaMappingController(kubeInformerFactory.Core().V1().Namespaces(), quotaFactory.Quota().V1().ClusterResourceQuotas()) + go controller.Run(5, stopCh) + quotaFactory.Start(stopCh) + kubeInformerFactory.Start(stopCh) + + finalNamespaces := map[string]*corev1.Namespace{} + finalQuotas := map[string]*quotav1.ClusterResourceQuota{} + quotaActions := map[string][]string{} + namespaceActions := map[string][]string{} + finishedNamespaces := make(chan struct{}) + finishedQuotas := make(chan struct{}) + + for _, quota := range startingQuotas { + name := quota.(*quotav1.ClusterResourceQuota).Name + quotaActions[name] = append(quotaActions[name], fmt.Sprintf("inserting %v to %v", name, quota.(*quotav1.ClusterResourceQuota).Spec.Selector)) + finalQuotas[name] = quota.(*quotav1.ClusterResourceQuota) + } + for _, namespace := range startingNamespaces { + name := namespace.(*corev1.Namespace).Name + namespaceActions[name] = append(namespaceActions[name], fmt.Sprintf("inserting %v to %v", name, namespace.(*corev1.Namespace).Labels)) + finalNamespaces[name] = namespace.(*corev1.Namespace) + } + + go func() { + for i := 0; i < 200; i++ { + name := quotaNames[rand.Intn(len(quotaNames))] + _, exists := finalQuotas[name] + if rand.Intn(50) == 0 { + if !exists { + continue + } + // due to the compression race (see big comment for impl), clear the queue then delete + for { + if len(quotaWatch.ResultChan()) == 0 { + 
break + } + time.Sleep(10 * time.Millisecond) + } + + quotaActions[name] = append(quotaActions[name], "deleting "+name) + quotaWatch.Delete(finalQuotas[name]) + delete(finalQuotas, name) + continue + } + + quota := NewQuota(name) + finalQuotas[name] = quota + copied := quota.DeepCopy() + if exists { + quotaActions[name] = append(quotaActions[name], fmt.Sprintf("updating %v to %v", name, quota.Spec.Selector)) + quotaWatch.Modify(copied) + } else { + quotaActions[name] = append(quotaActions[name], fmt.Sprintf("adding %v to %v", name, quota.Spec.Selector)) + quotaWatch.Add(copied) + } + } + close(finishedQuotas) + }() + + go func() { + for i := 0; i < 200; i++ { + name := namespaceNames[rand.Intn(len(namespaceNames))] + _, exists := finalNamespaces[name] + if rand.Intn(50) == 0 { + if !exists { + continue + } + // due to the compression race (see big comment for impl), clear the queue then delete + for { + if len(nsWatch.ResultChan()) == 0 { + break + } + time.Sleep(10 * time.Millisecond) + } + + namespaceActions[name] = append(namespaceActions[name], "deleting "+name) + nsWatch.Delete(finalNamespaces[name]) + delete(finalNamespaces, name) + continue + } + + ns := NewNamespace(name) + finalNamespaces[name] = ns + copied := ns.DeepCopy() + if exists { + namespaceActions[name] = append(namespaceActions[name], fmt.Sprintf("updating %v to %v", name, ns.Labels)) + nsWatch.Modify(copied) + } else { + namespaceActions[name] = append(namespaceActions[name], fmt.Sprintf("adding %v to %v", name, ns.Labels)) + nsWatch.Add(copied) + } + } + close(finishedNamespaces) + }() + + <-finishedQuotas + <-finishedNamespaces + + finalFailures := []string{} + for i := 0; i < 200; i++ { + // better suggestions for testing doneness? Check the condition a few times? 
+ time.Sleep(50 * time.Millisecond) + + finalFailures = checkState(controller, finalNamespaces, finalQuotas, t, quotaActions, namespaceActions) + if len(finalFailures) == 0 { + break + } + } + + if len(finalFailures) > 0 { + t.Logf("have %d quotas and %d namespaces", len(quotaWatch.ResultChan()), len(nsWatch.ResultChan())) + t.Fatalf("failed on \n%v", strings.Join(finalFailures, "\n")) + } +} + +func checkState(controller *ClusterQuotaMappingController, finalNamespaces map[string]*corev1.Namespace, finalQuotas map[string]*quotav1.ClusterResourceQuota, t *testing.T, quotaActions, namespaceActions map[string][]string) []string { + failures := []string{} + + quotaToNamespaces := map[string]sets.String{} + for _, quotaName := range quotaNames { + quotaToNamespaces[quotaName] = sets.String{} + } + namespacesToQuota := map[string]sets.String{} + for _, namespaceName := range namespaceNames { + namespacesToQuota[namespaceName] = sets.String{} + } + for _, quota := range finalQuotas { + matcherFunc, err := GetMatcher(quota.Spec.Selector) + if err != nil { + t.Fatal(err) + } + for _, namespace := range finalNamespaces { + if matches, _ := matcherFunc(namespace); matches { + quotaToNamespaces[quota.Name].Insert(namespace.Name) + namespacesToQuota[namespace.Name].Insert(quota.Name) + } + } + } + + for _, quotaName := range quotaNames { + namespaces, selector := controller.clusterQuotaMapper.GetNamespacesFor(quotaName) + nsSet := sets.NewString(namespaces...) + if !nsSet.Equal(quotaToNamespaces[quotaName]) { + failures = append(failures, fmt.Sprintf("quota %v, expected %v, got %v", quotaName, quotaToNamespaces[quotaName].List(), nsSet.List())) + failures = append(failures, quotaActions[quotaName]...) 
+ }
+ if quota, ok := finalQuotas[quotaName]; ok && !reflect.DeepEqual(quota.Spec.Selector, selector) {
+ failures = append(failures, fmt.Sprintf("quota %v, expected %v, got %v", quotaName, quota.Spec.Selector, selector))
+ }
+ }
+
+ for _, namespaceName := range namespaceNames {
+ quotas, selectionFields := controller.clusterQuotaMapper.GetClusterQuotasFor(namespaceName)
+ quotaSet := sets.NewString(quotas...)
+ if !quotaSet.Equal(namespacesToQuota[namespaceName]) {
+ failures = append(failures, fmt.Sprintf("namespace %v, expected %v, got %v", namespaceName, namespacesToQuota[namespaceName].List(), quotaSet.List()))
+ failures = append(failures, namespaceActions[namespaceName]...)
+ }
+ if namespace, ok := finalNamespaces[namespaceName]; ok && !reflect.DeepEqual(GetSelectionFields(namespace), selectionFields) {
+ failures = append(failures, fmt.Sprintf("namespace %v, expected %v, got %v", namespaceName, GetSelectionFields(namespace), selectionFields))
+ }
+ }
+
+ return failures
+}
+
+func CreateStartingQuotas() []runtime.Object {
+ count := rand.Intn(len(quotaNames))
+ used := sets.String{}
+ ret := []runtime.Object{}
+
+ for i := 0; i < count; i++ {
+ name := quotaNames[rand.Intn(len(quotaNames))]
+ if !used.Has(name) {
+ ret = append(ret, NewQuota(name))
+ used.Insert(name)
+ }
+ }
+
+ return ret
+}
+
+func CreateStartingNamespaces() []runtime.Object {
+ count := rand.Intn(len(namespaceNames))
+ used := sets.String{}
+ ret := []runtime.Object{}
+
+ for i := 0; i < count; i++ {
+ name := namespaceNames[rand.Intn(len(namespaceNames))]
+ if !used.Has(name) {
+ ret = append(ret, NewNamespace(name))
+ used.Insert(name)
+ }
+ }
+
+ return ret
+}
+
+func NewQuota(name string) *quotav1.ClusterResourceQuota {
+ ret := &quotav1.ClusterResourceQuota{}
+ ret.Name = name
+
+ numSelectorKeys := rand.Intn(maxSelectorKeys) + 1
+ if numSelectorKeys == 0 {
+ return ret
+ }
+
+ ret.Spec.Selector.LabelSelector = &metav1.LabelSelector{MatchLabels: map[string]string{}}
+ for i := 0; i < 
numSelectorKeys; i++ { + key := keys[rand.Intn(len(keys))] + value := values[rand.Intn(len(values))] + + ret.Spec.Selector.LabelSelector.MatchLabels[key] = value + } + + ret.Spec.Selector.AnnotationSelector = map[string]string{} + for i := 0; i < numSelectorKeys; i++ { + key := annotationKeys[rand.Intn(len(annotationKeys))] + value := annotationValues[rand.Intn(len(annotationValues))] + + ret.Spec.Selector.AnnotationSelector[key] = value + } + + return ret +} + +func NewNamespace(name string) *corev1.Namespace { + ret := &corev1.Namespace{} + ret.Name = name + + numLabels := rand.Intn(maxLabels) + 1 + if numLabels == 0 { + return ret + } + + ret.Labels = map[string]string{} + for i := 0; i < numLabels; i++ { + key := keys[rand.Intn(len(keys))] + value := values[rand.Intn(len(values))] + + ret.Labels[key] = value + } + + ret.Annotations = map[string]string{} + for i := 0; i < numLabels; i++ { + key := annotationKeys[rand.Intn(len(annotationKeys))] + value := annotationValues[rand.Intn(len(annotationValues))] + + ret.Annotations[key] = value + } + + return ret +} diff --git a/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/helpers.go b/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/helpers.go new file mode 100644 index 000000000..0c2c2ae7a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/helpers.go @@ -0,0 +1,139 @@ +package clusterquotamapping + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + + quotav1 "github.com/openshift/api/quota/v1" +) + +func GetResourceQuotasStatusByNamespace(namespaceStatuses quotav1.ResourceQuotasStatusByNamespace, namespace string) (corev1.ResourceQuotaStatus, bool) { + for i := range namespaceStatuses { + curr := namespaceStatuses[i] + if curr.Namespace == namespace { + return curr.Status, true + } + } + 
return corev1.ResourceQuotaStatus{}, false +} + +func RemoveResourceQuotasStatusByNamespace(namespaceStatuses *quotav1.ResourceQuotasStatusByNamespace, namespace string) { + newNamespaceStatuses := quotav1.ResourceQuotasStatusByNamespace{} + for i := range *namespaceStatuses { + curr := (*namespaceStatuses)[i] + if curr.Namespace == namespace { + continue + } + newNamespaceStatuses = append(newNamespaceStatuses, curr) + } + *namespaceStatuses = newNamespaceStatuses +} + +func InsertResourceQuotasStatus(namespaceStatuses *quotav1.ResourceQuotasStatusByNamespace, newStatus quotav1.ResourceQuotaStatusByNamespace) { + newNamespaceStatuses := quotav1.ResourceQuotasStatusByNamespace{} + found := false + for i := range *namespaceStatuses { + curr := (*namespaceStatuses)[i] + if curr.Namespace == newStatus.Namespace { + // do this so that we don't change serialization order + newNamespaceStatuses = append(newNamespaceStatuses, newStatus) + found = true + continue + } + newNamespaceStatuses = append(newNamespaceStatuses, curr) + } + if !found { + newNamespaceStatuses = append(newNamespaceStatuses, newStatus) + } + *namespaceStatuses = newNamespaceStatuses +} + +var accessor = meta.NewAccessor() + +func GetMatcher(selector quotav1.ClusterResourceQuotaSelector) (func(obj runtime.Object) (bool, error), error) { + var labelSelector labels.Selector + if selector.LabelSelector != nil { + var err error + labelSelector, err = metav1.LabelSelectorAsSelector(selector.LabelSelector) + if err != nil { + return nil, err + } + } + + var annotationSelector map[string]string + if len(selector.AnnotationSelector) > 0 { + // ensure our matcher has a stable copy of the map + annotationSelector = make(map[string]string, len(selector.AnnotationSelector)) + for k, v := range selector.AnnotationSelector { + annotationSelector[k] = v + } + } + + return func(obj runtime.Object) (bool, error) { + if labelSelector != nil { + objLabels, err := accessor.Labels(obj) + if err != nil { + return false, err 
+ } + if !labelSelector.Matches(labels.Set(objLabels)) { + return false, nil + } + } + + if annotationSelector != nil { + objAnnotations, err := accessor.Annotations(obj) + if err != nil { + return false, err + } + for k, v := range annotationSelector { + if objValue, exists := objAnnotations[k]; !exists || objValue != v { + return false, nil + } + } + } + + return true, nil + }, nil +} + +func GetObjectMatcher(selector quotav1.ClusterResourceQuotaSelector) (func(obj metav1.Object) (bool, error), error) { + var labelSelector labels.Selector + if selector.LabelSelector != nil { + var err error + labelSelector, err = metav1.LabelSelectorAsSelector(selector.LabelSelector) + if err != nil { + return nil, err + } + } + + var annotationSelector map[string]string + if len(selector.AnnotationSelector) > 0 { + // ensure our matcher has a stable copy of the map + annotationSelector = make(map[string]string, len(selector.AnnotationSelector)) + for k, v := range selector.AnnotationSelector { + annotationSelector[k] = v + } + } + + return func(obj metav1.Object) (bool, error) { + if labelSelector != nil { + if !labelSelector.Matches(labels.Set(obj.GetLabels())) { + return false, nil + } + } + + if annotationSelector != nil { + objAnnotations := obj.GetAnnotations() + for k, v := range annotationSelector { + if objValue, exists := objAnnotations[k]; !exists || objValue != v { + return false, nil + } + } + } + + return true, nil + }, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/mapper.go b/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/mapper.go new file mode 100644 index 000000000..e8d66c4fa --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/quota/clusterquotamapping/mapper.go @@ -0,0 +1,289 @@ +package clusterquotamapping + +import ( + "reflect" + "sync" + + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + + quotav1 
"github.com/openshift/api/quota/v1" +) + +type ClusterQuotaMapper interface { + // GetClusterQuotasFor returns the list of clusterquota names that this namespace matches. It also + // returns the selectionFields associated with the namespace for the check so that callers can determine staleness + GetClusterQuotasFor(namespaceName string) ([]string, SelectionFields) + // GetNamespacesFor returns the list of namespace names that this cluster quota matches. It also + // returns the selector associated with the clusterquota for the check so that callers can determine staleness + GetNamespacesFor(quotaName string) ([]string, quotav1.ClusterResourceQuotaSelector) + + AddListener(listener MappingChangeListener) +} + +// MappingChangeListener is notified of changes to the mapping. It must not block. +type MappingChangeListener interface { + AddMapping(quotaName, namespaceName string) + RemoveMapping(quotaName, namespaceName string) +} + +type SelectionFields struct { + Labels map[string]string + Annotations map[string]string +} + +// clusterQuotaMapper gives thread safe access to the actual mappings that are being stored. +// Many method use a shareable read lock to check status followed by a non-shareable +// write lock which double checks the condition before proceeding. Since locks aren't escalatable +// you have to perform the recheck because someone could have beaten you in. 
+type clusterQuotaMapper struct { + lock sync.RWMutex + + // requiredQuotaToSelector indicates the latest label selector this controller has observed for a quota + requiredQuotaToSelector map[string]quotav1.ClusterResourceQuotaSelector + // requiredNamespaceToLabels indicates the latest selectionFields this controller has observed for a namespace + requiredNamespaceToLabels map[string]SelectionFields + // completedQuotaToSelector indicates the latest label selector this controller has scanned against namespaces + completedQuotaToSelector map[string]quotav1.ClusterResourceQuotaSelector + // completedNamespaceToLabels indicates the latest selectionFields this controller has scanned against cluster quotas + completedNamespaceToLabels map[string]SelectionFields + + quotaToNamespaces map[string]sets.String + namespaceToQuota map[string]sets.String + + listeners []MappingChangeListener +} + +func NewClusterQuotaMapper() *clusterQuotaMapper { + return &clusterQuotaMapper{ + requiredQuotaToSelector: map[string]quotav1.ClusterResourceQuotaSelector{}, + requiredNamespaceToLabels: map[string]SelectionFields{}, + completedQuotaToSelector: map[string]quotav1.ClusterResourceQuotaSelector{}, + completedNamespaceToLabels: map[string]SelectionFields{}, + + quotaToNamespaces: map[string]sets.String{}, + namespaceToQuota: map[string]sets.String{}, + } +} + +func (m *clusterQuotaMapper) GetClusterQuotasFor(namespaceName string) ([]string, SelectionFields) { + m.lock.RLock() + defer m.lock.RUnlock() + + quotas, ok := m.namespaceToQuota[namespaceName] + if !ok { + return []string{}, m.completedNamespaceToLabels[namespaceName] + } + return quotas.List(), m.completedNamespaceToLabels[namespaceName] +} + +func (m *clusterQuotaMapper) GetNamespacesFor(quotaName string) ([]string, quotav1.ClusterResourceQuotaSelector) { + m.lock.RLock() + defer m.lock.RUnlock() + + namespaces, ok := m.quotaToNamespaces[quotaName] + if !ok { + return []string{}, m.completedQuotaToSelector[quotaName] + } + 
return namespaces.List(), m.completedQuotaToSelector[quotaName] +} + +func (m *clusterQuotaMapper) AddListener(listener MappingChangeListener) { + m.lock.Lock() + defer m.lock.Unlock() + + m.listeners = append(m.listeners, listener) +} + +// requireQuota updates the selector requirements for the given quota. This prevents stale updates to the mapping itself. +// returns true if a modification was made +func (m *clusterQuotaMapper) requireQuota(quota *quotav1.ClusterResourceQuota) bool { + m.lock.RLock() + selector, exists := m.requiredQuotaToSelector[quota.Name] + m.lock.RUnlock() + + if selectorMatches(selector, exists, quota) { + return false + } + + m.lock.Lock() + defer m.lock.Unlock() + selector, exists = m.requiredQuotaToSelector[quota.Name] + if selectorMatches(selector, exists, quota) { + return false + } + + m.requiredQuotaToSelector[quota.Name] = quota.Spec.Selector + return true +} + +// completeQuota updates the latest selector used to generate the mappings for this quota. The value is returned +// by the Get methods for the mapping so that callers can determine staleness +func (m *clusterQuotaMapper) completeQuota(quota *quotav1.ClusterResourceQuota) { + m.lock.Lock() + defer m.lock.Unlock() + m.completedQuotaToSelector[quota.Name] = quota.Spec.Selector +} + +// removeQuota deletes a quota from all mappings +func (m *clusterQuotaMapper) removeQuota(quotaName string) { + m.lock.Lock() + defer m.lock.Unlock() + + delete(m.requiredQuotaToSelector, quotaName) + delete(m.completedQuotaToSelector, quotaName) + delete(m.quotaToNamespaces, quotaName) + for namespaceName, quotas := range m.namespaceToQuota { + if quotas.Has(quotaName) { + quotas.Delete(quotaName) + for _, listener := range m.listeners { + listener.RemoveMapping(quotaName, namespaceName) + } + } + } +} + +// requireNamespace updates the label requirements for the given namespace. This prevents stale updates to the mapping itself. 
+// returns true if a modification was made +func (m *clusterQuotaMapper) requireNamespace(namespace metav1.Object) bool { + m.lock.RLock() + selectionFields, exists := m.requiredNamespaceToLabels[namespace.GetName()] + m.lock.RUnlock() + + if selectionFieldsMatch(selectionFields, exists, namespace) { + return false + } + + m.lock.Lock() + defer m.lock.Unlock() + selectionFields, exists = m.requiredNamespaceToLabels[namespace.GetName()] + if selectionFieldsMatch(selectionFields, exists, namespace) { + return false + } + + m.requiredNamespaceToLabels[namespace.GetName()] = GetSelectionFields(namespace) + return true +} + +// completeNamespace updates the latest selectionFields used to generate the mappings for this namespace. The value is returned +// by the Get methods for the mapping so that callers can determine staleness +func (m *clusterQuotaMapper) completeNamespace(namespace metav1.Object) { + m.lock.Lock() + defer m.lock.Unlock() + m.completedNamespaceToLabels[namespace.GetName()] = GetSelectionFields(namespace) +} + +// removeNamespace deletes a namespace from all mappings +func (m *clusterQuotaMapper) removeNamespace(namespaceName string) { + m.lock.Lock() + defer m.lock.Unlock() + + delete(m.requiredNamespaceToLabels, namespaceName) + delete(m.completedNamespaceToLabels, namespaceName) + delete(m.namespaceToQuota, namespaceName) + for quotaName, namespaces := range m.quotaToNamespaces { + if namespaces.Has(namespaceName) { + namespaces.Delete(namespaceName) + for _, listener := range m.listeners { + listener.RemoveMapping(quotaName, namespaceName) + } + } + } +} + +func selectorMatches(selector quotav1.ClusterResourceQuotaSelector, exists bool, quota *quotav1.ClusterResourceQuota) bool { + return exists && equality.Semantic.DeepEqual(selector, quota.Spec.Selector) +} +func selectionFieldsMatch(selectionFields SelectionFields, exists bool, namespace metav1.Object) bool { + return exists && reflect.DeepEqual(selectionFields, GetSelectionFields(namespace)) 
+} + +// setMapping maps (or removes a mapping) between a clusterquota and a namespace +// It returns whether the action worked, whether the quota is out of date, whether the namespace is out of date +// This allows callers to decide whether to pull new information from the cache or simply skip execution +func (m *clusterQuotaMapper) setMapping(quota *quotav1.ClusterResourceQuota, namespace metav1.Object, remove bool) (bool /*added*/, bool /*quota matches*/, bool /*namespace matches*/) { + m.lock.RLock() + selector, selectorExists := m.requiredQuotaToSelector[quota.Name] + selectionFields, selectionFieldsExist := m.requiredNamespaceToLabels[namespace.GetName()] + m.lock.RUnlock() + + if !selectorMatches(selector, selectorExists, quota) { + return false, false, selectionFieldsMatch(selectionFields, selectionFieldsExist, namespace) + } + if !selectionFieldsMatch(selectionFields, selectionFieldsExist, namespace) { + return false, true, false + } + + m.lock.Lock() + defer m.lock.Unlock() + selector, selectorExists = m.requiredQuotaToSelector[quota.Name] + selectionFields, selectionFieldsExist = m.requiredNamespaceToLabels[namespace.GetName()] + if !selectorMatches(selector, selectorExists, quota) { + return false, false, selectionFieldsMatch(selectionFields, selectionFieldsExist, namespace) + } + if !selectionFieldsMatch(selectionFields, selectionFieldsExist, namespace) { + return false, true, false + } + + if remove { + mutated := false + + namespaces, ok := m.quotaToNamespaces[quota.Name] + if !ok { + m.quotaToNamespaces[quota.Name] = sets.String{} + } else { + mutated = namespaces.Has(namespace.GetName()) + namespaces.Delete(namespace.GetName()) + } + + quotas, ok := m.namespaceToQuota[namespace.GetName()] + if !ok { + m.namespaceToQuota[namespace.GetName()] = sets.String{} + } else { + mutated = mutated || quotas.Has(quota.Name) + quotas.Delete(quota.Name) + } + + if mutated { + for _, listener := range m.listeners { + listener.RemoveMapping(quota.Name, 
namespace.GetName()) + } + } + + return true, true, true + } + + mutated := false + + namespaces, ok := m.quotaToNamespaces[quota.Name] + if !ok { + mutated = true + m.quotaToNamespaces[quota.Name] = sets.NewString(namespace.GetName()) + } else { + mutated = !namespaces.Has(namespace.GetName()) + namespaces.Insert(namespace.GetName()) + } + + quotas, ok := m.namespaceToQuota[namespace.GetName()] + if !ok { + mutated = true + m.namespaceToQuota[namespace.GetName()] = sets.NewString(quota.Name) + } else { + mutated = mutated || !quotas.Has(quota.Name) + quotas.Insert(quota.Name) + } + + if mutated { + for _, listener := range m.listeners { + listener.AddMapping(quota.Name, namespace.GetName()) + } + } + + return true, true, true + +} + +func GetSelectionFields(namespace metav1.Object) SelectionFields { + return SelectionFields{Labels: namespace.GetLabels(), Annotations: namespace.GetAnnotations()} +} diff --git a/vendor/github.com/openshift/library-go/pkg/quota/quotautil/error.go b/vendor/github.com/openshift/library-go/pkg/quota/quotautil/error.go new file mode 100644 index 000000000..14faf6bc0 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/quota/quotautil/error.go @@ -0,0 +1,42 @@ +package quotautil + +import ( + "strings" + + apierrs "k8s.io/apimachinery/pkg/api/errors" +) + +// errMessageString is a part of error message copied from quotaAdmission.Admit() method in +// k8s.io/kubernetes/plugin/pkg/admission/resourcequota/admission.go module +const errQuotaMessageString = `exceeded quota:` +const errQuotaUnknownMessageString = `status unknown for quota:` +const errLimitsMessageString = `exceeds the maximum limit` + +// IsErrorQuotaExceeded returns true if the given error stands for a denied request caused by detected quota +// abuse. 
+func IsErrorQuotaExceeded(err error) bool { + if isForbidden := apierrs.IsForbidden(err); isForbidden || apierrs.IsInvalid(err) { + lowered := strings.ToLower(err.Error()) + // the limit error message can be accompanied only by Invalid reason + if strings.Contains(lowered, errLimitsMessageString) { + return true + } + // the quota error message can be accompanied only by Forbidden reason + if isForbidden && (strings.Contains(lowered, errQuotaMessageString) || strings.Contains(lowered, errQuotaUnknownMessageString)) { + return true + } + } + return false +} + +// IsErrorLimitExceeded returns true if the given error is a limit error. +func IsErrorLimitExceeded(err error) bool { + if isForbidden := apierrs.IsForbidden(err); isForbidden || apierrs.IsInvalid(err) { + lowered := strings.ToLower(err.Error()) + // the limit error message can be accompanied only by Invalid reason + if strings.Contains(lowered, errLimitsMessageString) { + return true + } + } + return false +} diff --git a/vendor/github.com/openshift/library-go/pkg/quota/quotautil/helpers.go b/vendor/github.com/openshift/library-go/pkg/quota/quotautil/helpers.go new file mode 100644 index 000000000..a6bfc6269 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/quota/quotautil/helpers.go @@ -0,0 +1,48 @@ +package quotautil + +import ( + corev1 "k8s.io/api/core/v1" + + quotav1 "github.com/openshift/api/quota/v1" +) + +func GetResourceQuotasStatusByNamespace(namespaceStatuses quotav1.ResourceQuotasStatusByNamespace, namespace string) (corev1.ResourceQuotaStatus, bool) { + for i := range namespaceStatuses { + curr := namespaceStatuses[i] + if curr.Namespace == namespace { + return curr.Status, true + } + } + return corev1.ResourceQuotaStatus{}, false +} + +func RemoveResourceQuotasStatusByNamespace(namespaceStatuses *quotav1.ResourceQuotasStatusByNamespace, namespace string) { + newNamespaceStatuses := quotav1.ResourceQuotasStatusByNamespace{} + for i := range *namespaceStatuses { + curr := 
(*namespaceStatuses)[i] + if curr.Namespace == namespace { + continue + } + newNamespaceStatuses = append(newNamespaceStatuses, curr) + } + *namespaceStatuses = newNamespaceStatuses +} + +func InsertResourceQuotasStatus(namespaceStatuses *quotav1.ResourceQuotasStatusByNamespace, newStatus quotav1.ResourceQuotaStatusByNamespace) { + newNamespaceStatuses := quotav1.ResourceQuotasStatusByNamespace{} + found := false + for i := range *namespaceStatuses { + curr := (*namespaceStatuses)[i] + if curr.Namespace == newStatus.Namespace { + // do this so that we don't change serialization order + newNamespaceStatuses = append(newNamespaceStatuses, newStatus) + found = true + continue + } + newNamespaceStatuses = append(newNamespaceStatuses, curr) + } + if !found { + newNamespaceStatuses = append(newNamespaceStatuses, newStatus) + } + *namespaceStatuses = newNamespaceStatuses +} diff --git a/vendor/github.com/openshift/library-go/pkg/security/ldapclient/client.go b/vendor/github.com/openshift/library-go/pkg/security/ldapclient/client.go new file mode 100644 index 000000000..81c9b5021 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/security/ldapclient/client.go @@ -0,0 +1,119 @@ +package ldapclient + +import ( + "crypto/tls" + "fmt" + "net" + + "github.com/openshift/library-go/pkg/security/ldaputil" + "k8s.io/client-go/util/cert" + + "gopkg.in/ldap.v2" +) + +// NewLDAPClientConfig returns a new LDAP client config +func NewLDAPClientConfig(URL, bindDN, bindPassword, CA string, insecure bool) (Config, error) { + url, err := ldaputil.ParseURL(URL) + if err != nil { + return nil, fmt.Errorf("Error parsing URL: %v", err) + } + + tlsConfig := &tls.Config{} + if len(CA) > 0 { + roots, err := cert.NewPool(CA) + if err != nil { + return nil, fmt.Errorf("error loading cert pool from ca file %s: %v", CA, err) + } + tlsConfig.RootCAs = roots + } + + return &ldapClientConfig{ + scheme: url.Scheme, + host: url.Host, + bindDN: bindDN, + bindPassword: bindPassword, + insecure: 
insecure, + tlsConfig: tlsConfig, + }, nil +} + +// ldapClientConfig holds information for connecting to an LDAP server +type ldapClientConfig struct { + // scheme is the LDAP connection scheme, either ldap or ldaps + scheme ldaputil.Scheme + // host is the host:port of the LDAP server + host string + // bindDN is an optional DN to bind with during the search phase. + bindDN string + // bindPassword is an optional password to bind with during the search phase. + bindPassword string + // insecure specifies if TLS is required for the connection. If true, either an ldap://... URL or + // StartTLS must be supported by the server + insecure bool + // tlsConfig holds the TLS options. Only used when insecure=false + tlsConfig *tls.Config +} + +// ldapClientConfig is an Config +var _ Config = &ldapClientConfig{} + +// Connect returns an established LDAP connection, or an error if the connection could not +// be made (or successfully upgraded to TLS). If no error is returned, the caller is responsible for +// closing the connection +func (l *ldapClientConfig) Connect() (ldap.Client, error) { + tlsConfig := l.tlsConfig + + // Ensure tlsConfig specifies the server we're connecting to + if tlsConfig != nil && !tlsConfig.InsecureSkipVerify && len(tlsConfig.ServerName) == 0 { + // Add to a copy of the tlsConfig to avoid mutating the original + c := tlsConfig.Clone() + if host, _, err := net.SplitHostPort(l.host); err == nil { + c.ServerName = host + } else { + c.ServerName = l.host + } + tlsConfig = c + } + + switch l.scheme { + case ldaputil.SchemeLDAP: + con, err := ldap.Dial("tcp", l.host) + if err != nil { + return nil, err + } + + // If an insecure connection is desired, we're done + if l.insecure { + return con, nil + } + + // Attempt to upgrade to TLS + if err := con.StartTLS(tlsConfig); err != nil { + // We're returning an error on a successfully opened connection + // We are responsible for closing the open connection + con.Close() + return nil, err + } + + return con, 
nil + + case ldaputil.SchemeLDAPS: + return ldap.DialTLS("tcp", l.host, tlsConfig) + + default: + return nil, fmt.Errorf("unsupported scheme %q", l.scheme) + } +} + +func (l *ldapClientConfig) GetBindCredentials() (string, string) { + return l.bindDN, l.bindPassword +} + +func (l *ldapClientConfig) Host() string { + return l.host +} + +// String implements Stringer for debugging purposes +func (l *ldapClientConfig) String() string { + return fmt.Sprintf("{Scheme: %v Host: %v BindDN: %v len(BbindPassword): %v Insecure: %v}", l.scheme, l.host, l.bindDN, len(l.bindPassword), l.insecure) +} diff --git a/vendor/github.com/openshift/library-go/pkg/security/ldapclient/interfaces.go b/vendor/github.com/openshift/library-go/pkg/security/ldapclient/interfaces.go new file mode 100644 index 000000000..0c4efa238 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/security/ldapclient/interfaces.go @@ -0,0 +1,10 @@ +package ldapclient + +import "gopkg.in/ldap.v2" + +// Config knows how to connect to an LDAP server and can describe which server it is connecting to +type Config interface { + Connect() (client ldap.Client, err error) + GetBindCredentials() (bindDN, bindPassword string) + Host() string +} diff --git a/vendor/github.com/openshift/library-go/pkg/security/ldapquery/errors.go b/vendor/github.com/openshift/library-go/pkg/security/ldapquery/errors.go new file mode 100644 index 000000000..cbc946f14 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/security/ldapquery/errors.go @@ -0,0 +1,81 @@ +package ldapquery + +import ( + "fmt" + + "gopkg.in/ldap.v2" +) + +func NewNoSuchObjectError(baseDN string) error { + return &errNoSuchObject{baseDN: baseDN} +} + +// errNoSuchObject is an error that occurs when a base DN for a search refers to an object that does not exist +type errNoSuchObject struct { + baseDN string +} + +// Error returns the error string for the invalid base DN query error +func (e *errNoSuchObject) Error() string { + return 
fmt.Sprintf("search for entry with base dn=%q refers to a non-existent entry", e.baseDN) +} + +// IsNoSuchObjectError determines if the error is a NoSuchObjectError or if it is the upstream version of the error +// If this returns true, you are *not* safe to cast the error to a NoSuchObjectError +func IsNoSuchObjectError(err error) bool { + if err == nil { + return false + } + + _, ok := err.(*errNoSuchObject) + return ok || ldap.IsErrorWithCode(err, ldap.LDAPResultNoSuchObject) +} + +func NewEntryNotFoundError(baseDN, filter string) error { + return &errEntryNotFound{baseDN: baseDN, filter: filter} +} + +// errEntryNotFound is an error that occurs when trying to find a specific entry fails. +type errEntryNotFound struct { + baseDN string + filter string +} + +// Error returns the error string for the entry not found error +func (e *errEntryNotFound) Error() string { + return fmt.Sprintf("search for entry with base dn=%q and filter %q did not return any results", e.baseDN, e.filter) +} + +func IsEntryNotFoundError(err error) bool { + if err == nil { + return false + } + + _, ok := err.(*errEntryNotFound) + return ok +} + +func NewQueryOutOfBoundsError(queryDN, baseDN string) error { + return &errQueryOutOfBounds{baseDN: baseDN, queryDN: queryDN} +} + +// errQueryOutOfBounds is an error that occurs when trying to search by DN for an entry that exists +// outside of the tree specified with the BaseDN for search. 
+type errQueryOutOfBounds struct { + baseDN string + queryDN string +} + +// Error returns the error string for the out-of-bounds query +func (q *errQueryOutOfBounds) Error() string { + return fmt.Sprintf("search for entry with dn=%q would search outside of the base dn specified (dn=%q)", q.queryDN, q.baseDN) +} + +func IsQueryOutOfBoundsError(err error) bool { + if err == nil { + return false + } + + _, ok := err.(*errQueryOutOfBounds) + return ok +} diff --git a/vendor/github.com/openshift/library-go/pkg/security/ldapquery/query.go b/vendor/github.com/openshift/library-go/pkg/security/ldapquery/query.go new file mode 100644 index 000000000..5e2e57fab --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/security/ldapquery/query.go @@ -0,0 +1,248 @@ +package ldapquery + +import ( + "fmt" + "strings" + + "gopkg.in/ldap.v2" + "k8s.io/klog" + + "github.com/openshift/library-go/pkg/security/ldapclient" + "github.com/openshift/library-go/pkg/security/ldaputil" +) + +// NewLDAPQuery converts a user-provided LDAPQuery into a version we can use +func NewLDAPQuery(config SerializeableLDAPQuery) (LDAPQuery, error) { + scope, err := ldaputil.DetermineLDAPScope(config.Scope) + if err != nil { + return LDAPQuery{}, err + } + + derefAliases, err := ldaputil.DetermineDerefAliasesBehavior(config.DerefAliases) + if err != nil { + return LDAPQuery{}, err + } + + return LDAPQuery{ + BaseDN: config.BaseDN, + Scope: scope, + DerefAliases: derefAliases, + TimeLimit: config.TimeLimit, + Filter: config.Filter, + PageSize: config.PageSize, + }, nil +} + +// LDAPQuery encodes an LDAP query +type LDAPQuery struct { + // The DN of the branch of the directory where all searches should start from + BaseDN string + + // The (optional) scope of the search. Defaults to the entire subtree if not set + Scope ldaputil.Scope + + // The (optional) behavior of the search with regards to alisases. 
Defaults to always + // dereferencing if not set + DerefAliases ldaputil.DerefAliases + + // TimeLimit holds the limit of time in seconds that any request to the server can remain outstanding + // before the wait for a response is given up. If this is 0, no client-side limit is imposed + TimeLimit int + + // Filter is a valid LDAP search filter that retrieves all relevant entries from the LDAP server with the base DN + Filter string + + // PageSize is the maximum preferred page size, measured in LDAP entries. A page size of 0 means no paging will be done. + PageSize int +} + +// NewSearchRequest creates a new search request for the LDAP query and optionally includes more attributes +func (q *LDAPQuery) NewSearchRequest(additionalAttributes []string) *ldap.SearchRequest { + var controls []ldap.Control + if q.PageSize > 0 { + controls = append(controls, ldap.NewControlPaging(uint32(q.PageSize))) + } + return ldap.NewSearchRequest( + q.BaseDN, + int(q.Scope), + int(q.DerefAliases), + 0, // allowed return size - indicates no limit + q.TimeLimit, + false, // not types only + q.Filter, + additionalAttributes, + controls, + ) +} + +// NewLDAPQueryOnAttribute converts a user-provided LDAPQuery into a version we can use by parsing +// the input and combining it with a set of name attributes +func NewLDAPQueryOnAttribute(config SerializeableLDAPQuery, attribute string) (LDAPQueryOnAttribute, error) { + ldapQuery, err := NewLDAPQuery(config) + if err != nil { + return LDAPQueryOnAttribute{}, err + } + + return LDAPQueryOnAttribute{ + LDAPQuery: ldapQuery, + QueryAttribute: attribute, + }, nil +} + +// LDAPQueryOnAttribute encodes an LDAP query that conjoins two filters to extract a specific LDAP entry +// This query is not self-sufficient and needs the value of the QueryAttribute to construct the final filter +type LDAPQueryOnAttribute struct { + // Query retrieves entries from an LDAP server + LDAPQuery + + // QueryAttribute is the attribute for a specific filter that, when 
conjoined with the common filter, + // retrieves the specific LDAP entry from the LDAP server. (e.g. "cn", when formatted with "aGroupName" + // and conjoined with "objectClass=groupOfNames", becomes (&(objectClass=groupOfNames)(cn=aGroupName))") + QueryAttribute string +} + +// NewSearchRequest creates a new search request from the identifying query by internalizing the value of +// the attribute to be filtered as well as any attributes that need to be recovered +func (o *LDAPQueryOnAttribute) NewSearchRequest(attributeValue string, attributes []string) (*ldap.SearchRequest, error) { + if strings.EqualFold(o.QueryAttribute, "dn") { + dn, err := ldap.ParseDN(attributeValue) + if err != nil { + return nil, fmt.Errorf("could not search by dn, invalid dn value: %v", err) + } + baseDN, err := ldap.ParseDN(o.BaseDN) + if err != nil { + return nil, fmt.Errorf("could not search by dn, invalid dn value: %v", err) + } + if !baseDN.AncestorOf(dn) && !baseDN.Equal(dn) { + return nil, NewQueryOutOfBoundsError(attributeValue, o.BaseDN) + } + return o.buildDNQuery(attributeValue, attributes), nil + + } else { + return o.buildAttributeQuery(attributeValue, attributes), nil + } +} + +// buildDNQuery builds the query that finds an LDAP entry with the given DN +// this is done by setting the DN to be the base DN for the search and setting the search scope +// to only consider the base object found +func (o *LDAPQueryOnAttribute) buildDNQuery(dn string, attributes []string) *ldap.SearchRequest { + var controls []ldap.Control + if o.PageSize > 0 { + controls = append(controls, ldap.NewControlPaging(uint32(o.PageSize))) + } + return ldap.NewSearchRequest( + dn, + ldap.ScopeBaseObject, // over-ride original + int(o.DerefAliases), + 0, // allowed return size - indicates no limit + o.TimeLimit, + false, // not types only + "(objectClass=*)", // filter that returns all values + attributes, + controls, + ) +} + +// buildAttributeQuery builds the query containing a filter that conjoins the 
common filter given +// in the configuration with the specific attribute filter for which the attribute value is given +func (o *LDAPQueryOnAttribute) buildAttributeQuery(attributeValue string, + attributes []string) *ldap.SearchRequest { + specificFilter := fmt.Sprintf("%s=%s", + ldap.EscapeFilter(o.QueryAttribute), + ldap.EscapeFilter(attributeValue)) + + filter := fmt.Sprintf("(&(%s)(%s))", o.Filter, specificFilter) + + var controls []ldap.Control + if o.PageSize > 0 { + controls = append(controls, ldap.NewControlPaging(uint32(o.PageSize))) + } + + return ldap.NewSearchRequest( + o.BaseDN, + int(o.Scope), + int(o.DerefAliases), + 0, // allowed return size - indicates no limit + o.TimeLimit, + false, // not types only + filter, + attributes, + controls, + ) +} + +// QueryForUniqueEntry queries for an LDAP entry with the given searchRequest. The query is expected +// to return one unqiue result. If this is not the case, errors are raised +func QueryForUniqueEntry(clientConfig ldapclient.Config, query *ldap.SearchRequest) (*ldap.Entry, error) { + result, err := QueryForEntries(clientConfig, query) + if err != nil { + return nil, err + } + + if len(result) == 0 { + return nil, NewEntryNotFoundError(query.BaseDN, query.Filter) + } + + if len(result) > 1 { + if query.Scope == ldap.ScopeBaseObject { + return nil, fmt.Errorf("multiple entries found matching dn=%q:\n%s", + query.BaseDN, formatResult(result)) + } else { + return nil, fmt.Errorf("multiple entries found matching filter %s:\n%s", + query.Filter, formatResult(result)) + } + } + + entry := result[0] + klog.V(4).Infof("found dn=%q for %s", entry.DN, query.Filter) + return entry, nil +} + +// formatResult pretty-prints the first ten DNs in the slice of entries +func formatResult(results []*ldap.Entry) string { + var names []string + for _, entry := range results { + names = append(names, entry.DN) + } + return "\t" + strings.Join(names[0:10], "\n\t") +} + +// QueryForEntries queries for LDAP with the given 
searchRequest +func QueryForEntries(clientConfig ldapclient.Config, query *ldap.SearchRequest) ([]*ldap.Entry, error) { + connection, err := clientConfig.Connect() + if err != nil { + return nil, fmt.Errorf("could not connect to the LDAP server: %v", err) + } + defer connection.Close() + + if bindDN, bindPassword := clientConfig.GetBindCredentials(); len(bindDN) > 0 { + if err := connection.Bind(bindDN, bindPassword); err != nil { + return nil, fmt.Errorf("could not bind to the LDAP server: %v", err) + } + } + + var searchResult *ldap.SearchResult + control := ldap.FindControl(query.Controls, ldap.ControlTypePaging) + if control == nil { + klog.V(4).Infof("searching LDAP server with config %v with dn=%q and scope %v for %s requesting %v", clientConfig, query.BaseDN, query.Scope, query.Filter, query.Attributes) + searchResult, err = connection.Search(query) + } else if pagingControl, ok := control.(*ldap.ControlPaging); ok { + klog.V(4).Infof("searching LDAP server with config %v with dn=%q and scope %v for %s requesting %v with pageSize=%d", clientConfig, query.BaseDN, query.Scope, query.Filter, query.Attributes, pagingControl.PagingSize) + searchResult, err = connection.SearchWithPaging(query, pagingControl.PagingSize) + } else { + err = fmt.Errorf("invalid paging control type: %v", control) + } + + if err != nil { + if ldap.IsErrorWithCode(err, ldap.LDAPResultNoSuchObject) { + return nil, NewNoSuchObjectError(query.BaseDN) + } + return nil, err + } + + for _, entry := range searchResult.Entries { + klog.V(4).Infof("found dn=%q ", entry.DN) + } + return searchResult.Entries, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/security/ldapquery/query_test.go b/vendor/github.com/openshift/library-go/pkg/security/ldapquery/query_test.go new file mode 100644 index 000000000..ea18fa28d --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/security/ldapquery/query_test.go @@ -0,0 +1,320 @@ +package ldapquery + +import ( + "errors" + "fmt" + 
"reflect" + "testing" + + "github.com/openshift/library-go/pkg/security/ldaptestclient" + "github.com/openshift/library-go/pkg/security/ldaputil" + "gopkg.in/ldap.v2" +) + +const ( + DefaultBaseDN string = "dc=example,dc=com" + DefaultScope ldaputil.Scope = ldaputil.ScopeWholeSubtree + DefaultDerefAliases ldaputil.DerefAliases = ldaputil.DerefAliasesAlways + DefaultSizeLimit int = 0 + DefaultTimeLimit int = 0 + DefaultTypesOnly bool = false + DefaultFilter string = "objectClass=groupOfNames" + DefaultQueryAttribute string = "uid" +) + +var DefaultAttributes = []string{"dn", "cn", "uid"} +var DefaultControls []ldap.Control + +func TestNewSearchRequest(t *testing.T) { + var testCases = []struct { + name string + options LDAPQueryOnAttribute + attributeValue string + attributes []string + expectedRequest *ldap.SearchRequest + expectedError bool + }{ + { + name: "attribute query no attributes", + options: LDAPQueryOnAttribute{ + LDAPQuery: LDAPQuery{ + BaseDN: DefaultBaseDN, + Scope: DefaultScope, + DerefAliases: DefaultDerefAliases, + TimeLimit: DefaultTimeLimit, + Filter: DefaultFilter, + }, + QueryAttribute: DefaultQueryAttribute, + }, + + attributeValue: "bar", + attributes: DefaultAttributes, + expectedRequest: &ldap.SearchRequest{ + BaseDN: DefaultBaseDN, + Scope: int(DefaultScope), + DerefAliases: int(DefaultDerefAliases), + SizeLimit: DefaultSizeLimit, + TimeLimit: DefaultTimeLimit, + TypesOnly: DefaultTypesOnly, + Filter: fmt.Sprintf("(&(%s)(%s=%s))", DefaultFilter, DefaultQueryAttribute, "bar"), + Attributes: DefaultAttributes, + Controls: DefaultControls, + }, + expectedError: false, + }, + { + name: "attribute query with additional attributes", + options: LDAPQueryOnAttribute{ + LDAPQuery: LDAPQuery{ + BaseDN: DefaultBaseDN, + Scope: DefaultScope, + DerefAliases: DefaultDerefAliases, + TimeLimit: DefaultTimeLimit, + Filter: DefaultFilter, + }, + QueryAttribute: DefaultQueryAttribute, + }, + attributeValue: "bar", + attributes: append(DefaultAttributes, 
[]string{"email", "phone"}...), + expectedRequest: &ldap.SearchRequest{ + BaseDN: DefaultBaseDN, + Scope: int(DefaultScope), + DerefAliases: int(DefaultDerefAliases), + SizeLimit: DefaultSizeLimit, + TimeLimit: DefaultTimeLimit, + TypesOnly: DefaultTypesOnly, + Filter: fmt.Sprintf("(&(%s)(%s=%s))", DefaultFilter, DefaultQueryAttribute, "bar"), + Attributes: append(DefaultAttributes, []string{"email", "phone"}...), + Controls: DefaultControls, + }, + expectedError: false, + }, + { + name: "valid dn query no attributes", + options: LDAPQueryOnAttribute{ + LDAPQuery: LDAPQuery{ + BaseDN: DefaultBaseDN, + Scope: DefaultScope, + DerefAliases: DefaultDerefAliases, + TimeLimit: DefaultTimeLimit, + Filter: DefaultFilter, + }, + QueryAttribute: "DN", + }, + attributeValue: "uid=john,o=users,dc=example,dc=com", + attributes: DefaultAttributes, + expectedRequest: &ldap.SearchRequest{ + BaseDN: "uid=john,o=users,dc=example,dc=com", + Scope: ldap.ScopeBaseObject, + DerefAliases: int(DefaultDerefAliases), + SizeLimit: DefaultSizeLimit, + TimeLimit: DefaultTimeLimit, + TypesOnly: DefaultTypesOnly, + Filter: "(objectClass=*)", + Attributes: DefaultAttributes, + Controls: DefaultControls, + }, + expectedError: false, + }, + { + name: "valid dn query with additional attributes", + options: LDAPQueryOnAttribute{ + LDAPQuery: LDAPQuery{ + BaseDN: DefaultBaseDN, + Scope: DefaultScope, + DerefAliases: DefaultDerefAliases, + TimeLimit: DefaultTimeLimit, + Filter: DefaultFilter, + }, + QueryAttribute: "DN", + }, + attributeValue: "uid=john,o=users,dc=example,dc=com", + attributes: append(DefaultAttributes, []string{"email", "phone"}...), + expectedRequest: &ldap.SearchRequest{ + BaseDN: "uid=john,o=users,dc=example,dc=com", + Scope: ldap.ScopeBaseObject, + DerefAliases: int(DefaultDerefAliases), + SizeLimit: DefaultSizeLimit, + TimeLimit: DefaultTimeLimit, + TypesOnly: DefaultTypesOnly, + Filter: "(objectClass=*)", + Attributes: append(DefaultAttributes, []string{"email", "phone"}...), + 
Controls: DefaultControls, + }, + expectedError: false, + }, + { + name: "invalid dn query out of bounds", + options: LDAPQueryOnAttribute{ + LDAPQuery: LDAPQuery{ + BaseDN: DefaultBaseDN, + Scope: DefaultScope, + DerefAliases: DefaultDerefAliases, + TimeLimit: DefaultTimeLimit, + Filter: DefaultFilter, + }, + QueryAttribute: "DN", + }, + attributeValue: "uid=john,o=users,dc=other,dc=com", + attributes: DefaultAttributes, + expectedRequest: nil, + expectedError: true, + }, + { + name: "invalid dn query invalid dn", + options: LDAPQueryOnAttribute{ + LDAPQuery: LDAPQuery{ + BaseDN: DefaultBaseDN, + Scope: DefaultScope, + DerefAliases: DefaultDerefAliases, + TimeLimit: DefaultTimeLimit, + Filter: DefaultFilter, + }, + QueryAttribute: "DN", + }, + attributeValue: "uid=,o=users,dc=other,dc=com", + attributes: DefaultAttributes, + expectedRequest: nil, + expectedError: true, + }, + { + name: "attribute query no attributes with paging", + options: LDAPQueryOnAttribute{ + LDAPQuery: LDAPQuery{ + BaseDN: DefaultBaseDN, + Scope: DefaultScope, + DerefAliases: DefaultDerefAliases, + TimeLimit: DefaultTimeLimit, + Filter: DefaultFilter, + PageSize: 10, + }, + QueryAttribute: DefaultQueryAttribute, + }, + + attributeValue: "bar", + attributes: DefaultAttributes, + expectedRequest: &ldap.SearchRequest{ + BaseDN: DefaultBaseDN, + Scope: int(DefaultScope), + DerefAliases: int(DefaultDerefAliases), + SizeLimit: DefaultSizeLimit, + TimeLimit: DefaultTimeLimit, + TypesOnly: DefaultTypesOnly, + Filter: fmt.Sprintf("(&(%s)(%s=%s))", DefaultFilter, DefaultQueryAttribute, "bar"), + Attributes: DefaultAttributes, + Controls: []ldap.Control{ldap.NewControlPaging(10)}, + }, + expectedError: false, + }, + } + + for _, testCase := range testCases { + request, err := testCase.options.NewSearchRequest( + testCase.attributeValue, + testCase.attributes) + + switch { + case err != nil && !testCase.expectedError: + t.Errorf("%s: expected no error but got: %v", testCase.name, err) + case err == nil 
&& testCase.expectedError: + t.Errorf("%s: expected an error but got none", testCase.name) + } + + if !reflect.DeepEqual(testCase.expectedRequest, request) { + t.Errorf("%s: did not correctly create search request:\n\texpected:\n%#v\n\tgot:\n%#v", + testCase.name, testCase.expectedRequest, request) + } + } +} + +// TestErrNoSuchObject tests that our LDAP search correctly wraps the LDAP server error +func TestErrNoSuchObject(t *testing.T) { + var testCases = []struct { + name string + searchRequest *ldap.SearchRequest + expectedError error + }{ + { + name: "valid search", + searchRequest: &ldap.SearchRequest{ + BaseDN: "uid=john,o=users,dc=example,dc=com", + }, + expectedError: nil, + }, + { + name: "invalid search", + searchRequest: &ldap.SearchRequest{ + BaseDN: "ou=groups,dc=example,dc=com", + }, + expectedError: &errNoSuchObject{baseDN: "ou=groups,dc=example,dc=com"}, + }, + } + for _, testCase := range testCases { + testClient := ldaptestclient.NewMatchingSearchErrorClient(ldaptestclient.New(), + "ou=groups,dc=example,dc=com", + ldap.NewError(ldap.LDAPResultNoSuchObject, errors.New("")), + ) + testConfig := ldaptestclient.NewConfig(testClient) + if _, err := QueryForEntries(testConfig, testCase.searchRequest); !reflect.DeepEqual(err, testCase.expectedError) { + t.Errorf("%s: error did not match:\n\texpected:\n\t%v\n\tgot:\n\t%v", testCase.name, testCase.expectedError, err) + } + } +} + +// TestErrEntryNotFound checks that we wrap a zero-length list of results correctly if we search for a unique entry +func TestErrEntryNotFound(t *testing.T) { + testConfig := ldaptestclient.NewConfig(ldaptestclient.New()) + testSearchRequest := &ldap.SearchRequest{ + BaseDN: "dc=example,dc=com", + Scope: ldap.ScopeWholeSubtree, + DerefAliases: int(DefaultDerefAliases), + SizeLimit: DefaultSizeLimit, + TimeLimit: DefaultTimeLimit, + TypesOnly: DefaultTypesOnly, + Filter: "(objectClass=*)", + Attributes: append(DefaultAttributes), + Controls: DefaultControls, + } + + expectedErr 
:= &errEntryNotFound{baseDN: "dc=example,dc=com", filter: "(objectClass=*)"} + + // test that a unique search errors on no result + if _, err := QueryForUniqueEntry(testConfig, testSearchRequest); !reflect.DeepEqual(err, expectedErr) { + t.Errorf("query for unique entry did not get correct error:\n\texpected:\n\t%v\n\tgot:\n\t%v", expectedErr, err) + } + + // test that a non-unique search doesn't error + if _, err := QueryForEntries(testConfig, testSearchRequest); !reflect.DeepEqual(err, nil) { + t.Errorf("query for entries did not get correct error:\n\texpected:\n\t%v\n\tgot:\n\t%v", nil, err) + } +} + +func TestQueryWithPaging(t *testing.T) { + expectedResult := &ldap.SearchResult{ + Entries: []*ldap.Entry{ldap.NewEntry("cn=paging,ou=paging,dc=paging,dc=com", map[string][]string{"paging": {"true"}})}, + } + + testConfig := ldaptestclient.NewConfig(ldaptestclient.NewPagingOnlyClient(ldaptestclient.New(), + expectedResult, + )) + testSearchRequest := &ldap.SearchRequest{ + BaseDN: "dc=example,dc=com", + Scope: ldap.ScopeWholeSubtree, + DerefAliases: int(DefaultDerefAliases), + SizeLimit: DefaultSizeLimit, + TimeLimit: DefaultTimeLimit, + TypesOnly: DefaultTypesOnly, + Filter: "(objectClass=*)", + Attributes: append(DefaultAttributes), + Controls: []ldap.Control{ldap.NewControlPaging(5)}, + } + + // test that a search request with paging controls gets correctly routed to the SearchWithPaging call + response, err := QueryForEntries(testConfig, testSearchRequest) + if err != nil { + t.Errorf("query with paging control should not create error, but got %v", err) + } + if !reflect.DeepEqual(expectedResult.Entries, response) { + t.Errorf("query with paging did not return correct response: expected %v, got %v", expectedResult.Entries, response) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/security/ldapquery/types.go b/vendor/github.com/openshift/library-go/pkg/security/ldapquery/types.go new file mode 100644 index 000000000..a321df70a --- /dev/null +++ 
b/vendor/github.com/openshift/library-go/pkg/security/ldapquery/types.go @@ -0,0 +1,31 @@ +package ldapquery + +type SerializeableLDAPQuery struct { + // The DN of the branch of the directory where all searches should start from + BaseDN string + + // The (optional) scope of the search. Can be: + // base: only the base object, + // one: all object on the base level, + // sub: the entire subtree + // Defaults to the entire subtree if not set + Scope string + + // The (optional) behavior of the search with regards to alisases. Can be: + // never: never dereference aliases, + // search: only dereference in searching, + // base: only dereference in finding the base object, + // always: always dereference + // Defaults to always dereferencing if not set + DerefAliases string + + // TimeLimit holds the limit of time in seconds that any request to the server can remain outstanding + // before the wait for a response is given up. If this is 0, no client-side limit is imposed + TimeLimit int + + // Filter is a valid LDAP search filter that retrieves all relevant entries from the LDAP server with the base DN + Filter string + + // PageSize is the maximum preferred page size, measured in LDAP entries. A page size of 0 means no paging will be done. + PageSize int +} diff --git a/vendor/github.com/openshift/library-go/pkg/security/ldaptestclient/testclient.go b/vendor/github.com/openshift/library-go/pkg/security/ldaptestclient/testclient.go new file mode 100644 index 000000000..46bd5fdc0 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/security/ldaptestclient/testclient.go @@ -0,0 +1,170 @@ +package ldaptestclient + +import ( + "crypto/tls" + "time" + + "gopkg.in/ldap.v2" +) + +// Fake is a mock client for an LDAP server +// The following methods define safe defaults for the return values. In order to adapt this test client +// for a specific test, anonymously include it and override the method being tested. 
In the over-riden +// method, if you are not covering all method calls with your override, defer to the parent for handling. +type Fake struct { + SimpleBindResponse *ldap.SimpleBindResult + PasswordModifyResponse *ldap.PasswordModifyResult + SearchResponse *ldap.SearchResult +} + +var _ ldap.Client = &Fake{} + +// NewTestClient returns a new test client with safe default return values +func New() *Fake { + return &Fake{ + SimpleBindResponse: &ldap.SimpleBindResult{ + Controls: []ldap.Control{}, + }, + PasswordModifyResponse: &ldap.PasswordModifyResult{ + GeneratedPassword: "", + }, + SearchResponse: &ldap.SearchResult{ + Entries: []*ldap.Entry{}, + Referrals: []string{}, + Controls: []ldap.Control{}, + }, + } +} + +// Start starts the LDAP connection +func (c *Fake) Start() { + return +} + +// StartTLS begins a TLS-wrapped LDAP connection +func (c *Fake) StartTLS(config *tls.Config) error { + return nil +} + +// Close closes an LDAP connection +func (c *Fake) Close() { + return +} + +// Bind binds to the LDAP server with a bind DN and password +func (c *Fake) Bind(username, password string) error { + return nil +} + +// SimpleBind binds to the LDAP server using the Simple Bind mechanism +func (c *Fake) SimpleBind(simpleBindRequest *ldap.SimpleBindRequest) (*ldap.SimpleBindResult, error) { + return c.SimpleBindResponse, nil +} + +// Add forwards an addition request to the LDAP server +func (c *Fake) Add(addRequest *ldap.AddRequest) error { + return nil +} + +// Del forwards a deletion request to the LDAP server +func (c *Fake) Del(delRequest *ldap.DelRequest) error { + return nil +} + +// Modify forwards a modification request to the LDAP server +func (c *Fake) Modify(modifyRequest *ldap.ModifyRequest) error { + return nil +} + +// Compare ... ? 
+func (c *Fake) Compare(dn, attribute, value string) (bool, error) { + return false, nil +} + +// PasswordModify forwards a password modify request to the LDAP server +func (c *Fake) PasswordModify(passwordModifyRequest *ldap.PasswordModifyRequest) (*ldap.PasswordModifyResult, error) { + return c.PasswordModifyResponse, nil +} + +// Search forwards a search request to the LDAP server +func (c *Fake) Search(searchRequest *ldap.SearchRequest) (*ldap.SearchResult, error) { + return c.SearchResponse, nil +} + +// SearchWithPaging forwards a search request to the LDAP server and pages the response +func (c *Fake) SearchWithPaging(searchRequest *ldap.SearchRequest, pagingSize uint32) (*ldap.SearchResult, error) { + return c.SearchResponse, nil +} + +// SetTimeout sets a timeout on the client +func (c *Fake) SetTimeout(d time.Duration) { +} + +// NewMatchingSearchErrorClient returns a new MatchingSearchError client sitting on top of the parent +// client. This client returns the given error when a search base DN matches the given base DN, and +// defers to the parent otherwise. +func NewMatchingSearchErrorClient(parent ldap.Client, baseDN string, returnErr error) ldap.Client { + return &MatchingSearchErrClient{ + Client: parent, + BaseDN: baseDN, + ReturnErr: returnErr, + } +} + +// MatchingSearchErrClient returns the ReturnErr on every Search() where the search base DN matches the given DN +// or defers the search to the parent client +type MatchingSearchErrClient struct { + ldap.Client + BaseDN string + ReturnErr error +} + +func (c *MatchingSearchErrClient) Search(searchRequest *ldap.SearchRequest) (*ldap.SearchResult, error) { + if searchRequest.BaseDN == c.BaseDN { + return nil, c.ReturnErr + } + return c.Client.Search(searchRequest) +} + +// NewDNMappingClient returns a new DNMappingClient sitting on top of the parent client. This client returns the +// ldap entries mapped to with this DN in its' internal DN map, or defers to the parent if the DN is not mapped. 
+func NewDNMappingClient(parent ldap.Client, DNMapping map[string][]*ldap.Entry) ldap.Client { + return &DNMappingClient{ + Client: parent, + DNMapping: DNMapping, + } +} + +// DNMappingClient returns the LDAP entry mapped to by the base dn given, or if no mapping happens, defers to the parent +type DNMappingClient struct { + ldap.Client + DNMapping map[string][]*ldap.Entry +} + +func (c *DNMappingClient) Search(searchRequest *ldap.SearchRequest) (*ldap.SearchResult, error) { + if entries, exists := c.DNMapping[searchRequest.BaseDN]; exists { + return &ldap.SearchResult{Entries: entries}, nil + } + + return c.Client.Search(searchRequest) +} + +// NewPagingOnlyClient returns a new PagingOnlyClient sitting on top of the parent client. This client returns the +// provided search response for any calls to SearchWithPaging, or defers to the parent if the call is not to the +// paged search function. +func NewPagingOnlyClient(parent ldap.Client, response *ldap.SearchResult) ldap.Client { + return &PagingOnlyClient{ + Client: parent, + Response: response, + } +} + +// PagingOnlyClient responds with a canned search result for any calls to SearchWithPaging +type PagingOnlyClient struct { + ldap.Client + Response *ldap.SearchResult +} + +func (c *PagingOnlyClient) SearchWithPaging(searchRequest *ldap.SearchRequest, pagingSize uint32) (*ldap.SearchResult, error) { + return c.Response, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/security/ldaptestclient/testclientconfig.go b/vendor/github.com/openshift/library-go/pkg/security/ldaptestclient/testclientconfig.go new file mode 100644 index 000000000..6106de0e9 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/security/ldaptestclient/testclientconfig.go @@ -0,0 +1,30 @@ +package ldaptestclient + +import ( + "github.com/openshift/library-go/pkg/security/ldapclient" + "gopkg.in/ldap.v2" +) + +// fakeConfig regurgitates internal state in order to conform to Config +type fakeConfig struct { + client 
ldap.Client +} + +// NewConfig creates a new Config impl that regurgitates the given data +func NewConfig(client ldap.Client) ldapclient.Config { + return &fakeConfig{ + client: client, + } +} + +func (c *fakeConfig) Connect() (ldap.Client, error) { + return c.client, nil +} + +func (c *fakeConfig) GetBindCredentials() (string, string) { + return "", "" +} + +func (c *fakeConfig) Host() string { + return "" +} diff --git a/vendor/github.com/openshift/library-go/pkg/security/ldaputil/attribute.go b/vendor/github.com/openshift/library-go/pkg/security/ldaputil/attribute.go new file mode 100644 index 000000000..16ca72231 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/security/ldaputil/attribute.go @@ -0,0 +1,47 @@ +package ldaputil + +import ( + "encoding/base64" + "strings" + + "gopkg.in/ldap.v2" +) + +// GetAttributeValue finds the first attribute of those given that the LDAP entry has, and +// returns it. GetAttributeValue is able to query the DN as well as Attributes of the LDAP entry. +// If no value is found, the empty string is returned. 
+func GetAttributeValue(entry *ldap.Entry, attributes []string) string { + for _, k := range attributes { + // Ignore empty attributes + if len(k) == 0 { + continue + } + // Special-case DN, since it's not an attribute + if strings.ToLower(k) == "dn" { + return entry.DN + } + // Otherwise get an attribute and return it if present + if v := entry.GetAttributeValue(k); len(v) > 0 { + return v + } + } + return "" +} + +func GetRawAttributeValue(entry *ldap.Entry, attributes []string) string { + for _, k := range attributes { + // Ignore empty attributes + if len(k) == 0 { + continue + } + // Special-case DN, since it's not an attribute + if strings.ToLower(k) == "dn" { + return base64.RawURLEncoding.EncodeToString([]byte(entry.DN)) + } + // Otherwise get an attribute and return it if present + if v := entry.GetRawAttributeValue(k); len(v) > 0 { + return base64.RawURLEncoding.EncodeToString(v) + } + } + return "" +} diff --git a/vendor/github.com/openshift/library-go/pkg/security/ldaputil/attribute_test.go b/vendor/github.com/openshift/library-go/pkg/security/ldaputil/attribute_test.go new file mode 100644 index 000000000..c709e39f4 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/security/ldaputil/attribute_test.go @@ -0,0 +1,70 @@ +package ldaputil + +import ( + "testing" + + "gopkg.in/ldap.v2" +) + +func TestGetAttributeValue(t *testing.T) { + testcases := map[string]struct { + Entry *ldap.Entry + Attributes []string + ExpectedValue string + }{ + "empty": { + Attributes: []string{}, + Entry: &ldap.Entry{DN: "", Attributes: []*ldap.EntryAttribute{}}, + ExpectedValue: "", + }, + + "dn": { + Attributes: []string{"dn"}, + Entry: &ldap.Entry{DN: "foo", Attributes: []*ldap.EntryAttribute{}}, + ExpectedValue: "foo", + }, + "DN": { + Attributes: []string{"DN"}, + Entry: &ldap.Entry{DN: "foo", Attributes: []*ldap.EntryAttribute{}}, + ExpectedValue: "foo", + }, + + "missing": { + Attributes: []string{"foo", "bar", "baz"}, + Entry: &ldap.Entry{DN: "", Attributes: 
[]*ldap.EntryAttribute{}}, + ExpectedValue: "", + }, + + "present": { + Attributes: []string{"foo"}, + Entry: &ldap.Entry{DN: "", Attributes: []*ldap.EntryAttribute{ + {Name: "foo", Values: []string{"fooValue"}}, + }}, + ExpectedValue: "fooValue", + }, + "first of multi-value attribute": { + Attributes: []string{"foo"}, + Entry: &ldap.Entry{DN: "", Attributes: []*ldap.EntryAttribute{ + {Name: "foo", Values: []string{"fooValue", "fooValue2"}}, + }}, + ExpectedValue: "fooValue", + }, + "first present attribute": { + Attributes: []string{"foo", "bar", "baz"}, + Entry: &ldap.Entry{DN: "", Attributes: []*ldap.EntryAttribute{ + {Name: "foo", Values: []string{""}}, + {Name: "bar", Values: []string{"barValue"}}, + {Name: "baz", Values: []string{"bazValue"}}, + }}, + ExpectedValue: "barValue", + }, + } + + for k, tc := range testcases { + v := GetAttributeValue(tc.Entry, tc.Attributes) + if v != tc.ExpectedValue { + t.Errorf("%s: Expected %q, got %q", k, tc.ExpectedValue, v) + } + } + +} diff --git a/vendor/github.com/openshift/library-go/pkg/security/ldaputil/url.go b/vendor/github.com/openshift/library-go/pkg/security/ldaputil/url.go new file mode 100644 index 000000000..caf64963a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/security/ldaputil/url.go @@ -0,0 +1,247 @@ +package ldaputil + +import ( + "fmt" + "net" + "net/url" + "strings" + + "gopkg.in/ldap.v2" +) + +// Scheme is a valid ldap scheme +type Scheme string + +const ( + SchemeLDAP Scheme = "ldap" + SchemeLDAPS Scheme = "ldaps" +) + +// Scope is a valid LDAP search scope +type Scope int + +const ( + ScopeWholeSubtree Scope = ldap.ScopeWholeSubtree + ScopeSingleLevel Scope = ldap.ScopeSingleLevel + ScopeBaseObject Scope = ldap.ScopeBaseObject +) + +// DerefAliases is a valid LDAP alias dereference parameter +type DerefAliases int + +const ( + DerefAliasesNever = ldap.NeverDerefAliases + DerefAliasesSearching = ldap.DerefInSearching + DerefAliasesFinding = ldap.DerefFindingBaseObj + 
DerefAliasesAlways = ldap.DerefAlways +) + +const ( + defaultLDAPPort = "389" + defaultLDAPSPort = "636" + + defaultHost = "localhost" + defaultQueryAttribute = "uid" + defaultFilter = "(objectClass=*)" + + scopeWholeSubtreeString = "sub" + scopeSingleLevelString = "one" + scopeBaseObjectString = "base" + + criticalExtensionPrefix = "!" +) + +// LDAPURL holds a parsed RFC 2255 URL +type LDAPURL struct { + // Scheme is ldap or ldaps + Scheme Scheme + // Host is the host:port of the LDAP server + Host string + // The DN of the branch of the directory where all searches should start from + BaseDN string + // The attribute to search for + QueryAttribute string + // The scope of the search. Can be ldap.ScopeWholeSubtree, ldap.ScopeSingleLevel, or ldap.ScopeBaseObject + Scope Scope + // A valid LDAP search filter (e.g. "(objectClass=*)") + Filter string +} + +// ParseURL parsed the given ldapURL as an RFC 2255 URL +// The syntax of the URL is ldap://host:port/basedn?attribute?scope?filter +func ParseURL(ldapURL string) (LDAPURL, error) { + // Must be a valid URL to start + parsedURL, err := url.Parse(ldapURL) + if err != nil { + return LDAPURL{}, err + } + + opts := LDAPURL{} + + determinedScheme, err := DetermineLDAPScheme(parsedURL.Scheme) + if err != nil { + return LDAPURL{}, err + } + opts.Scheme = determinedScheme + + determinedHost, err := DetermineLDAPHost(parsedURL.Host, opts.Scheme) + if err != nil { + return LDAPURL{}, err + } + opts.Host = determinedHost + + // Set base dn (default to "") + // url.Parse() already percent-decodes the path + opts.BaseDN = strings.TrimLeft(parsedURL.Path, "/") + + attributes, scope, filter, extensions, err := SplitLDAPQuery(parsedURL.RawQuery) + if err != nil { + return LDAPURL{}, err + } + + // Attributes contains comma-separated attributes + // Set query attribute to first attribute + // Default to uid to match mod_auth_ldap + opts.QueryAttribute = strings.Split(attributes, ",")[0] + if len(opts.QueryAttribute) == 0 { + 
opts.QueryAttribute = defaultQueryAttribute + } + + determinedScope, err := DetermineLDAPScope(scope) + if err != nil { + return LDAPURL{}, err + } + opts.Scope = determinedScope + + determinedFilter, err := DetermineLDAPFilter(filter) + if err != nil { + return LDAPURL{}, err + } + opts.Filter = determinedFilter + + // Extensions are in "name=value,name2=value2" form + // Critical extensions are prefixed with a ! + // Optional extensions are ignored, per RFC + // Fail if there are any critical extensions, since we don't support any + if len(extensions) > 0 { + for _, extension := range strings.Split(extensions, ",") { + exttype := strings.SplitN(extension, "=", 2)[0] + if strings.HasPrefix(exttype, criticalExtensionPrefix) { + return LDAPURL{}, fmt.Errorf("unsupported critical extension %s", extension) + } + } + } + + return opts, nil + +} + +// DetermineLDAPScheme determines the LDAP connection scheme. Scheme is one of "ldap" or "ldaps" +// Default to "ldap" +func DetermineLDAPScheme(scheme string) (Scheme, error) { + switch Scheme(scheme) { + case SchemeLDAP, SchemeLDAPS: + return Scheme(scheme), nil + default: + return "", fmt.Errorf("invalid scheme %q", scheme) + } +} + +// DetermineLDAPHost determines the host and port for the LDAP connection. +// The default host is localhost; the default port for scheme "ldap" is 389, for "ldaps" is 686 +func DetermineLDAPHost(hostport string, scheme Scheme) (string, error) { + if len(hostport) == 0 { + hostport = defaultHost + } + // add port if missing + if _, _, err := net.SplitHostPort(hostport); err != nil { + switch scheme { + case SchemeLDAPS: + return net.JoinHostPort(hostport, defaultLDAPSPort), nil + case SchemeLDAP: + return net.JoinHostPort(hostport, defaultLDAPPort), nil + default: + return "", fmt.Errorf("no default port for scheme %q", scheme) + } + } + // nothing needed to be done + return hostport, nil +} + +// SplitLDAPQuery splits the query in the URL into the substituent parts. All sections are optional. 
+// Query syntax is attribute?scope?filter?extensions +func SplitLDAPQuery(query string) (attributes, scope, filter, extensions string, err error) { + parts := strings.Split(query, "?") + switch len(parts) { + case 4: + extensions = parts[3] + fallthrough + case 3: + if v, err := url.QueryUnescape(parts[2]); err != nil { + return "", "", "", "", err + } else { + filter = v + } + fallthrough + case 2: + if v, err := url.QueryUnescape(parts[1]); err != nil { + return "", "", "", "", err + } else { + scope = v + } + fallthrough + case 1: + if v, err := url.QueryUnescape(parts[0]); err != nil { + return "", "", "", "", err + } else { + attributes = v + } + return attributes, scope, filter, extensions, nil + case 0: + return + default: + err = fmt.Errorf("too many query options %q", query) + return "", "", "", "", err + } +} + +// DetermineLDAPScope determines the LDAP search scope. Scope is one of "sub", "one", or "base" +// Default to "sub" to match mod_auth_ldap +func DetermineLDAPScope(scope string) (Scope, error) { + switch scope { + case "", scopeWholeSubtreeString: + return ScopeWholeSubtree, nil + case scopeSingleLevelString: + return ScopeSingleLevel, nil + case scopeBaseObjectString: + return ScopeBaseObject, nil + default: + return -1, fmt.Errorf("invalid scope %q", scope) + } +} + +// DetermineLDAPFilter determines the LDAP search filter. 
Filter is a valid LDAP filter +// Default to "(objectClass=*)" per RFC +func DetermineLDAPFilter(filter string) (string, error) { + if len(filter) == 0 { + return defaultFilter, nil + } + if _, err := ldap.CompileFilter(filter); err != nil { + return "", fmt.Errorf("invalid filter: %v", err) + } + return filter, nil +} + +func DetermineDerefAliasesBehavior(derefAliasesString string) (DerefAliases, error) { + mapping := map[string]DerefAliases{ + "never": DerefAliasesNever, + "search": DerefAliasesSearching, + "base": DerefAliasesFinding, + "always": DerefAliasesAlways, + } + derefAliases, exists := mapping[derefAliasesString] + if !exists { + return -1, fmt.Errorf("not a valid LDAP alias dereferncing behavior: %s", derefAliasesString) + } + return derefAliases, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/security/ldaputil/url_test.go b/vendor/github.com/openshift/library-go/pkg/security/ldaputil/url_test.go new file mode 100644 index 000000000..29b7842b3 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/security/ldaputil/url_test.go @@ -0,0 +1,103 @@ +package ldaputil + +import ( + "reflect" + "testing" + + "gopkg.in/ldap.v2" +) + +func TestParseURL(t *testing.T) { + testcases := map[string]struct { + URL string + ExpectedLDAPURL LDAPURL + ExpectedError string + }{ + // Defaults + "defaults for ldap://": { + URL: "ldap://", + ExpectedLDAPURL: LDAPURL{Scheme: "ldap", Host: "localhost:389", BaseDN: "", QueryAttribute: "uid", Scope: ldap.ScopeWholeSubtree, Filter: "(objectClass=*)"}, + }, + "defaults for ldaps://": { + URL: "ldaps://", + ExpectedLDAPURL: LDAPURL{Scheme: "ldaps", Host: "localhost:636", BaseDN: "", QueryAttribute: "uid", Scope: ldap.ScopeWholeSubtree, Filter: "(objectClass=*)"}, + }, + + // Valid + "fully specified": { + URL: "ldap://myhost:123/o=myorg?cn?one?(o=mygroup*)?ext=1", + ExpectedLDAPURL: LDAPURL{Scheme: "ldap", Host: "myhost:123", BaseDN: "o=myorg", QueryAttribute: "cn", Scope: ldap.ScopeSingleLevel, Filter: 
"(o=mygroup*)"}, + }, + "first attribute used for query": { + URL: "ldap://myhost:123/o=myorg?cn,uid?one?(o=mygroup*)?ext=1", + ExpectedLDAPURL: LDAPURL{Scheme: "ldap", Host: "myhost:123", BaseDN: "o=myorg", QueryAttribute: "cn", Scope: ldap.ScopeSingleLevel, Filter: "(o=mygroup*)"}, + }, + + // Escaping + "percent escaped 1": { + URL: "ldap://myhost:123/o=my%20org?my%20attr?one?(o=my%20group%3f*)?ext=1", + ExpectedLDAPURL: LDAPURL{Scheme: "ldap", Host: "myhost:123", BaseDN: "o=my org", QueryAttribute: "my attr", Scope: ldap.ScopeSingleLevel, Filter: "(o=my group?*)"}, + }, + "percent escaped 2": { + URL: "ldap://myhost:123/o=Babsco,c=US???(four-octet=%5c00%5c00%5c00%5c04)", + ExpectedLDAPURL: LDAPURL{Scheme: "ldap", Host: "myhost:123", BaseDN: "o=Babsco,c=US", QueryAttribute: "uid", Scope: ldap.ScopeWholeSubtree, Filter: `(four-octet=\00\00\00\04)`}, + }, + "percent escaped 3": { + URL: "ldap://myhost:123/o=An%20Example%5C2C%20Inc.,c=US", + ExpectedLDAPURL: LDAPURL{Scheme: "ldap", Host: "myhost:123", BaseDN: `o=An Example\2C Inc.,c=US`, QueryAttribute: "uid", Scope: ldap.ScopeWholeSubtree, Filter: "(objectClass=*)"}, + }, + + // Invalid + "empty": { + URL: "", + ExpectedError: `invalid scheme ""`, + }, + "invalid scheme": { + URL: "http://myhost:123/o=myorg?cn?one?(o=mygroup*)?ext=1", + ExpectedError: `invalid scheme "http"`, + }, + "invalid scope": { + URL: "ldap://myhost:123/o=myorg?cn?foo?(o=mygroup*)?ext=1", + ExpectedError: `invalid scope "foo"`, + }, + "invalid filter": { + URL: "ldap://myhost:123/o=myorg?cn?one?(mygroup*)?ext=1", + ExpectedError: `invalid filter: LDAP Result Code 201 "Filter Compile Error": ldap: error parsing filter`, + }, + "invalid segments": { + URL: "ldap://myhost:123/o=myorg?cn?one?(o=mygroup*)?ext=1?extrasegment", + ExpectedError: `too many query options "cn?one?(o=mygroup*)?ext=1?extrasegment"`, + }, + + // Extension handling + "ignored optional extension": { + URL: "ldap:///??sub??e-bindname=cn=Manager%2cdc=example%2cdc=com", + 
ExpectedLDAPURL: LDAPURL{Scheme: "ldap", Host: "localhost:389", BaseDN: "", QueryAttribute: "uid", Scope: ldap.ScopeWholeSubtree, Filter: "(objectClass=*)"}, + }, + "rejected required extension": { + URL: "ldap:///??sub??!e-bindname=cn=Manager%2cdc=example%2cdc=com", + ExpectedError: "unsupported critical extension !e-bindname=cn=Manager%2cdc=example%2cdc=com", + }, + } + + for k, tc := range testcases { + ldapURL, err := ParseURL(tc.URL) + if err != nil { + if len(tc.ExpectedError) == 0 { + t.Errorf("%s: Unexpected error: %v", k, err) + } + if err.Error() != tc.ExpectedError { + t.Errorf("%s: Expected error %q, got %v", k, tc.ExpectedError, err) + } + continue + } + if len(tc.ExpectedError) > 0 { + t.Errorf("%s: Expected error %q, got none", k, tc.ExpectedError) + continue + } + if !reflect.DeepEqual(tc.ExpectedLDAPURL, ldapURL) { + t.Errorf("%s: Expected\n\t%#v\ngot\n\t%#v", k, tc.ExpectedLDAPURL, ldapURL) + continue + } + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/security/uid/uid.go b/vendor/github.com/openshift/library-go/pkg/security/uid/uid.go new file mode 100644 index 000000000..836a71a5a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/security/uid/uid.go @@ -0,0 +1,125 @@ +package uid + +import ( + "fmt" + "strings" +) + +type Block struct { + Start uint32 + End uint32 +} + +var ( + ErrBlockSlashBadFormat = fmt.Errorf("block not in the format \"/\"") + ErrBlockDashBadFormat = fmt.Errorf("block not in the format \"-\"") +) + +func ParseBlock(in string) (Block, error) { + if strings.Contains(in, "/") { + var start, size uint32 + n, err := fmt.Sscanf(in, "%d/%d", &start, &size) + if err != nil { + return Block{}, err + } + if n != 2 { + return Block{}, ErrBlockSlashBadFormat + } + return Block{Start: start, End: start + size - 1}, nil + } + + var start, end uint32 + n, err := fmt.Sscanf(in, "%d-%d", &start, &end) + if err != nil { + return Block{}, err + } + if n != 2 { + return Block{}, ErrBlockDashBadFormat + } + return 
Block{Start: start, End: end}, nil +} + +func (b Block) String() string { + return fmt.Sprintf("%d/%d", b.Start, b.Size()) +} + +func (b Block) RangeString() string { + return fmt.Sprintf("%d-%d", b.Start, b.End) +} + +func (b Block) Size() uint32 { + return b.End - b.Start + 1 +} + +type Range struct { + block Block + size uint32 +} + +func NewRange(start, end, size uint32) (*Range, error) { + if start > end { + return nil, fmt.Errorf("start %d must be less than end %d", start, end) + } + if size == 0 { + return nil, fmt.Errorf("block size must be a positive integer") + } + if (end - start) < size { + return nil, fmt.Errorf("block size must be less than or equal to the range") + } + return &Range{ + block: Block{start, end}, + size: size, + }, nil +} + +func ParseRange(in string) (*Range, error) { + var start, end, block uint32 + n, err := fmt.Sscanf(in, "%d-%d/%d", &start, &end, &block) + if err != nil { + return nil, err + } + if n != 3 { + return nil, fmt.Errorf("range not in the format \"-/\"") + } + return NewRange(start, end, block) +} + +func (r *Range) Size() uint32 { + return r.block.Size() / r.size +} + +func (r *Range) String() string { + return fmt.Sprintf("%s/%d", r.block.RangeString(), r.size) +} + +func (r *Range) BlockAt(offset uint32) (Block, bool) { + if offset > r.Size() { + return Block{}, false + } + start := r.block.Start + offset*r.size + return Block{ + Start: start, + End: start + r.size - 1, + }, true +} + +func (r *Range) Contains(block Block) bool { + ok, _ := r.Offset(block) + return ok +} + +func (r *Range) Offset(block Block) (bool, uint32) { + if block.Start < r.block.Start { + return false, 0 + } + if block.End > r.block.End { + return false, 0 + } + if block.End-block.Start+1 != r.size { + return false, 0 + } + if (block.Start-r.block.Start)%r.size != 0 { + return false, 0 + } + return true, (block.Start - r.block.Start) / r.size +} diff --git a/vendor/github.com/openshift/library-go/pkg/security/uid/uid_test.go 
b/vendor/github.com/openshift/library-go/pkg/security/uid/uid_test.go new file mode 100644 index 000000000..c8fbbf4ca --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/security/uid/uid_test.go @@ -0,0 +1,157 @@ +package uid + +import ( + "strings" + "testing" +) + +func TestParseRange(t *testing.T) { + testCases := map[string]struct { + in string + errFn func(error) bool + r Range + total uint32 + }{ + "identity range": { + in: "1-1/1", + r: Range{ + block: Block{1, 1}, + size: 1, + }, + total: 1, + }, + "simple range": { + in: "1-2/1", + r: Range{ + block: Block{1, 2}, + size: 1, + }, + total: 2, + }, + "wide range": { + in: "10000-999999/1000", + r: Range{ + block: Block{10000, 999999}, + size: 1000, + }, + total: 990, + }, + "overflow uint": { + in: "1000-100000000000000/1", + errFn: func(err error) bool { return strings.Contains(err.Error(), "unsigned integer overflow") }, + }, + "negative range": { + in: "1000-999/1", + errFn: func(err error) bool { return strings.Contains(err.Error(), "must be less than end 999") }, + }, + "zero block size": { + in: "1000-1000/0", + errFn: func(err error) bool { return strings.Contains(err.Error(), "block size must be a positive integer") }, + }, + "large block size": { + in: "1000-1001/3", + errFn: func(err error) bool { return strings.Contains(err.Error(), "must be less than or equal to the range") }, + }, + } + + for s, testCase := range testCases { + r, err := ParseRange(testCase.in) + if testCase.errFn != nil && !testCase.errFn(err) { + t.Errorf("%s: unexpected error: %v", s, err) + continue + } + if err != nil { + continue + } + if r.block.Start != testCase.r.block.Start || r.block.End != testCase.r.block.End || r.size != testCase.r.size { + t.Errorf("%s: unexpected range: %#v", s, r) + } + if r.Size() != testCase.total { + t.Errorf("%s: unexpected total: %d", s, r.Size()) + } + } +} + +func TestBlock(t *testing.T) { + b := Block{Start: 100, End: 109} + if b.String() != "100/10" { + t.Errorf("unexpected 
block string: %s", b.String()) + } + b, err := ParseBlock("100-109") + if err != nil { + t.Fatal(err) + } + if b.String() != "100/10" { + t.Errorf("unexpected block string: %s", b.String()) + } +} + +func TestOffset(t *testing.T) { + testCases := map[string]struct { + r Range + block Block + contained bool + offset uint32 + }{ + "identity range": { + r: Range{ + block: Block{1, 1}, + size: 1, + }, + block: Block{1, 1}, + contained: true, + }, + "out of identity range": { + r: Range{ + block: Block{1, 1}, + size: 1, + }, + block: Block{2, 2}, + }, + "out of identity range expanded": { + r: Range{ + block: Block{1, 1}, + size: 1, + }, + block: Block{2, 3}, + }, + "aligned to offset": { + r: Range{ + block: Block{0, 100}, + size: 10, + }, + block: Block{10, 19}, + contained: true, + offset: 1, + }, + "not aligned": { + r: Range{ + block: Block{0, 100}, + size: 10, + }, + block: Block{11, 20}, + }, + } + + for s, testCase := range testCases { + contained, offset := testCase.r.Offset(testCase.block) + if contained != testCase.contained { + t.Errorf("%s: unexpected contained: %t", s, contained) + continue + } + if offset != testCase.offset { + t.Errorf("%s: unexpected offset: %d", s, offset) + continue + } + if contained { + block, ok := testCase.r.BlockAt(offset) + if !ok { + t.Errorf("%s: should find block", s) + continue + } + if block != testCase.block { + t.Errorf("%s: blocks are not equivalent: %#v", s, block) + } + } + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/serviceability/logrus.go b/vendor/github.com/openshift/library-go/pkg/serviceability/logrus.go index 4f6802ae3..2f84af742 100644 --- a/vendor/github.com/openshift/library-go/pkg/serviceability/logrus.go +++ b/vendor/github.com/openshift/library-go/pkg/serviceability/logrus.go @@ -4,9 +4,23 @@ import ( "os" "strings" + "k8s.io/klog" + "github.com/sirupsen/logrus" ) +// InitLogrusFromKlog sets the logrus trace level based on the klog trace level. 
+func InitLogrusFromKlog() { + switch { + case bool(klog.V(4)): + InitLogrus("DEBUG") + case bool(klog.V(2)): + InitLogrus("INFO") + case bool(klog.V(0)): + InitLogrus("WARN") + } +} + // InitLogrus initializes logrus by setting a loglevel for it. func InitLogrus(level string) { if len(level) == 0 { diff --git a/vendor/github.com/openshift/library-go/pkg/template/OWNERS b/vendor/github.com/openshift/library-go/pkg/template/OWNERS new file mode 100644 index 000000000..29a933a5f --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/template/OWNERS @@ -0,0 +1,11 @@ +reviewers: + - smarterclayton + - mfojtik + - bparees + - soltysh + - adambkaplan +approvers: + - mfojtik + - bparees + - soltysh + - adambkaplan diff --git a/vendor/github.com/openshift/library-go/pkg/template/generator/doc.go b/vendor/github.com/openshift/library-go/pkg/template/generator/doc.go new file mode 100644 index 000000000..ee5f59063 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/template/generator/doc.go @@ -0,0 +1,3 @@ +// Package generator defines GeneratorInterface interface and implements +// some random value generators. +package generator diff --git a/vendor/github.com/openshift/library-go/pkg/template/generator/examples/doc.go b/vendor/github.com/openshift/library-go/pkg/template/generator/examples/doc.go new file mode 100644 index 000000000..4276a3b0d --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/template/generator/examples/doc.go @@ -0,0 +1,3 @@ +// Package examples demonstrates possible implementation of some +// random value generators. 
+package examples diff --git a/vendor/github.com/openshift/library-go/pkg/template/generator/examples/remotevalue.go b/vendor/github.com/openshift/library-go/pkg/template/generator/examples/remotevalue.go new file mode 100644 index 000000000..ad61fb8bd --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/template/generator/examples/remotevalue.go @@ -0,0 +1,46 @@ +package examples + +import ( + "fmt" + "io/ioutil" + "net/http" + "regexp" + "strings" +) + +// RemoteValueGenerator implements GeneratorInterface. It fetches random value +// from an external url endpoint based on the "[GET:]" input expression. +// +// Example: +// - "[GET:http://api.example.com/generateRandomValue]" +type RemoteValueGenerator struct { +} + +var remoteExp = regexp.MustCompile(`\[GET\:(http(s)?:\/\/(.+))\]`) + +// NewRemoteValueGenerator creates new RemoteValueGenerator. +func NewRemoteValueGenerator() RemoteValueGenerator { + return RemoteValueGenerator{} +} + +// GenerateValue fetches random value from an external url. The input +// expression must be of the form "[GET:]". 
+func (g RemoteValueGenerator) GenerateValue(expression string) (interface{}, error) { + matches := remoteExp.FindAllStringIndex(expression, -1) + if len(matches) < 1 { + return expression, fmt.Errorf("no matches found.") + } + for _, r := range matches { + response, err := http.Get(expression[5 : len(expression)-1]) + if err != nil { + return "", err + } + defer response.Body.Close() + body, err := ioutil.ReadAll(response.Body) + if err != nil { + return "", err + } + expression = strings.Replace(expression, expression[r[0]:r[1]], strings.TrimSpace(string(body)), 1) + } + return expression, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/template/generator/examples/remotevalue_test.go b/vendor/github.com/openshift/library-go/pkg/template/generator/examples/remotevalue_test.go new file mode 100644 index 000000000..9106ddc0d --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/template/generator/examples/remotevalue_test.go @@ -0,0 +1,36 @@ +package examples + +import ( + "fmt" + "net" + "net/http" + "testing" +) + +func TestRemoteValueGenerator(t *testing.T) { + generator := NewRemoteValueGenerator() + + _, err := generator.GenerateValue("[GET:http://api.example.com/new]") + if err == nil { + t.Errorf("Expected error while fetching non-existent remote.") + } +} + +func TestFakeRemoteValueGenerator(t *testing.T) { + // Run the fake remote server + http.HandleFunc("/v1/value/generate", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "NewRandomString") + }) + listener, _ := net.Listen("tcp", ":12345") + go http.Serve(listener, nil) + + generator := NewRemoteValueGenerator() + + value, err := generator.GenerateValue("[GET:http://127.0.0.1:12345/v1/value/generate]") + if err != nil { + t.Errorf(err.Error()) + } + if value != "NewRandomString" { + t.Errorf("Failed to fetch remote value using GET.") + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/template/generator/expressionvalue.go 
b/vendor/github.com/openshift/library-go/pkg/template/generator/expressionvalue.go new file mode 100644 index 000000000..03579a64b --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/template/generator/expressionvalue.go @@ -0,0 +1,160 @@ +package generator + +import ( + "fmt" + "math/rand" + "regexp" + "strconv" + "strings" +) + +// ExpressionValueGenerator implements Generator interface. It generates +// random string based on the input expression. The input expression is +// a string, which may contain "[a-zA-Z0-9]{length}" constructs, +// defining range and length of the result random characters. +// +// Examples: +// +// from | value +// ----------------------------- +// "test[0-9]{1}x" | "test7x" +// "[0-1]{8}" | "01001100" +// "0x[A-F0-9]{4}" | "0xB3AF" +// "[a-zA-Z0-9]{8}" | "hW4yQU5i" +// +// TODO: Support more regexp constructs. +type ExpressionValueGenerator struct { + seed *rand.Rand +} + +const ( + Alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + Numerals = "0123456789" + Symbols = "~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:`" + ASCII = Alphabet + Numerals + Symbols +) + +var ( + rangeExp = regexp.MustCompile(`([\\]?[a-zA-Z0-9]\-?[a-zA-Z0-9]?)`) + generatorsExp = regexp.MustCompile(`\[([a-zA-Z0-9\-\\]+)\](\{([0-9]+)\})`) + expressionExp = regexp.MustCompile(`\[(\\w|\\d|\\a|\\A)|([a-zA-Z0-9]\-[a-zA-Z0-9])+\]`) +) + +// NewExpressionValueGenerator creates new ExpressionValueGenerator. +func NewExpressionValueGenerator(seed *rand.Rand) ExpressionValueGenerator { + return ExpressionValueGenerator{seed: seed} +} + +// GenerateValue generates random string based on the input expression. +// The input expression is a pseudo-regex formatted string. See +// ExpressionValueGenerator for more details. 
+func (g ExpressionValueGenerator) GenerateValue(expression string) (interface{}, error) { + for { + r := generatorsExp.FindStringIndex(expression) + if r == nil { + break + } + ranges, length, err := rangesAndLength(expression[r[0]:r[1]]) + if err != nil { + return "", err + } + err = replaceWithGenerated( + &expression, + expression[r[0]:r[1]], + findExpressionPos(ranges), + length, + g.seed, + ) + if err != nil { + return "", err + } + } + return expression, nil +} + +// alphabetSlice produces a string slice that contains all characters within +// a specified range. +func alphabetSlice(from, to byte) (string, error) { + leftPos := strings.Index(ASCII, string(from)) + rightPos := strings.LastIndex(ASCII, string(to)) + if leftPos > rightPos { + return "", fmt.Errorf("invalid range specified: %s-%s", string(from), string(to)) + } + return ASCII[leftPos:rightPos], nil +} + +// replaceWithGenerated replaces all occurrences of the given expression +// in the string with random characters of the specified range and length. 
+func replaceWithGenerated(s *string, expression string, ranges [][]byte, length int, seed *rand.Rand) error { + var alphabet string + for _, r := range ranges { + switch string(r[0]) + string(r[1]) { + case `\w`: + alphabet += Alphabet + Numerals + "_" + case `\d`: + alphabet += Numerals + case `\a`: + alphabet += Alphabet + Numerals + case `\A`: + alphabet += Symbols + default: + slice, err := alphabetSlice(r[0], r[1]) + if err != nil { + return err + } + alphabet += slice + } + } + result := make([]byte, length) + alphabet = removeDuplicateChars(alphabet) + for i := 0; i < length; i++ { + result[i] = alphabet[seed.Intn(len(alphabet))] + } + *s = strings.Replace(*s, expression, string(result), 1) + return nil +} + +// removeDuplicateChars removes the duplicate characters from the data slice +func removeDuplicateChars(input string) string { + data := []byte(input) + length := len(data) - 1 + for i := 0; i < length; i++ { + for j := i + 1; j <= length; j++ { + if data[i] == data[j] { + data[j] = data[length] + data = data[0:length] + length-- + j-- + } + } + } + return string(data) +} + +// findExpressionPos searches the given string for the valid expressions +// and returns their corresponding indexes. +func findExpressionPos(s string) [][]byte { + matches := rangeExp.FindAllStringIndex(s, -1) + result := make([][]byte, len(matches)) + for i, r := range matches { + result[i] = []byte{s[r[0]], s[r[1]-1]} + } + return result +} + +// rangesAndLength extracts the expression ranges (eg. [A-Z0-9]) and length +// (eg. {3}). This helper function also validates the expression syntax and +// its length (must be within 1..255). 
+func rangesAndLength(s string) (string, int, error) { + expr := s[0:strings.LastIndex(s, "{")] + if !expressionExp.MatchString(expr) { + return "", 0, fmt.Errorf("malformed expresion syntax: %s", expr) + } + + length, _ := strconv.Atoi(s[strings.LastIndex(s, "{")+1 : len(s)-1]) + // TODO: We do need to set a better limit for the number of generated characters. + if length > 0 && length <= 255 { + return expr, length, nil + } + return "", 0, fmt.Errorf("range must be within [1-255] characters (%d)", length) +} diff --git a/vendor/github.com/openshift/library-go/pkg/template/generator/expressionvalue_test.go b/vendor/github.com/openshift/library-go/pkg/template/generator/expressionvalue_test.go new file mode 100644 index 000000000..bda53c94c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/template/generator/expressionvalue_test.go @@ -0,0 +1,73 @@ +package generator + +import ( + "math/rand" + "testing" +) + +func TestExpressionValueGenerator(t *testing.T) { + var tests = []struct { + Expression string + ExpectedValue string + }{ + {"test[A-Z0-9]{4}template", "testQ3HVtemplate"}, + {"[\\d]{3}", "889"}, + {"[\\w]{20}", "hiG4uRbcUDd5PEJLyHZ7"}, + {"[\\a]{10}", "4U390O49B9"}, + {"[\\A]{10}", ")^&-|_:[><"}, + {"strongPassword[\\w]{3}[\\A]{3}", "strongPasswordhiG-|_"}, + {"admin[0-9]{2}[A-Z]{2}", "admin78YB"}, + {"admin[0-9]{2}test[A-Z]{2}", "admin78testYB"}, + } + + for _, test := range tests { + generator := NewExpressionValueGenerator(rand.New(rand.NewSource(1337))) + value, err := generator.GenerateValue(test.Expression) + if err != nil { + t.Errorf("Failed to generate value from %s due to error: %v", test.Expression, err) + } + if value != test.ExpectedValue { + t.Errorf("Failed to generate expected value from %s\n. Generated: %s\n. 
Expected: %s\n", test.Expression, value, test.ExpectedValue) + } + } +} + +func TestRemoveDuplicatedCharacters(t *testing.T) { + var tests = []struct { + Expression string + ExpectedValue string + }{ + {"abcdefgh", "abcdefgh"}, + {"abcabc", "abc"}, + {"1111111", "1"}, + {"1234567890", "1234567890"}, + {"test@@", "tes@"}, + } + + for _, test := range tests { + result := removeDuplicateChars(test.Expression) + if result != test.ExpectedValue { + t.Errorf("Expected %q, got %q", test.ExpectedValue, result) + } + } +} + +func TestExpressionValueGeneratorErrors(t *testing.T) { + generator := NewExpressionValueGenerator(rand.New(rand.NewSource(1337))) + + if v, err := generator.GenerateValue("[ABC]{3}"); err == nil { + t.Errorf("Expected [ABC]{3} to produce malformed syntax error (returned: %s)", v) + } + + if v, err := generator.GenerateValue("[Z-A]{3}"); err == nil { + t.Errorf("Expected Invalid range specified error, got %s", v) + } + + if v, err := generator.GenerateValue("[A-Z]{300}"); err == nil { + t.Errorf("Expected Invalid range specified error, got %s", v) + } + + if v, err := generator.GenerateValue("[A-Z]{0}"); err == nil { + t.Errorf("Expected Invalid range specified error, got %s", v) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/template/generator/generator.go b/vendor/github.com/openshift/library-go/pkg/template/generator/generator.go new file mode 100644 index 000000000..6d3a08b2a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/template/generator/generator.go @@ -0,0 +1,7 @@ +package generator + +// Generator is an interface for generating random values +// from an input expression +type Generator interface { + GenerateValue(expression string) (interface{}, error) +} diff --git a/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/object.go b/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/object.go new file mode 100644 index 000000000..efe692f57 --- /dev/null +++ 
b/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/object.go @@ -0,0 +1,120 @@ +package templateprocessing + +import ( + "encoding/json" + "fmt" + "reflect" + + "k8s.io/klog" +) + +// visitObjectStrings recursively visits all string fields in the object and calls the +// visitor function on them. The visitor function can be used to modify the +// value of the string fields. +func visitObjectStrings(obj interface{}, visitor func(string) (string, bool)) error { + return visitValue(reflect.ValueOf(obj), visitor) +} + +func visitValue(v reflect.Value, visitor func(string) (string, bool)) error { + // you'll never be able to substitute on a nil. Check the kind first or you'll accidentally + // end up panic-ing + switch v.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + if v.IsNil() { + return nil + } + } + + switch v.Kind() { + + case reflect.Ptr, reflect.Interface: + err := visitValue(v.Elem(), visitor) + if err != nil { + return err + } + case reflect.Slice, reflect.Array: + vt := v.Type().Elem() + for i := 0; i < v.Len(); i++ { + val, err := visitUnsettableValues(vt, v.Index(i), visitor) + if err != nil { + return err + } + v.Index(i).Set(val) + } + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + err := visitValue(v.Field(i), visitor) + if err != nil { + return err + } + } + case reflect.Map: + vt := v.Type().Elem() + for _, oldKey := range v.MapKeys() { + newKey, err := visitUnsettableValues(oldKey.Type(), oldKey, visitor) + if err != nil { + return err + } + + oldValue := v.MapIndex(oldKey) + newValue, err := visitUnsettableValues(vt, oldValue, visitor) + if err != nil { + return err + } + v.SetMapIndex(oldKey, reflect.Value{}) + v.SetMapIndex(newKey, newValue) + } + case reflect.String: + if !v.CanSet() { + return fmt.Errorf("unable to set String value '%v'", v) + } + s, asString := visitor(v.String()) + if !asString { + return fmt.Errorf("attempted to set String field to 
non-string value '%v'", s) + } + v.SetString(s) + default: + klog.V(5).Infof("Ignoring non-parameterizable field type '%s': %v", v.Kind(), v) + return nil + } + return nil +} + +// visitUnsettableValues creates a copy of the object you want to modify and returns the modified result +func visitUnsettableValues(typeOf reflect.Type, original reflect.Value, visitor func(string) (string, bool)) (reflect.Value, error) { + val := reflect.New(typeOf).Elem() + existing := original + // if the value type is interface, we must resolve it to a concrete value prior to setting it back. + if existing.CanInterface() { + existing = reflect.ValueOf(existing.Interface()) + } + switch existing.Kind() { + case reflect.String: + s, asString := visitor(existing.String()) + + if asString { + val = reflect.ValueOf(s) + } else { + b := []byte(s) + var data interface{} + err := json.Unmarshal(b, &data) + if err != nil { + // the result of substitution may have been an unquoted string value, + // which is an error when decoding in json(only "true", "false", and numeric + // values can be unquoted), so try wrapping the value in quotes so it will be + // properly converted to a string type during decoding. 
+ val = reflect.ValueOf(s) + } else { + val = reflect.ValueOf(data) + } + } + + default: + if existing.IsValid() && existing.Kind() != reflect.Invalid { + val.Set(existing) + } + visitValue(val, visitor) + } + + return val, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/object_test.go b/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/object_test.go new file mode 100644 index 000000000..a8ef7d478 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/object_test.go @@ -0,0 +1,110 @@ +package templateprocessing + +import ( + "fmt" + "reflect" + "testing" +) + +type sampleInnerStruct struct { + Name string + Number int + List []string + Map map[string]string +} + +type sampleStruct struct { + Name string + Inner sampleInnerStruct + Ptr *sampleInnerStruct + MapInMap map[string]map[string]string + ArrayInArray [][]string + Array []string + ArrayInMap map[string][]interface{} +} + +func TestVisitObjectStringsOnStruct(t *testing.T) { + samples := [][]sampleStruct{ + {{}, {}}, + {{Name: "Foo"}, {Name: "sample-Foo"}}, + {{Ptr: nil}, {Ptr: nil}}, + {{Ptr: &sampleInnerStruct{Name: "foo"}}, {Ptr: &sampleInnerStruct{Name: "sample-foo"}}}, + {{Inner: sampleInnerStruct{Name: "foo"}}, {Inner: sampleInnerStruct{Name: "sample-foo"}}}, + {{Array: []string{"foo", "bar"}}, {Array: []string{"sample-foo", "sample-bar"}}}, + { + { + MapInMap: map[string]map[string]string{ + "foo": {"bar": "test"}, + }, + }, + { + MapInMap: map[string]map[string]string{ + "sample-foo": {"sample-bar": "sample-test"}, + }, + }, + }, + { + {ArrayInArray: [][]string{{"foo", "bar"}}}, + {ArrayInArray: [][]string{{"sample-foo", "sample-bar"}}}, + }, + { + {ArrayInMap: map[string][]interface{}{"key": {"foo", "bar"}}}, + {ArrayInMap: map[string][]interface{}{"sample-key": {"sample-foo", "sample-bar"}}}, + }, + } + for i := range samples { + visitObjectStrings(&samples[i][0], func(in string) (string, bool) { + if 
len(in) == 0 { + return in, true + } + return fmt.Sprintf("sample-%s", in), true + }) + if !reflect.DeepEqual(samples[i][0], samples[i][1]) { + t.Errorf("[%d] Got:\n%#v\nExpected:\n%#v", i, samples[i][0], samples[i][1]) + } + } +} + +func TestVisitObjectStringsOnMap(t *testing.T) { + samples := [][]map[string]string{ + { + {"foo": "bar"}, + {"sample-foo": "sample-bar"}, + }, + { + {"empty": ""}, + {"sample-empty": "sample-"}, + }, + { + {"": "invalid"}, + {"sample-": "sample-invalid"}, + }, + } + + for i := range samples { + visitObjectStrings(&samples[i][0], func(in string) (string, bool) { + return fmt.Sprintf("sample-%s", in), true + }) + if !reflect.DeepEqual(samples[i][0], samples[i][1]) { + t.Errorf("Got %#v, expected %#v", samples[i][0], samples[i][1]) + } + } +} + +func TestVisitObjectStringsOnArray(t *testing.T) { + samples := [][][]string{ + { + {"foo", "bar"}, + {"sample-foo", "sample-bar"}, + }, + } + + for i := range samples { + visitObjectStrings(&samples[i][0], func(in string) (string, bool) { + return fmt.Sprintf("sample-%s", in), true + }) + if !reflect.DeepEqual(samples[i][0], samples[i][1]) { + t.Errorf("Got %#v, expected %#v", samples[i][0], samples[i][1]) + } + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/template.go b/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/template.go new file mode 100644 index 000000000..497c0e399 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/template.go @@ -0,0 +1,295 @@ +package templateprocessing + +import ( + "fmt" + "regexp" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + + appsv1 "github.com/openshift/api/apps/v1" + templatev1 "github.com/openshift/api/template/v1" + 
"github.com/openshift/library-go/pkg/legacyapi/legacygroupification" + . "github.com/openshift/library-go/pkg/template/generator" +) + +// match ${KEY}, KEY will be grouped +var stringParameterExp = regexp.MustCompile(`\$\{([a-zA-Z0-9\_]+?)\}`) + +// match ${{KEY}} exact match only, KEY will be grouped +var nonStringParameterExp = regexp.MustCompile(`^\$\{\{([a-zA-Z0-9\_]+)\}\}$`) + +// Processor process the Template into the List with substituted parameters +type Processor struct { + Generators map[string]Generator +} + +// NewProcessor creates new Processor and initializv1es its set of generators. +func NewProcessor(generators map[string]Generator) *Processor { + return &Processor{Generators: generators} +} + +// Process transforms Template object into List object. It generates +// Parameter values using the defined set of generators first, and then it +// substitutes all Parameter expression occurrences with their corresponding +// values (currently in the containers' Environment variables only). +func (p *Processor) Process(template *templatev1.Template) field.ErrorList { + templateErrors := field.ErrorList{} + + if errs := p.GenerateParameterValues(template); len(errs) > 0 { + return append(templateErrors, errs...) + } + + // Place parameters into a map for efficient lookup + paramMap := make(map[string]templatev1.Parameter) + for _, param := range template.Parameters { + paramMap[param.Name] = param + } + + // Perform parameter substitution on the template's user message. This can be used to + // instruct a user on next steps for the template. + template.Message, _ = p.EvaluateParameterSubstitution(paramMap, template.Message) + + // substitute parameters in ObjectLabels - must be done before the template + // objects themselves are iterated. 
+ for k, v := range template.ObjectLabels { + newk, _ := p.EvaluateParameterSubstitution(paramMap, k) + v, _ = p.EvaluateParameterSubstitution(paramMap, v) + template.ObjectLabels[newk] = v + + if newk != k { + delete(template.ObjectLabels, k) + } + } + + itemPath := field.NewPath("item") + for i, item := range template.Objects { + idxPath := itemPath.Index(i) + var currObj runtime.Object + + if len(item.Raw) > 0 { + // TODO: use runtime.DecodeList when it returns ValidationErrorList + decodedObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, item.Raw) + if err != nil { + templateErrors = append(templateErrors, field.Invalid(idxPath.Child("objects"), item, fmt.Sprintf("unable to handle object: %v", err))) + continue + } + currObj = decodedObj + } else { + currObj = item.Object.DeepCopyObject() + } + + // If an object definition's metadata includes a hardcoded namespace field, the field will be stripped out of + // the definition during template instantiation. Namespace fields that contain a ${PARAMETER_REFERENCE} + // will be left in place, resolved during parameter substition, and the object will be created in the + // referenced namespace. + stripNamespace(currObj) + + newItem, err := p.SubstituteParameters(paramMap, currObj) + if err != nil { + templateErrors = append(templateErrors, field.Invalid(idxPath.Child("parameters"), template.Parameters, err.Error())) + } + + // this changes oapi GVKs to groupified GVKs so they can be submitted to modern, aggregated servers + // It is done after substitution in case someone substitutes a kind. 
+ gvk := currObj.GetObjectKind().GroupVersionKind() + legacygroupification.OAPIToGroupifiedGVK(&gvk) + newItem.GetObjectKind().SetGroupVersionKind(gvk) + + if err := addObjectLabels(newItem, template.ObjectLabels); err != nil { + templateErrors = append(templateErrors, field.Invalid(idxPath.Child("labels"), + template.ObjectLabels, fmt.Sprintf("label could not be applied: %v", err))) + } + template.Objects[i] = runtime.RawExtension{Object: newItem} + } + + return templateErrors +} + +func stripNamespace(obj runtime.Object) { + // Remove namespace from the item unless it contains a ${PARAMETER_REFERENCE} + if itemMeta, err := meta.Accessor(obj); err == nil && len(itemMeta.GetNamespace()) > 0 && !stringParameterExp.MatchString(itemMeta.GetNamespace()) { + itemMeta.SetNamespace("") + return + } + // TODO: allow meta.Accessor to handle runtime.Unstructured + if unstruct, ok := obj.(*unstructured.Unstructured); ok && unstruct.Object != nil { + if obj, ok := unstruct.Object["metadata"]; ok { + if m, ok := obj.(map[string]interface{}); ok { + if ns, ok := m["namespace"]; ok { + if ns, ok := ns.(string); !ok || !stringParameterExp.MatchString(ns) { + m["namespace"] = "" + } + } + } + return + } + if ns, ok := unstruct.Object["namespace"]; ok { + if ns, ok := ns.(string); !ok || !stringParameterExp.MatchString(ns) { + unstruct.Object["namespace"] = "" + return + } + } + } +} + +// GetParameterByName searches for a Parameter in the Template +// based on its name. +func GetParameterByName(t *templatev1.Template, name string) *templatev1.Parameter { + for i, param := range t.Parameters { + if param.Name == name { + return &(t.Parameters[i]) + } + } + return nil +} + +// EvaluateParameterSubstitution replaces escaped parameters in a string with values from the +// provided map. 
Returns the substituted value (if any substitution applied) and a boolean +// indicating if the resulting value should be treated as a string(true) or a non-string +// value(false) for purposes of json encoding. +func (p *Processor) EvaluateParameterSubstitution(params map[string]templatev1.Parameter, in string) (string, bool) { + out := in + // First check if the value matches the "${{KEY}}" substitution syntax, which + // means replace and drop the quotes because the parameter value is to be used + // as a non-string value. If we hit a match here, we're done because the + // "${{KEY}}" syntax is exact match only, it cannot be used in a value like + // "FOO_${{KEY}}_BAR", no substitution will be performed if it is used in that way. + for _, match := range nonStringParameterExp.FindAllStringSubmatch(in, -1) { + if len(match) > 1 { + if paramValue, found := params[match[1]]; found { + out = strings.Replace(out, match[0], paramValue.Value, 1) + return out, false + } + } + } + + // If we didn't do a non-string substitution above, do normal string substitution + // on the value here if it contains a "${KEY}" reference. This substitution does + // allow multiple matches and prefix/postfix, eg "FOO_${KEY1}_${KEY2}_BAR" + for _, match := range stringParameterExp.FindAllStringSubmatch(in, -1) { + if len(match) > 1 { + if paramValue, found := params[match[1]]; found { + out = strings.Replace(out, match[0], paramValue.Value, 1) + } + } + } + return out, true +} + +// SubstituteParameters loops over all values defined in structured +// and unstructured types that are children of item. 
+// +// Example of Parameter expression: +// - ${PARAMETER_NAME} +// +func (p *Processor) SubstituteParameters(params map[string]templatev1.Parameter, item runtime.Object) (runtime.Object, error) { + visitObjectStrings(item, func(in string) (string, bool) { + return p.EvaluateParameterSubstitution(params, in) + }) + return item, nil +} + +// GenerateParameterValues generates Value for each Parameter of the given +// Template that has Generate field specified where Value is not already +// supplied. +// +// Examples: +// +// from | value +// ----------------------------- +// "test[0-9]{1}x" | "test7x" +// "[0-1]{8}" | "01001100" +// "0x[A-F0-9]{4}" | "0xB3AF" +// "[a-zA-Z0-9]{8}" | "hW4yQU5i" +// If an error occurs, the parameter that caused the error is returned along with the error message. +func (p *Processor) GenerateParameterValues(t *templatev1.Template) field.ErrorList { + var errs field.ErrorList + + for i := range t.Parameters { + param := &t.Parameters[i] + if len(param.Value) > 0 { + continue + } + templatePath := field.NewPath("template").Child("parameters").Index(i) + if param.Generate != "" { + generator, ok := p.Generators[param.Generate] + if !ok { + err := fmt.Errorf("Unknown generator name '%v' for parameter %s", param.Generate, param.Name) + errs = append(errs, field.Invalid(templatePath, param.Generate, err.Error())) + continue + } + if generator == nil { + err := fmt.Errorf("template.parameters[%v]: Invalid '%v' generator for parameter %s", i, param.Generate, param.Name) + errs = append(errs, field.Invalid(templatePath, param, err.Error())) + continue + } + value, err := generator.GenerateValue(param.From) + if err != nil { + errs = append(errs, field.Invalid(templatePath, param, err.Error())) + continue + } + param.Value, ok = value.(string) + if !ok { + err := fmt.Errorf("template.parameters[%v]: Unable to convert the generated value '%#v' to string for parameter %s", i, value, param.Name) + errs = append(errs, field.Invalid(templatePath, 
param, err.Error())) + continue + } + } + if len(param.Value) == 0 && param.Required { + err := fmt.Errorf("template.parameters[%v]: parameter %s is required and must be specified", i, param.Name) + errs = append(errs, field.Required(templatePath, err.Error())) + } + } + + return errs +} + +// addObjectLabels adds new label(s) to a single runtime.Object, overwriting +// existing labels that have the same key. +func addObjectLabels(obj runtime.Object, labels labels.Set) error { + if labels == nil { + return nil + } + + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + + metaLabels := accessor.GetLabels() + if metaLabels == nil { + metaLabels = make(map[string]string) + } + for k, v := range labels { + metaLabels[k] = v + } + accessor.SetLabels(metaLabels) + + switch objType := obj.(type) { + case *appsv1.DeploymentConfig: + if err := addDeploymentConfigNestedLabels(objType, labels); err != nil { + return fmt.Errorf("unable to add nested labels to %s/%s: %v", obj.GetObjectKind().GroupVersionKind(), accessor.GetName(), err) + } + } + + return nil +} + +// addDeploymentConfigNestedLabels adds new label(s) to a nested labels of a single DeploymentConfig object +func addDeploymentConfigNestedLabels(obj *appsv1.DeploymentConfig, labels labels.Set) error { + if obj.Spec.Template == nil { + return nil + } + if obj.Spec.Template.Labels == nil { + obj.Spec.Template.Labels = make(map[string]string) + } + for k, v := range labels { + obj.Spec.Template.Labels[k] = v + } + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/template_test.go b/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/template_test.go new file mode 100644 index 000000000..21d2ced92 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/template_test.go @@ -0,0 +1,589 @@ +package templateprocessing + +import ( + "fmt" + "io/ioutil" + "math/rand" + "reflect" + "regexp" + "strings" + 
"testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/apitesting" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/apimachinery/pkg/util/validation/field" + + appsv1 "github.com/openshift/api/apps/v1" + templatev1 "github.com/openshift/api/template/v1" + "github.com/openshift/library-go/pkg/template/generator" +) + +var codecFactory = serializer.CodecFactory{} + +func init() { + _, codecFactory = apitesting.SchemeForOrDie(templatev1.Install) +} + +func makeParameter(name, value, generate string, required bool) templatev1.Parameter { + return templatev1.Parameter{ + Name: name, + Value: value, + Generate: generate, + Required: required, + } +} + +type FooGenerator struct { +} + +func (g FooGenerator) GenerateValue(expression string) (interface{}, error) { + return "foo", nil +} + +type ErrorGenerator struct { +} + +func (g ErrorGenerator) GenerateValue(expression string) (interface{}, error) { + return "", fmt.Errorf("error") +} + +type NoStringGenerator struct { +} + +func (g NoStringGenerator) GenerateValue(expression string) (interface{}, error) { + return NoStringGenerator{}, nil +} + +type EmptyGenerator struct { +} + +func (g EmptyGenerator) GenerateValue(expression string) (interface{}, error) { + return "", nil +} + +func TestParameterGenerators(t *testing.T) { + tests := []struct { + parameter templatev1.Parameter + generators map[string]generator.Generator + shouldPass bool + expected templatev1.Parameter + errType field.ErrorType + fieldPath string + }{ + { // Empty generator, should pass + makeParameter("PARAM-pass-empty-gen", "X", "", false), + map[string]generator.Generator{}, + true, + makeParameter("PARAM-pass-empty-gen", "X", "", false), + "", + "", + }, + { // Foo generator, should pass + makeParameter("PARAM-pass-foo-gen", "", "foo", false), + 
map[string]generator.Generator{"foo": FooGenerator{}}, + true, + makeParameter("PARAM-pass-foo-gen", "foo", "", false), + "", + "", + }, + { // Foo generator, should fail + makeParameter("PARAM-fail-foo-gen", "", "foo", false), + map[string]generator.Generator{}, + false, + makeParameter("PARAM-fail-foo-gen", "foo", "", false), + field.ErrorTypeInvalid, + "template.parameters[0]", + }, + { // No str generator, should fail + makeParameter("PARAM-fail-nostr-gen", "", "foo", false), + map[string]generator.Generator{"foo": NoStringGenerator{}}, + false, + makeParameter("PARAM-fail-nostr-gen", "foo", "", false), + field.ErrorTypeInvalid, + "template.parameters[0]", + }, + { // Invalid generator, should fail + makeParameter("PARAM-fail-inv-gen", "", "invalid", false), + map[string]generator.Generator{"invalid": nil}, + false, + makeParameter("PARAM-fail-inv-gen", "", "invalid", false), + field.ErrorTypeInvalid, + "template.parameters[0]", + }, + { // Error generator, should fail + makeParameter("PARAM-fail-err-gen", "", "error", false), + map[string]generator.Generator{"error": ErrorGenerator{}}, + false, + makeParameter("PARAM-fail-err-gen", "", "error", false), + field.ErrorTypeInvalid, + "template.parameters[0]", + }, + { // Error required parameter, no value, should fail + makeParameter("PARAM-fail-no-val", "", "", true), + map[string]generator.Generator{"error": ErrorGenerator{}}, + false, + makeParameter("PARAM-fail-no-val", "", "", true), + field.ErrorTypeRequired, + "template.parameters[0]", + }, + { // Error required parameter, no value from generator, should fail + makeParameter("PARAM-fail-no-val-from-gen", "", "empty", true), + map[string]generator.Generator{"empty": EmptyGenerator{}}, + false, + makeParameter("PARAM-fail-no-val-from-gen", "", "empty", true), + field.ErrorTypeRequired, + "template.parameters[0]", + }, + } + + for i, test := range tests { + processor := NewProcessor(test.generators) + template := templatev1.Template{Parameters: 
[]templatev1.Parameter{test.parameter}} + errs := processor.GenerateParameterValues(&template) + if errs != nil && test.shouldPass { + t.Errorf("test[%v]: Unexpected error %v", i, errs) + } + if errs == nil && !test.shouldPass { + t.Errorf("test[%v]: Expected error", i) + } + if errs != nil { + if test.errType != errs[0].Type { + t.Errorf("test[%v]: Unexpected error type: Expected: %s, got %s", i, test.errType, errs[0].Type) + } + if test.fieldPath != errs[0].Field { + t.Errorf("test[%v]: Unexpected error type: Expected: %s, got %s", i, test.fieldPath, errs[0].Field) + } + continue + } + actual := template.Parameters[0] + if actual.Value != test.expected.Value { + t.Errorf("test[%v]: Unexpected value: Expected: %#v, got: %#v", i, test.expected.Value, test.parameter.Value) + } + } +} + +func TestProcessValue(t *testing.T) { + var template templatev1.Template + if err := runtime.DecodeInto(codecFactory.UniversalDecoder(), []byte(`{ + "kind":"Template", "apiVersion":"template.openshift.io/v1", + "objects": [ + { + "kind": "Service", "apiVersion": "v${VALUE}", + "metadata": { + "labels": { + "i1": "${{INT_1}}", + "invalidjsonmap": "${{INVALID_JSON_MAP}}", + "invalidjsonarray": "${{INVALID_JSON_ARRAY}}", + "key1": "${VALUE}", + "key2": "$${VALUE}", + "quoted_string": "${{STRING_1}}", + "s1_s1": "${STRING_1}_${STRING_1}", + "s1_s2": "${STRING_1}_${STRING_2}", + "untouched": "a${{INT_1}}", + "untouched2": "${{INT_1}}a", + "untouched3": "${{INVALID_PARAMETER}}", + "untouched4": "${{INVALID PARAMETER}}", + "validjsonmap": "${{VALID_JSON_MAP}}", + "validjsonarray": "${{VALID_JSON_ARRAY}}" + + } + } + } + ] + }`), &template); err != nil { + t.Fatalf("unexpected error: %v", err) + } + generators := map[string]generator.Generator{ + "expression": generator.NewExpressionValueGenerator(rand.New(rand.NewSource(1337))), + } + processor := NewProcessor(generators) + + // Define custom parameter for the transformation: + addParameter(&template, makeParameter("VALUE", "1", "", false)) 
+ addParameter(&template, makeParameter("STRING_1", "string1", "", false)) + addParameter(&template, makeParameter("STRING_2", "string2", "", false)) + addParameter(&template, makeParameter("INT_1", "1", "", false)) + addParameter(&template, makeParameter("VALID_JSON_MAP", "{\"key\":\"value\"}", "", false)) + addParameter(&template, makeParameter("INVALID_JSON_MAP", "{\"key\":\"value\"", "", false)) + addParameter(&template, makeParameter("VALID_JSON_ARRAY", "[\"key\",\"value\"]", "", false)) + addParameter(&template, makeParameter("INVALID_JSON_ARRAY", "[\"key\":\"value\"", "", false)) + + // Transform the template config into the result config + errs := processor.Process(&template) + if len(errs) > 0 { + t.Fatalf("unexpected error: %v", errs) + } + result, err := runtime.Encode(codecFactory.LegacyCodec(templatev1.GroupVersion), &template) + if err != nil { + t.Fatalf("unexpected error during encoding Config: %#v", err) + } + expect := `{"kind":"Template","apiVersion":"template.openshift.io/v1","metadata":{"creationTimestamp":null},"objects":[{"apiVersion":"v1","kind":"Service","metadata":{"labels":{"i1":1,"invalidjsonarray":"[\"key\":\"value\"","invalidjsonmap":"{\"key\":\"value\"","key1":"1","key2":"$1","quoted_string":"string1","s1_s1":"string1_string1","s1_s2":"string1_string2","untouched":"a${{INT_1}}","untouched2":"${{INT_1}}a","untouched3":"${{INVALID_PARAMETER}}","untouched4":"${{INVALID PARAMETER}}","validjsonarray":["key","value"],"validjsonmap":{"key":"value"}}}}],"parameters":[{"name":"VALUE","value":"1"},{"name":"STRING_1","value":"string1"},{"name":"STRING_2","value":"string2"},{"name":"INT_1","value":"1"},{"name":"VALID_JSON_MAP","value":"{\"key\":\"value\"}"},{"name":"INVALID_JSON_MAP","value":"{\"key\":\"value\""},{"name":"VALID_JSON_ARRAY","value":"[\"key\",\"value\"]"},{"name":"INVALID_JSON_ARRAY","value":"[\"key\":\"value\""}]}` + stringResult := strings.TrimSpace(string(result)) + if expect != stringResult { + //t.Errorf("unexpected output, 
expected: \n%s\nGot:\n%s\n", expect, stringResult) + t.Errorf("unexpected output: %s", diff.StringDiff(expect, stringResult)) + } +} + +var trailingWhitespace = regexp.MustCompile(`\n\s*`) + +func TestEvaluateLabels(t *testing.T) { + testCases := map[string]struct { + Input string + Output string + Labels map[string]string + }{ + "no labels": { + Input: `{ + "kind":"Template", "apiVersion":"template.openshift.io/v1", + "objects": [ + { + "kind": "Service", "apiVersion": "v1", + "metadata": {"labels": {"key1": "v1", "key2": "v2"} } + } + ] + }`, + Output: `{ + "kind":"Template","apiVersion":"template.openshift.io/v1","metadata":{"creationTimestamp":null}, + "objects":[ + { + "apiVersion":"v1","kind":"Service","metadata":{ + "labels":{"key1":"v1","key2":"v2"}} + } + ] + }`, + }, + "one different label": { + Input: `{ + "kind":"Template", "apiVersion":"template.openshift.io/v1", + "objects": [ + { + "kind": "Service", "apiVersion": "v1", + "metadata": {"labels": {"key1": "v1", "key2": "v2"} } + } + ] + }`, + Output: `{ + "kind":"Template","apiVersion":"template.openshift.io/v1","metadata":{"creationTimestamp":null}, + "objects":[ + { + "apiVersion":"v1","kind":"Service","metadata":{ + "labels":{"key1":"v1","key2":"v2","key3":"v3"}} + } + ], + "labels":{"key3":"v3"} + }`, + Labels: map[string]string{"key3": "v3"}, + }, + "when the root object has labels and metadata": { + Input: `{ + "kind":"Template", "apiVersion":"template.openshift.io/v1", + "objects": [ + { + "kind": "Service", "apiVersion": "v1", + "metadata": {}, + "labels": { + "key1": "v1", + "key2": "v2" + } + } + ] + }`, + Output: `{ + "kind":"Template","apiVersion":"template.openshift.io/v1","metadata":{"creationTimestamp":null}, + "objects":[ + { + "apiVersion":"v1","kind":"Service", + "labels":{"key1":"v1","key2":"v2"}, + "metadata":{"labels":{"key3":"v3"}} + } + ], + "labels":{"key3":"v3"} + }`, + Labels: map[string]string{"key3": "v3"}, + }, + "overwrites label": { + Input: `{ + "kind":"Template", 
"apiVersion":"template.openshift.io/v1", + "objects": [ + { + "kind": "Service", "apiVersion": "v1", + "metadata": {"labels": {"key1": "v1", "key2": "v2"} } + } + ] + }`, + Output: `{ + "kind":"Template","apiVersion":"template.openshift.io/v1","metadata":{"creationTimestamp":null}, + "objects":[ + { + "apiVersion":"v1","kind":"Service","metadata":{ + "labels":{"key1":"v1","key2":"v3"}} + } + ], + "labels":{"key2":"v3"} + }`, + Labels: map[string]string{"key2": "v3"}, + }, + "parameterised labels": { + Input: `{ + "kind":"Template", "apiVersion":"template.openshift.io/v1", + "objects": [ + { + "kind": "Service", "apiVersion": "v1", + "metadata": {"labels": {"key1": "v1", "key2": "v2"}} + } + ], + "parameters": [ + { + "name": "KEY", + "value": "key" + }, + { + "name": "VALUE", + "value": "value" + } + ] + }`, + Output: `{ + "kind":"Template","apiVersion":"template.openshift.io/v1","metadata":{"creationTimestamp":null}, + "objects":[ + { + "apiVersion":"v1","kind":"Service","metadata":{ + "labels":{"key":"value","key1":"v1","key2":"v2"}} + } + ], + "parameters":[ + { + "name":"KEY", + "value":"key" + }, + { + "name":"VALUE", + "value":"value" + } + ], + "labels":{"key":"value"} + }`, + Labels: map[string]string{"${KEY}": "${VALUE}"}, + }, + } + + for k, testCase := range testCases { + var template templatev1.Template + if err := runtime.DecodeInto(codecFactory.UniversalDecoder(), []byte(testCase.Input), &template); err != nil { + t.Errorf("%s: unexpected error: %v", k, err) + continue + } + + generators := map[string]generator.Generator{ + "expression": generator.NewExpressionValueGenerator(rand.New(rand.NewSource(1337))), + } + processor := NewProcessor(generators) + + template.ObjectLabels = testCase.Labels + + // Transform the template config into the result config + errs := processor.Process(&template) + if len(errs) > 0 { + t.Errorf("%s: unexpected error: %v", k, errs) + continue + } + result, err := 
runtime.Encode(codecFactory.LegacyCodec(templatev1.GroupVersion), &template) + if err != nil { + t.Errorf("%s: unexpected error: %v", k, err) + continue + } + expect := testCase.Output + expect = trailingWhitespace.ReplaceAllString(expect, "") + stringResult := strings.TrimSpace(string(result)) + if expect != stringResult { + t.Errorf("%s: unexpected output: %s", k, diff.StringDiff(expect, stringResult)) + continue + } + } +} + +func TestProcessTemplateParameters(t *testing.T) { + var template, expectedTemplate templatev1.Template + jsonData, _ := ioutil.ReadFile("testdata/guestbook.json") + if err := runtime.DecodeInto(codecFactory.UniversalDecoder(), jsonData, &template); err != nil { + t.Fatalf("unexpected error: %v", err) + } + + expectedData, _ := ioutil.ReadFile("testdata/guestbook_list.json") + if err := runtime.DecodeInto(codecFactory.UniversalDecoder(), expectedData, &expectedTemplate); err != nil { + t.Fatalf("unexpected error: %v", err) + } + + generators := map[string]generator.Generator{ + "expression": generator.NewExpressionValueGenerator(rand.New(rand.NewSource(1337))), + } + processor := NewProcessor(generators) + + // Define custom parameter for the transformation: + addParameter(&template, makeParameter("CUSTOM_PARAM1", "1", "", false)) + + // Transform the template config into the result config + errs := processor.Process(&template) + if len(errs) > 0 { + t.Fatalf("unexpected error: %v", errs) + } + result, err := runtime.Encode(codecFactory.LegacyCodec(templatev1.GroupVersion), &template) + if err != nil { + t.Fatalf("unexpected error during encoding Config: %#v", err) + } + exp, _ := runtime.Encode(codecFactory.LegacyCodec(templatev1.GroupVersion), &expectedTemplate) + + if string(result) != string(exp) { + t.Errorf("unexpected output: %s", diff.StringDiff(string(exp), string(result))) + } +} + +// addParameter adds new custom parameter to the Template. It overrides +// the existing parameter, if already defined. 
+func addParameter(t *templatev1.Template, param templatev1.Parameter) { + if existing := GetParameterByName(t, param.Name); existing != nil { + *existing = param + } else { + t.Parameters = append(t.Parameters, param) + } +} + +func TestAddConfigLabels(t *testing.T) { + var nilLabels map[string]string + + testCases := []struct { + obj runtime.Object + addLabels map[string]string + err bool + expectedLabels map[string]string + }{ + { // [0] Test nil + nil => nil + obj: &corev1.Pod{}, + addLabels: nilLabels, + err: false, + expectedLabels: nilLabels, + }, + { // [1] Test nil + empty labels => empty labels + obj: &corev1.Pod{}, + addLabels: map[string]string{}, + err: false, + expectedLabels: map[string]string{}, + }, + { // [2] Test obj.Labels + nil => obj.Labels + obj: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + }, + addLabels: nilLabels, + err: false, + expectedLabels: map[string]string{"foo": "bar"}, + }, + { // [3] Test obj.Labels + empty labels => obj.Labels + obj: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}, + }, + addLabels: map[string]string{}, + err: false, + expectedLabels: map[string]string{"foo": "bar"}, + }, + { // [4] Test nil + addLabels => addLabels + obj: &corev1.Pod{}, + addLabels: map[string]string{"foo": "bar"}, + err: false, + expectedLabels: map[string]string{"foo": "bar"}, + }, + { // [5] Test obj.labels + addLabels => expectedLabels + obj: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"baz": ""}}, + }, + addLabels: map[string]string{"foo": "bar"}, + err: false, + expectedLabels: map[string]string{"foo": "bar", "baz": ""}, + }, + { // [6] Test conflicting keys with the same value + obj: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "same value"}}, + }, + addLabels: map[string]string{"foo": "same value"}, + err: false, + expectedLabels: map[string]string{"foo": "same value"}, + }, + { // [7] 
Test conflicting keys with a different value + obj: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "first value"}}, + }, + addLabels: map[string]string{"foo": "second value"}, + err: false, + expectedLabels: map[string]string{"foo": "second value"}, + }, + { // [8] Test conflicting keys with the same value in ReplicationController nested labels + obj: &corev1.ReplicationController{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"foo": "same value"}, + }, + Spec: corev1.ReplicationControllerSpec{ + Template: &corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{}, + }, + }, + }, + }, + addLabels: map[string]string{"foo": "same value"}, + err: false, + expectedLabels: map[string]string{"foo": "same value"}, + }, + { // [9] Test adding labels to a DeploymentConfig object + obj: &appsv1.DeploymentConfig{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"foo": "first value"}, + }, + Spec: appsv1.DeploymentConfigSpec{ + Template: &corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"foo": "first value"}, + }, + }, + }, + }, + addLabels: map[string]string{"bar": "second value"}, + err: false, + expectedLabels: map[string]string{"foo": "first value", "bar": "second value"}, + }, + } + + for i, test := range testCases { + err := addObjectLabels(test.obj, test.addLabels) + if err != nil && !test.err { + t.Errorf("Unexpected error while setting labels on testCase[%v]: %v.", i, err) + } else if err == nil && test.err { + t.Errorf("Unexpected non-error while setting labels on testCase[%v].", i) + } + + accessor, err := meta.Accessor(test.obj) + if err != nil { + t.Error(err) + } + metaLabels := accessor.GetLabels() + if e, a := test.expectedLabels, metaLabels; !reflect.DeepEqual(e, a) { + t.Errorf("Unexpected labels on testCase[%v]. 
Expected: %#v, got: %#v.", i, e, a) + } + + // must not add any new nested labels + switch objType := test.obj.(type) { + case *corev1.ReplicationController: + if e, a := map[string]string{}, objType.Spec.Template.Labels; !reflect.DeepEqual(e, a) { + t.Errorf("Unexpected labels on testCase[%v]. Expected: %#v, got: %#v.", i, e, a) + } + case *appsv1.DeploymentConfig: + if e, a := test.expectedLabels, objType.Spec.Template.Labels; !reflect.DeepEqual(e, a) { + t.Errorf("Unexpected labels on testCase[%v]. Expected: %#v, got: %#v.", i, e, a) + } + } + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/testdata/guestbook.json b/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/testdata/guestbook.json new file mode 100644 index 000000000..146ecc87a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/testdata/guestbook.json @@ -0,0 +1,305 @@ +{ + "kind": "Template", + "apiVersion": "template.openshift.io/v1", + "metadata": { + "name": "guestbook-example", + "creationTimestamp": null, + "annotations": { + "openshift.io/display-name": "Guestbook Example", + "description": "Example shows how to build a simple multi-tier application using Kubernetes and Docker" + } + }, + "message": "Your admin credentials are ${ADMIN_USERNAME}:${ADMIN_PASSWORD}", + "objects": [ + { + "kind": "Route", + "apiVersion": "v1", + "metadata": { + "name": "frontend-route", + "creationTimestamp": null + }, + "spec": { + "host": "guestbook.example.com", + "to": { + "kind": "Service", + "name": "frontend-service" + } + }, + "status": {} + }, + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "frontend-service", + "creationTimestamp": null + }, + "spec": { + "ports": [ + { + "protocol": "TCP", + "port": 5432, + "targetPort": 5432, + "nodePort": 0 + } + ], + "selector": { + "name": "frontend-service" + }, + "type": "ClusterIP", + "sessionAffinity": "None" + }, + "status": { + 
"loadBalancer": {} + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "redis-master", + "creationTimestamp": null + }, + "spec": { + "ports": [ + { + "protocol": "TCP", + "port": 10000, + "targetPort": 10000, + "nodePort": 0 + } + ], + "selector": { + "name": "redis-master" + }, + "type": "ClusterIP", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + }, + { + "kind": "Service", + "apiVersion": "v1", + "metadata": { + "name": "${SLAVE_SERVICE_NAME}", + "creationTimestamp": null + }, + "spec": { + "ports": [ + { + "protocol": "TCP", + "port": 10001, + "targetPort": 10001, + "nodePort": 0 + } + ], + "selector": { + "name": "${SLAVE_SERVICE_NAME}" + }, + "type": "ClusterIP", + "sessionAffinity": "None" + }, + "status": { + "loadBalancer": {} + } + }, + { + "kind": "Pod", + "apiVersion": "v1", + "metadata": { + "name": "redis-master", + "creationTimestamp": null, + "labels": { + "name": "redis-master" + } + }, + "spec": { + "containers": [ + { + "name": "master", + "image": "dockerfile/redis", + "ports": [ + { + "containerPort": 6379, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "REDIS_PASSWORD", + "value": "${REDIS_PASSWORD}" + } + ], + "resources": {}, + "terminationMessagePath": "/dev/termination-log", + "imagePullPolicy": "IfNotPresent", + "capabilities": {}, + "securityContext": { + "capabilities": {}, + "privileged": false + } + } + ], + "restartPolicy": "Always", + "dnsPolicy": "ClusterFirst", + "serviceAccount": "" + }, + "status": {} + }, + { + "kind": "ReplicationController", + "apiVersion": "v1", + "metadata": { + "name": "guestbook", + "creationTimestamp": null, + "labels": { + "name": "frontend-service" + } + }, + "spec": { + "replicas": 3, + "selector": { + "name": "frontend-service" + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "name": "frontend-service" + } + }, + "spec": { + "containers": [ + { + "name": "php-redis", + "image": "brendanburns/php-redis", + 
"ports": [ + { + "hostPort": 8000, + "containerPort": 80, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "ADMIN_USERNAME", + "value": "${ADMIN_USERNAME}" + }, + { + "name": "ADMIN_PASSWORD", + "value": "${ADMIN_PASSWORD}" + }, + { + "name": "REDIS_PASSWORD", + "value": "${REDIS_PASSWORD}" + } + ], + "resources": {}, + "terminationMessagePath": "/dev/termination-log", + "imagePullPolicy": "IfNotPresent", + "capabilities": {}, + "securityContext": { + "capabilities": {}, + "privileged": false + } + } + ], + "restartPolicy": "Always", + "dnsPolicy": "ClusterFirst", + "serviceAccount": "" + } + } + }, + "status": { + "replicas": 0 + } + }, + { + "kind": "ReplicationController", + "apiVersion": "v1", + "metadata": { + "name": "${SLAVE_SERVICE_NAME}", + "creationTimestamp": null, + "labels": { + "name": "${SLAVE_SERVICE_NAME}" + } + }, + "spec": { + "replicas": 2, + "selector": { + "name": "${SLAVE_SERVICE_NAME}" + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "name": "${SLAVE_SERVICE_NAME}" + } + }, + "spec": { + "containers": [ + { + "name": "slave", + "image": "brendanburns/${SLAVE_SERVICE_NAME}", + "ports": [ + { + "hostPort": 6380, + "containerPort": 6379, + "protocol": "TCP" + } + ], + "env": [ + { + "name": "REDIS_PASSWORD", + "value": "${REDIS_PASSWORD}" + } + ], + "resources": {}, + "terminationMessagePath": "/dev/termination-log", + "imagePullPolicy": "IfNotPresent", + "capabilities": {}, + "securityContext": { + "capabilities": {}, + "privileged": false + } + } + ], + "restartPolicy": "Always", + "dnsPolicy": "ClusterFirst", + "serviceAccount": "" + } + } + }, + "status": { + "replicas": 0 + } + } + ], + "parameters": [ + { + "name": "ADMIN_USERNAME", + "description": "Guestbook administrator username", + "generate": "expression", + "from": "admin[A-Z0-9]{3}" + }, + { + "name": "ADMIN_PASSWORD", + "description": "Guestbook administrator password", + "generate": "expression", + "from": "[a-zA-Z0-9]{8}" + }, + { + "name": 
"REDIS_PASSWORD", + "description": "Redis password", + "generate": "expression", + "from": "[a-zA-Z0-9]{8}" + }, + { + "name": "SLAVE_SERVICE_NAME", + "description": "Slave Service name", + "value": "redis-slave" + } + ] +} \ No newline at end of file diff --git a/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/testdata/guestbook_list.json b/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/testdata/guestbook_list.json new file mode 100644 index 000000000..d2bdcd7aa --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/template/templateprocessing/testdata/guestbook_list.json @@ -0,0 +1,312 @@ +{ + "kind": "Template", + "apiVersion": "template.openshift.io/v1", + "metadata": { + "name": "guestbook-example", + "creationTimestamp": null, + "annotations": { + "openshift.io/display-name": "Guestbook Example", + "description": "Example shows how to build a simple multi-tier application using Kubernetes and Docker" + } + }, + "message": "Your admin credentials are adminQ3H:dwNJiJwW", + "objects": [ + { + "apiVersion": "route.openshift.io/v1", + "kind": "Route", + "metadata": { + "creationTimestamp": null, + "name": "frontend-route" + }, + "spec": { + "host": "guestbook.example.com", + "to": { + "kind": "Service", + "name": "frontend-service" + } + }, + "status": {} + }, + { + "apiVersion": "v1", + "kind": "Service", + "metadata": { + "creationTimestamp": null, + "name": "frontend-service" + }, + "spec": { + "ports": [ + { + "nodePort": 0, + "port": 5432, + "protocol": "TCP", + "targetPort": 5432 + } + ], + "selector": { + "name": "frontend-service" + }, + "sessionAffinity": "None", + "type": "ClusterIP" + }, + "status": { + "loadBalancer": {} + } + }, + { + "apiVersion": "v1", + "kind": "Service", + "metadata": { + "creationTimestamp": null, + "name": "redis-master" + }, + "spec": { + "ports": [ + { + "nodePort": 0, + "port": 10000, + "protocol": "TCP", + "targetPort": 10000 + } + ], + "selector": { + "name": 
"redis-master" + }, + "sessionAffinity": "None", + "type": "ClusterIP" + }, + "status": { + "loadBalancer": {} + } + }, + { + "apiVersion": "v1", + "kind": "Service", + "metadata": { + "creationTimestamp": null, + "name": "redis-slave" + }, + "spec": { + "ports": [ + { + "nodePort": 0, + "port": 10001, + "protocol": "TCP", + "targetPort": 10001 + } + ], + "selector": { + "name": "redis-slave" + }, + "sessionAffinity": "None", + "type": "ClusterIP" + }, + "status": { + "loadBalancer": {} + } + }, + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "creationTimestamp": null, + "labels": { + "name": "redis-master" + }, + "name": "redis-master" + }, + "spec": { + "containers": [ + { + "capabilities": {}, + "env": [ + { + "name": "REDIS_PASSWORD", + "value": "P8vxbV4C" + } + ], + "image": "dockerfile/redis", + "imagePullPolicy": "IfNotPresent", + "name": "master", + "ports": [ + { + "containerPort": 6379, + "protocol": "TCP" + } + ], + "resources": {}, + "securityContext": { + "capabilities": {}, + "privileged": false + }, + "terminationMessagePath": "/dev/termination-log" + } + ], + "dnsPolicy": "ClusterFirst", + "restartPolicy": "Always", + "serviceAccount": "" + }, + "status": {} + }, + { + "apiVersion": "v1", + "kind": "ReplicationController", + "metadata": { + "creationTimestamp": null, + "labels": { + "name": "frontend-service" + }, + "name": "guestbook" + }, + "spec": { + "replicas": 3, + "selector": { + "name": "frontend-service" + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "name": "frontend-service" + } + }, + "spec": { + "containers": [ + { + "capabilities": {}, + "env": [ + { + "name": "ADMIN_USERNAME", + "value": "adminQ3H" + }, + { + "name": "ADMIN_PASSWORD", + "value": "dwNJiJwW" + }, + { + "name": "REDIS_PASSWORD", + "value": "P8vxbV4C" + } + ], + "image": "brendanburns/php-redis", + "imagePullPolicy": "IfNotPresent", + "name": "php-redis", + "ports": [ + { + "containerPort": 80, + "hostPort": 8000, + 
"protocol": "TCP" + } + ], + "resources": {}, + "securityContext": { + "capabilities": {}, + "privileged": false + }, + "terminationMessagePath": "/dev/termination-log" + } + ], + "dnsPolicy": "ClusterFirst", + "restartPolicy": "Always", + "serviceAccount": "" + } + } + }, + "status": { + "replicas": 0 + } + }, + { + "apiVersion": "v1", + "kind": "ReplicationController", + "metadata": { + "creationTimestamp": null, + "labels": { + "name": "redis-slave" + }, + "name": "redis-slave" + }, + "spec": { + "replicas": 2, + "selector": { + "name": "redis-slave" + }, + "template": { + "metadata": { + "creationTimestamp": null, + "labels": { + "name": "redis-slave" + } + }, + "spec": { + "containers": [ + { + "capabilities": {}, + "env": [ + { + "name": "REDIS_PASSWORD", + "value": "P8vxbV4C" + } + ], + "image": "brendanburns/redis-slave", + "imagePullPolicy": "IfNotPresent", + "name": "slave", + "ports": [ + { + "containerPort": 6379, + "hostPort": 6380, + "protocol": "TCP" + } + ], + "resources": {}, + "securityContext": { + "capabilities": {}, + "privileged": false + }, + "terminationMessagePath": "/dev/termination-log" + } + ], + "dnsPolicy": "ClusterFirst", + "restartPolicy": "Always", + "serviceAccount": "" + } + } + }, + "status": { + "replicas": 0 + } + } + ], + "parameters": [ + { + "name": "ADMIN_USERNAME", + "description": "Guestbook administrator username", + "value": "adminQ3H", + "generate": "expression", + "from": "admin[A-Z0-9]{3}" + }, + { + "name": "ADMIN_PASSWORD", + "description": "Guestbook administrator password", + "value": "dwNJiJwW", + "generate": "expression", + "from": "[a-zA-Z0-9]{8}" + }, + { + "name": "REDIS_PASSWORD", + "description": "Redis password", + "value": "P8vxbV4C", + "generate": "expression", + "from": "[a-zA-Z0-9]{8}" + }, + { + "name": "SLAVE_SERVICE_NAME", + "description": "Slave Service name", + "value": "redis-slave" + }, + { + "name": "CUSTOM_PARAM1", + "value": "1" + } + ] +} \ No newline at end of file diff --git 
a/vendor/github.com/openshift/library-go/pkg/template/templateprocessingclient/dynamic_process.go b/vendor/github.com/openshift/library-go/pkg/template/templateprocessingclient/dynamic_process.go new file mode 100644 index 000000000..03fc79b97 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/template/templateprocessingclient/dynamic_process.go @@ -0,0 +1,59 @@ +package templateprocessingclient + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/dynamic" + + templatev1 "github.com/openshift/api/template/v1" +) + +type DynamicTemplateProcessor interface { + ProcessToList(template *templatev1.Template) (*unstructured.UnstructuredList, error) + ProcessToListFromUnstructured(unstructuredTemplate *unstructured.Unstructured) (*unstructured.UnstructuredList, error) +} + +type dynamicTemplateProcessor struct { + client dynamic.Interface +} + +func NewDynamicTemplateProcessor(client dynamic.Interface) DynamicTemplateProcessor { + return &dynamicTemplateProcessor{client: client} +} + +func (c *dynamicTemplateProcessor) ProcessToList(template *templatev1.Template) (*unstructured.UnstructuredList, error) { + versionedTemplate, err := scheme.ConvertToVersion(template, templatev1.GroupVersion) + if err != nil { + return nil, err + } + unstructuredTemplate, err := runtime.DefaultUnstructuredConverter.ToUnstructured(versionedTemplate) + if err != nil { + return nil, err + } + + return c.ProcessToListFromUnstructured(&unstructured.Unstructured{Object: unstructuredTemplate}) +} + +func (c *dynamicTemplateProcessor) ProcessToListFromUnstructured(unstructuredTemplate *unstructured.Unstructured) (*unstructured.UnstructuredList, error) { + processedTemplate, err := c.client.Resource(templatev1.GroupVersion.WithResource("processedtemplates")). 
+ Namespace("default").Create(unstructuredTemplate, metav1.CreateOptions{}) + if err != nil { + return nil, err + } + + // convert the template into something we iterate over as a list + if err := unstructured.SetNestedField(processedTemplate.Object, processedTemplate.Object["objects"], "items"); err != nil { + return nil, err + } + return processedTemplate.ToList() +} + +var ( + scheme = runtime.NewScheme() +) + +func init() { + utilruntime.Must(templatev1.Install(scheme)) +} diff --git a/vendor/github.com/openshift/library-go/pkg/unidling/unidlingclient/scale.go b/vendor/github.com/openshift/library-go/pkg/unidling/unidlingclient/scale.go new file mode 100644 index 000000000..ec814da72 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/unidling/unidlingclient/scale.go @@ -0,0 +1,193 @@ +package unidlingclient + +import ( + "fmt" + + autoscalingv1 "k8s.io/api/autoscaling/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/scale" + "k8s.io/klog" + + appsv1 "github.com/openshift/api/apps/v1" + unidlingapi "github.com/openshift/api/unidling/v1alpha1" + appsclient "github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1" +) + +const legacyGroupName = "" + +// TODO: remove the below functions once we get a way to mark/unmark an object as idled +// via the scale endpoint + +type AnnotationFunc func(currentReplicas int32, annotations map[string]string) + +func NewScaleAnnotater(scales scale.ScalesGetter, mapper meta.RESTMapper, dcs appsclient.DeploymentConfigsGetter, rcs corev1client.ReplicationControllersGetter, changeAnnots AnnotationFunc) *ScaleAnnotater { + return &ScaleAnnotater{ + mapper: mapper, + scales: scales, + dcs: dcs, + rcs: rcs, 
+ ChangeAnnotations: changeAnnots, + } +} + +type ScaleAnnotater struct { + mapper meta.RESTMapper + scales scale.ScalesGetter + dcs appsclient.DeploymentConfigsGetter + rcs corev1client.ReplicationControllersGetter + ChangeAnnotations AnnotationFunc +} + +// ScaleUpdater implements a method "Update" that knows how to update a given object +type ScaleUpdater interface { + Update(*ScaleAnnotater, runtime.Object, *autoscalingv1.Scale) error +} + +// ScaleUpdater implements unidlingutil.ScaleUpdater +type scaleUpdater struct { + encoder runtime.Encoder + namespace string + dcGetter appsclient.DeploymentConfigsGetter + rcGetter corev1client.ReplicationControllersGetter +} + +func NewScaleUpdater(encoder runtime.Encoder, namespace string, dcGetter appsclient.DeploymentConfigsGetter, rcGetter corev1client.ReplicationControllersGetter) ScaleUpdater { + return scaleUpdater{ + encoder: encoder, + namespace: namespace, + dcGetter: dcGetter, + rcGetter: rcGetter, + } +} + +func (s scaleUpdater) Update(annotator *ScaleAnnotater, obj runtime.Object, scale *autoscalingv1.Scale) error { + var ( + err error + patchBytes, originalObj, newObj []byte + ) + + originalObj, err = runtime.Encode(s.encoder, obj) + if err != nil { + return err + } + + switch typedObj := obj.(type) { + case *appsv1.DeploymentConfig: + if typedObj.Annotations == nil { + typedObj.Annotations = make(map[string]string) + } + + annotator.ChangeAnnotations(typedObj.Spec.Replicas, typedObj.Annotations) + typedObj.Spec.Replicas = scale.Spec.Replicas + + newObj, err = runtime.Encode(s.encoder, typedObj) + if err != nil { + return err + } + + patchBytes, err = strategicpatch.CreateTwoWayMergePatch(originalObj, newObj, &appsv1.DeploymentConfig{}) + if err != nil { + return err + } + + _, err = s.dcGetter.DeploymentConfigs(s.namespace).Patch(typedObj.Name, types.StrategicMergePatchType, patchBytes) + case *corev1.ReplicationController: + if typedObj.Annotations == nil { + typedObj.Annotations = make(map[string]string) 
+ } + + annotator.ChangeAnnotations(*typedObj.Spec.Replicas, typedObj.Annotations) + typedObj.Spec.Replicas = &scale.Spec.Replicas + + newObj, err = runtime.Encode(s.encoder, typedObj) + if err != nil { + return err + } + + patchBytes, err = strategicpatch.CreateTwoWayMergePatch(originalObj, newObj, &corev1.ReplicationController{}) + if err != nil { + return err + } + + _, err = s.rcGetter.ReplicationControllers(s.namespace).Patch(typedObj.Name, types.StrategicMergePatchType, patchBytes) + } + return err +} + +// getObjectWithScale either fetches a known type of object and constructs a Scale from that, or uses the scale +// subresource to fetch a Scale by itself. +func (c *ScaleAnnotater) GetObjectWithScale(namespace string, ref unidlingapi.CrossGroupObjectReference) (runtime.Object, *autoscalingv1.Scale, error) { + var obj runtime.Object + var err error + var scale *autoscalingv1.Scale + + switch { + case ref.Kind == "DeploymentConfig" && (ref.Group == appsv1.GroupName || ref.Group == legacyGroupName): + var dc *appsv1.DeploymentConfig + dc, err = c.dcs.DeploymentConfigs(namespace).Get(ref.Name, metav1.GetOptions{}) + + if err != nil { + return nil, nil, err + } + obj = dc + case ref.Kind == "ReplicationController" && ref.Group == corev1.GroupName: + var rc *corev1.ReplicationController + rc, err = c.rcs.ReplicationControllers(namespace).Get(ref.Name, metav1.GetOptions{}) + if err != nil { + return nil, nil, err + } + obj = rc + } + + mappings, err := c.mapper.RESTMappings(schema.GroupKind{Group: ref.Group, Kind: ref.Kind}) + if err != nil { + return nil, nil, err + } + for _, mapping := range mappings { + scale, err = c.scales.Scales(namespace).Get(mapping.Resource.GroupResource(), ref.Name) + if err != nil { + return nil, nil, err + } + } + + return obj, scale, err +} + +// updateObjectScale updates the scale of an object and removes unidling annotations for objects of a know type. 
+// For objects of an unknown type, it scales the object using the scale subresource +// (and does not change annotations). +func (c *ScaleAnnotater) UpdateObjectScale(updater ScaleUpdater, namespace string, ref unidlingapi.CrossGroupObjectReference, obj runtime.Object, scale *autoscalingv1.Scale) error { + var err error + + mappings, err := c.mapper.RESTMappings(schema.GroupKind{Group: ref.Group, Kind: ref.Kind}) + if err != nil { + return err + } + if len(mappings) == 0 { + return fmt.Errorf("cannot locate resource for %s.%s/%s", ref.Kind, ref.Group, ref.Name) + } + + for _, mapping := range mappings { + if obj == nil { + _, err = c.scales.Scales(namespace).Update(mapping.Resource.GroupResource(), scale) + return err + } + + switch obj.(type) { + case *appsv1.DeploymentConfig, *corev1.ReplicationController: + return updater.Update(c, obj, scale) + default: + klog.V(2).Infof("Unidling unknown type %t: using scale interface and not removing annotations", obj) + _, err = c.scales.Scales(namespace).Update(mapping.Resource.GroupResource(), scale) + } + } + + return err +} diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md index d86768a2c..9fdbacd3c 100644 --- a/vendor/google.golang.org/appengine/README.md +++ b/vendor/google.golang.org/appengine/README.md @@ -71,3 +71,30 @@ A few APIs were cleaned up, and there are some differences: [blobstore package](https://google.golang.org/appengine/blobstore). * `appengine/socket` is not required on App Engine flexible environment / Managed VMs. Use the standard `net` package instead. + +## Key Encode/Decode compatibiltiy to help with datastore library migrations + +Key compatibility updates have been added to help customers transition from google.golang.org/appengine/datastore to cloud.google.com/go/datastore. +The `EnableKeyConversion` enables automatic conversion from a key encoded with cloud.google.com/go/datastore to google.golang.org/appengine/datastore key type. 
+ +### Enabling key conversion + +Enable key conversion by calling `EnableKeyConversion(ctx)` in the `/_ah/start` handler for basic and manual scaling or any handler in automatic scaling. + +#### 1. Basic or manual scaling + +This start handler will enable key conversion for all handlers in the service. + +``` +http.HandleFunc("/_ah/start", func(w http.ResponseWriter, r *http.Request) { + datastore.EnableKeyConversion(appengine.NewContext(r)) +}) +``` + +#### 2. Automatic scaling + +`/_ah/start` is not supported for automatic scaling and `/_ah/warmup` is not guaranteed to run, so you must call `datastore.EnableKeyConversion(appengine.NewContext(r))` +before you use code that needs key conversion. + +You may want to add this to each of your handlers, or introduce middleware where it's called. +`EnableKeyConversion` is safe for concurrent use. Any call to it after the first is ignored. \ No newline at end of file diff --git a/vendor/google.golang.org/appengine/datastore/internal/cloudkey/cloudkey.go b/vendor/google.golang.org/appengine/datastore/internal/cloudkey/cloudkey.go new file mode 100644 index 000000000..643d4049c --- /dev/null +++ b/vendor/google.golang.org/appengine/datastore/internal/cloudkey/cloudkey.go @@ -0,0 +1,120 @@ +// Copyright 2019 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package cloudpb is a subset of types and functions, copied from cloud.google.com/go/datastore. +// +// They are copied here to provide compatibility to decode keys generated by the cloud.google.com/go/datastore package. 
+package cloudkey + +import ( + "encoding/base64" + "errors" + "strings" + + "github.com/golang/protobuf/proto" + cloudpb "google.golang.org/appengine/datastore/internal/cloudpb" +) + +///////////////////////////////////////////////////////////////////// +// Code below is copied from https://github.com/googleapis/google-cloud-go/blob/master/datastore/datastore.go +///////////////////////////////////////////////////////////////////// + +var ( + // ErrInvalidKey is returned when an invalid key is presented. + ErrInvalidKey = errors.New("datastore: invalid key") +) + +///////////////////////////////////////////////////////////////////// +// Code below is copied from https://github.com/googleapis/google-cloud-go/blob/master/datastore/key.go +///////////////////////////////////////////////////////////////////// + +// Key represents the datastore key for a stored entity. +type Key struct { + // Kind cannot be empty. + Kind string + // Either ID or Name must be zero for the Key to be valid. + // If both are zero, the Key is incomplete. + ID int64 + Name string + // Parent must either be a complete Key or nil. + Parent *Key + + // Namespace provides the ability to partition your data for multiple + // tenants. In most cases, it is not necessary to specify a namespace. + // See docs on datastore multitenancy for details: + // https://cloud.google.com/datastore/docs/concepts/multitenancy + Namespace string +} + +// DecodeKey decodes a key from the opaque representation returned by Encode. +func DecodeKey(encoded string) (*Key, error) { + // Re-add padding. + if m := len(encoded) % 4; m != 0 { + encoded += strings.Repeat("=", 4-m) + } + + b, err := base64.URLEncoding.DecodeString(encoded) + if err != nil { + return nil, err + } + + pKey := new(cloudpb.Key) + if err := proto.Unmarshal(b, pKey); err != nil { + return nil, err + } + return protoToKey(pKey) +} + +// valid returns whether the key is valid. 
+func (k *Key) valid() bool { + if k == nil { + return false + } + for ; k != nil; k = k.Parent { + if k.Kind == "" { + return false + } + if k.Name != "" && k.ID != 0 { + return false + } + if k.Parent != nil { + if k.Parent.Incomplete() { + return false + } + if k.Parent.Namespace != k.Namespace { + return false + } + } + } + return true +} + +// Incomplete reports whether the key does not refer to a stored entity. +func (k *Key) Incomplete() bool { + return k.Name == "" && k.ID == 0 +} + +// protoToKey decodes a protocol buffer representation of a key into an +// equivalent *Key object. If the key is invalid, protoToKey will return the +// invalid key along with ErrInvalidKey. +func protoToKey(p *cloudpb.Key) (*Key, error) { + var key *Key + var namespace string + if partition := p.PartitionId; partition != nil { + namespace = partition.NamespaceId + } + for _, el := range p.Path { + key = &Key{ + Namespace: namespace, + Kind: el.Kind, + ID: el.GetId(), + Name: el.GetName(), + Parent: key, + } + } + if !key.valid() { // Also detects key == nil. + return key, ErrInvalidKey + } + return key, nil +} diff --git a/vendor/google.golang.org/appengine/datastore/internal/cloudpb/entity.pb.go b/vendor/google.golang.org/appengine/datastore/internal/cloudpb/entity.pb.go new file mode 100644 index 000000000..af8195f3f --- /dev/null +++ b/vendor/google.golang.org/appengine/datastore/internal/cloudpb/entity.pb.go @@ -0,0 +1,344 @@ +// Copyright 2019 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package cloudpb is a subset of protobufs, copied from google.golang.org/genproto/googleapis/datastore/v1. +// +// They are copied here to provide compatibility to decode keys generated by the cloud.google.com/go/datastore package. +package cloudpb + +import ( + "fmt" + + "github.com/golang/protobuf/proto" +) + +// A partition ID identifies a grouping of entities. 
The grouping is always +// by project and namespace, however the namespace ID may be empty. +// +// A partition ID contains several dimensions: +// project ID and namespace ID. +// +// Partition dimensions: +// +// - May be `""`. +// - Must be valid UTF-8 bytes. +// - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` +// If the value of any dimension matches regex `__.*__`, the partition is +// reserved/read-only. +// A reserved/read-only partition ID is forbidden in certain documented +// contexts. +// +// Foreign partition IDs (in which the project ID does +// not match the context project ID ) are discouraged. +// Reads and writes of foreign partition IDs may fail if the project is not in +// an active state. +type PartitionId struct { + // The ID of the project to which the entities belong. + ProjectId string `protobuf:"bytes,2,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` + // If not empty, the ID of the namespace to which the entities belong. + NamespaceId string `protobuf:"bytes,4,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PartitionId) Reset() { *m = PartitionId{} } +func (m *PartitionId) String() string { return proto.CompactTextString(m) } +func (*PartitionId) ProtoMessage() {} +func (*PartitionId) Descriptor() ([]byte, []int) { + return fileDescriptor_entity_096a297364b049a5, []int{0} +} +func (m *PartitionId) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PartitionId.Unmarshal(m, b) +} +func (m *PartitionId) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PartitionId.Marshal(b, m, deterministic) +} +func (dst *PartitionId) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartitionId.Merge(dst, src) +} +func (m *PartitionId) XXX_Size() int { + return xxx_messageInfo_PartitionId.Size(m) +} +func (m *PartitionId) 
XXX_DiscardUnknown() { + xxx_messageInfo_PartitionId.DiscardUnknown(m) +} + +var xxx_messageInfo_PartitionId proto.InternalMessageInfo + +func (m *PartitionId) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *PartitionId) GetNamespaceId() string { + if m != nil { + return m.NamespaceId + } + return "" +} + +// A unique identifier for an entity. +// If a key's partition ID or any of its path kinds or names are +// reserved/read-only, the key is reserved/read-only. +// A reserved/read-only key is forbidden in certain documented contexts. +type Key struct { + // Entities are partitioned into subsets, currently identified by a project + // ID and namespace ID. + // Queries are scoped to a single partition. + PartitionId *PartitionId `protobuf:"bytes,1,opt,name=partition_id,json=partitionId,proto3" json:"partition_id,omitempty"` + // The entity path. + // An entity path consists of one or more elements composed of a kind and a + // string or numerical identifier, which identify entities. The first + // element identifies a _root entity_, the second element identifies + // a _child_ of the root entity, the third element identifies a child of the + // second entity, and so forth. The entities identified by all prefixes of + // the path are called the element's _ancestors_. + // + // An entity path is always fully complete: *all* of the entity's ancestors + // are required to be in the path along with the entity identifier itself. + // The only exception is that in some documented cases, the identifier in the + // last path element (for the entity) itself may be omitted. For example, + // the last path element of the key of `Mutation.insert` may have no + // identifier. + // + // A path can never be empty, and a path can have at most 100 elements. 
+ Path []*Key_PathElement `protobuf:"bytes,2,rep,name=path,proto3" json:"path,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Key) Reset() { *m = Key{} } +func (m *Key) String() string { return proto.CompactTextString(m) } +func (*Key) ProtoMessage() {} +func (*Key) Descriptor() ([]byte, []int) { + return fileDescriptor_entity_096a297364b049a5, []int{1} +} +func (m *Key) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Key.Unmarshal(m, b) +} +func (m *Key) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Key.Marshal(b, m, deterministic) +} +func (dst *Key) XXX_Merge(src proto.Message) { + xxx_messageInfo_Key.Merge(dst, src) +} +func (m *Key) XXX_Size() int { + return xxx_messageInfo_Key.Size(m) +} +func (m *Key) XXX_DiscardUnknown() { + xxx_messageInfo_Key.DiscardUnknown(m) +} + +// A (kind, ID/name) pair used to construct a key path. +// +// If either name or ID is set, the element is complete. +// If neither is set, the element is incomplete. +type Key_PathElement struct { + // The kind of the entity. + // A kind matching regex `__.*__` is reserved/read-only. + // A kind must not contain more than 1500 bytes when UTF-8 encoded. + // Cannot be `""`. + Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + // The type of ID. 
+ // + // Types that are valid to be assigned to IdType: + // *Key_PathElement_Id + // *Key_PathElement_Name + IdType isKey_PathElement_IdType `protobuf_oneof:"id_type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Key_PathElement) Reset() { *m = Key_PathElement{} } +func (m *Key_PathElement) String() string { return proto.CompactTextString(m) } +func (*Key_PathElement) ProtoMessage() {} +func (*Key_PathElement) Descriptor() ([]byte, []int) { + return fileDescriptor_entity_096a297364b049a5, []int{1, 0} +} +func (m *Key_PathElement) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Key_PathElement.Unmarshal(m, b) +} +func (m *Key_PathElement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Key_PathElement.Marshal(b, m, deterministic) +} +func (dst *Key_PathElement) XXX_Merge(src proto.Message) { + xxx_messageInfo_Key_PathElement.Merge(dst, src) +} +func (m *Key_PathElement) XXX_Size() int { + return xxx_messageInfo_Key_PathElement.Size(m) +} +func (m *Key_PathElement) XXX_DiscardUnknown() { + xxx_messageInfo_Key_PathElement.DiscardUnknown(m) +} + +var xxx_messageInfo_Key_PathElement proto.InternalMessageInfo + +func (m *Key_PathElement) GetKind() string { + if m != nil { + return m.Kind + } + return "" +} + +type isKey_PathElement_IdType interface { + isKey_PathElement_IdType() +} + +type Key_PathElement_Id struct { + Id int64 `protobuf:"varint,2,opt,name=id,proto3,oneof"` +} + +type Key_PathElement_Name struct { + Name string `protobuf:"bytes,3,opt,name=name,proto3,oneof"` +} + +func (*Key_PathElement_Id) isKey_PathElement_IdType() {} + +func (*Key_PathElement_Name) isKey_PathElement_IdType() {} + +func (m *Key_PathElement) GetIdType() isKey_PathElement_IdType { + if m != nil { + return m.IdType + } + return nil +} + +func (m *Key_PathElement) GetId() int64 { + if x, ok := m.GetIdType().(*Key_PathElement_Id); ok { + return x.Id + } + return 
0 +} + +func (m *Key_PathElement) GetName() string { + if x, ok := m.GetIdType().(*Key_PathElement_Name); ok { + return x.Name + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Key_PathElement) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Key_PathElement_OneofMarshaler, _Key_PathElement_OneofUnmarshaler, _Key_PathElement_OneofSizer, []interface{}{ + (*Key_PathElement_Id)(nil), + (*Key_PathElement_Name)(nil), + } +} + +func _Key_PathElement_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Key_PathElement) + // id_type + switch x := m.IdType.(type) { + case *Key_PathElement_Id: + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Id)) + case *Key_PathElement_Name: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Name) + case nil: + default: + return fmt.Errorf("Key_PathElement.IdType has unexpected type %T", x) + } + return nil +} + +func _Key_PathElement_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Key_PathElement) + switch tag { + case 2: // id_type.id + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.IdType = &Key_PathElement_Id{int64(x)} + return true, err + case 3: // id_type.name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.IdType = &Key_PathElement_Name{x} + return true, err + default: + return false, nil + } +} + +func _Key_PathElement_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Key_PathElement) + // id_type + switch x := m.IdType.(type) { + case *Key_PathElement_Id: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.Id)) + case *Key_PathElement_Name: + n += 1 // tag and wire + n += 
proto.SizeVarint(uint64(len(x.Name))) + n += len(x.Name) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +var fileDescriptor_entity_096a297364b049a5 = []byte{ + // 780 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x94, 0xff, 0x6e, 0xdc, 0x44, + 0x10, 0xc7, 0xed, 0xbb, 0x5c, 0x1a, 0x8f, 0xdd, 0xa4, 0x6c, 0x2a, 0x61, 0x02, 0x28, 0x26, 0x80, + 0x74, 0x02, 0xc9, 0x6e, 0xc2, 0x1f, 0x54, 0x14, 0xa4, 0x72, 0x25, 0xe0, 0x28, 0x15, 0x9c, 0x56, + 0x55, 0x24, 0x50, 0xa4, 0xd3, 0xde, 0x79, 0xeb, 0x2e, 0x67, 0xef, 0x5a, 0xf6, 0x3a, 0xaa, 0xdf, + 0x05, 0xf1, 0x00, 0x3c, 0x0a, 0x8f, 0x80, 0x78, 0x18, 0xb4, 0x3f, 0xec, 0x0b, 0xed, 0x35, 0xff, + 0x79, 0x67, 0x3e, 0xdf, 0xd9, 0xef, 0xec, 0xce, 0x1a, 0xa2, 0x5c, 0x88, 0xbc, 0xa0, 0x49, 0x46, + 0x24, 0x69, 0xa4, 0xa8, 0x69, 0x72, 0x73, 0x9a, 0x50, 0x2e, 0x99, 0xec, 0xe2, 0xaa, 0x16, 0x52, + 0xa0, 0x43, 0x43, 0xc4, 0x03, 0x11, 0xdf, 0x9c, 0x1e, 0x7d, 0x64, 0x65, 0xa4, 0x62, 0x09, 0xe1, + 0x5c, 0x48, 0x22, 0x99, 0xe0, 0x8d, 0x91, 0x0c, 0x59, 0xbd, 0x5a, 0xb6, 0x2f, 0x93, 0x46, 0xd6, + 0xed, 0x4a, 0xda, 0xec, 0xf1, 0x9b, 0x59, 0xc9, 0x4a, 0xda, 0x48, 0x52, 0x56, 0x16, 0x08, 0x2d, + 0x20, 0xbb, 0x8a, 0x26, 0x05, 0x91, 0x05, 0xcf, 0x4d, 0xe6, 0xe4, 0x17, 0xf0, 0xe7, 0xa4, 0x96, + 0x4c, 0x6d, 0x76, 0x91, 0xa1, 0x8f, 0x01, 0xaa, 0x5a, 0xfc, 0x4e, 0x57, 0x72, 0xc1, 0xb2, 0x70, + 0x14, 0xb9, 0x53, 0x0f, 0x7b, 0x36, 0x72, 0x91, 0xa1, 0x4f, 0x20, 0xe0, 0xa4, 0xa4, 0x4d, 0x45, + 0x56, 0x54, 0x01, 0x3b, 0x1a, 0xf0, 0x87, 0xd8, 0x45, 0x76, 0xf2, 0x8f, 0x0b, 0xe3, 0x4b, 0xda, + 0xa1, 0x67, 0x10, 0x54, 0x7d, 0x61, 0x85, 0xba, 0x91, 0x3b, 0xf5, 0xcf, 0xa2, 0x78, 0x4b, 0xef, + 0xf1, 0x2d, 0x07, 0xd8, 0xaf, 0x6e, 0xd9, 0x79, 0x0c, 0x3b, 0x15, 0x91, 0xaf, 0xc2, 0x51, 0x34, + 0x9e, 0xfa, 0x67, 0x9f, 0x6d, 0x15, 0x5f, 0xd2, 0x2e, 0x9e, 0x13, 0xf9, 0xea, 0xbc, 0xa0, 0x25, + 0xe5, 0x12, 0x6b, 0xc5, 0xd1, 0x0b, 0xd5, 0xd7, 0x10, 0x44, 0x08, 
0x76, 0xd6, 0x8c, 0x1b, 0x17, + 0x1e, 0xd6, 0xdf, 0xe8, 0x01, 0x8c, 0x6c, 0x8f, 0xe3, 0xd4, 0xc1, 0x23, 0x96, 0xa1, 0x87, 0xb0, + 0xa3, 0x5a, 0x09, 0xc7, 0x8a, 0x4a, 0x1d, 0xac, 0x57, 0x33, 0x0f, 0xee, 0xb1, 0x6c, 0xa1, 0x8e, + 0xee, 0xe4, 0x29, 0xc0, 0xf7, 0x75, 0x4d, 0xba, 0x2b, 0x52, 0xb4, 0x14, 0x9d, 0xc1, 0xee, 0x8d, + 0xfa, 0x68, 0x42, 0x57, 0xfb, 0x3b, 0xda, 0xea, 0x4f, 0xb3, 0xd8, 0x92, 0x27, 0x7f, 0x4c, 0x60, + 0x62, 0xd4, 0x4f, 0x00, 0x78, 0x5b, 0x14, 0x0b, 0x9d, 0x08, 0xfd, 0xc8, 0x9d, 0xee, 0x6f, 0x2a, + 0xf4, 0x37, 0x19, 0xff, 0xdc, 0x16, 0x85, 0xe6, 0x53, 0x07, 0x7b, 0xbc, 0x5f, 0xa0, 0xcf, 0xe1, + 0xfe, 0x52, 0x88, 0x82, 0x12, 0x6e, 0xf5, 0xaa, 0xb1, 0xbd, 0xd4, 0xc1, 0x81, 0x0d, 0x0f, 0x18, + 0xe3, 0x92, 0xe6, 0xb4, 0xb6, 0x58, 0xdf, 0x6d, 0x60, 0xc3, 0x06, 0xfb, 0x14, 0x82, 0x4c, 0xb4, + 0xcb, 0x82, 0x5a, 0x4a, 0xf5, 0xef, 0xa6, 0x0e, 0xf6, 0x4d, 0xd4, 0x40, 0xe7, 0x70, 0x30, 0x8c, + 0x95, 0xe5, 0x40, 0xdf, 0xe9, 0xdb, 0xa6, 0x5f, 0xf4, 0x5c, 0xea, 0xe0, 0xfd, 0x41, 0x64, 0xca, + 0x7c, 0x0d, 0xde, 0x9a, 0x76, 0xb6, 0xc0, 0x44, 0x17, 0x08, 0xdf, 0x75, 0xaf, 0xa9, 0x83, 0xf7, + 0xd6, 0xb4, 0x1b, 0x4c, 0x36, 0xb2, 0x66, 0x3c, 0xb7, 0xda, 0xf7, 0xec, 0x25, 0xf9, 0x26, 0x6a, + 0xa0, 0x63, 0x80, 0x65, 0x21, 0x96, 0x16, 0x41, 0x91, 0x3b, 0x0d, 0xd4, 0xc1, 0xa9, 0x98, 0x01, + 0xbe, 0x83, 0x83, 0x9c, 0x8a, 0x45, 0x25, 0x18, 0x97, 0x96, 0xda, 0xd3, 0x26, 0x0e, 0x7b, 0x13, + 0xea, 0xa2, 0xe3, 0xe7, 0x44, 0x3e, 0xe7, 0x79, 0xea, 0xe0, 0xfb, 0x39, 0x15, 0x73, 0x05, 0x1b, + 0xf9, 0x53, 0x08, 0xcc, 0x53, 0xb6, 0xda, 0x5d, 0xad, 0xfd, 0x70, 0x6b, 0x03, 0xe7, 0x1a, 0x54, + 0x0e, 0x8d, 0xc4, 0x54, 0x98, 0x81, 0x4f, 0xd4, 0x08, 0xd9, 0x02, 0x9e, 0x2e, 0x70, 0xbc, 0xb5, + 0xc0, 0x66, 0xd4, 0x52, 0x07, 0x03, 0xd9, 0x0c, 0x5e, 0x08, 0xf7, 0x4a, 0x4a, 0x38, 0xe3, 0x79, + 0xb8, 0x1f, 0xb9, 0xd3, 0x09, 0xee, 0x97, 0xe8, 0x11, 0x3c, 0xa4, 0xaf, 0x57, 0x45, 0x9b, 0xd1, + 0xc5, 0xcb, 0x5a, 0x94, 0x0b, 0xc6, 0x33, 0xfa, 0x9a, 0x36, 0xe1, 0xa1, 0x1a, 0x0f, 0x8c, 0x6c, + 0xee, 
0xc7, 0x5a, 0x94, 0x17, 0x26, 0x33, 0x0b, 0x00, 0xb4, 0x13, 0x33, 0xe0, 0xff, 0xba, 0xb0, + 0x6b, 0x7c, 0xa3, 0x2f, 0x60, 0xbc, 0xa6, 0x9d, 0x7d, 0xb7, 0xef, 0xbc, 0x22, 0xac, 0x20, 0x74, + 0xa9, 0x7f, 0x1b, 0x15, 0xad, 0x25, 0xa3, 0x4d, 0x38, 0xd6, 0xaf, 0xe1, 0xcb, 0x3b, 0x0e, 0x25, + 0x9e, 0x0f, 0xf4, 0x39, 0x97, 0x75, 0x87, 0x6f, 0xc9, 0x8f, 0x7e, 0x85, 0x83, 0x37, 0xd2, 0xe8, + 0xc1, 0xc6, 0x8b, 0x67, 0x76, 0x7c, 0x04, 0x93, 0xcd, 0x44, 0xdf, 0xfd, 0xf4, 0x0c, 0xf8, 0xcd, + 0xe8, 0xb1, 0x3b, 0xfb, 0xd3, 0x85, 0xf7, 0x57, 0xa2, 0xdc, 0x06, 0xcf, 0x7c, 0x63, 0x6d, 0xae, + 0x86, 0x78, 0xee, 0xfe, 0xf6, 0xad, 0x65, 0x72, 0x51, 0x10, 0x9e, 0xc7, 0xa2, 0xce, 0x93, 0x9c, + 0x72, 0x3d, 0xe2, 0x89, 0x49, 0x91, 0x8a, 0x35, 0xff, 0xfb, 0xcb, 0x3f, 0x19, 0x16, 0x7f, 0x8d, + 0x3e, 0xf8, 0xc9, 0xc8, 0x9f, 0x15, 0xa2, 0xcd, 0xe2, 0x1f, 0x86, 0x8d, 0xae, 0x4e, 0xff, 0xee, + 0x73, 0xd7, 0x3a, 0x77, 0x3d, 0xe4, 0xae, 0xaf, 0x4e, 0x97, 0xbb, 0x7a, 0x83, 0xaf, 0xfe, 0x0b, + 0x00, 0x00, 0xff, 0xff, 0xf3, 0xdd, 0x11, 0x96, 0x45, 0x06, 0x00, 0x00, +} + +var xxx_messageInfo_Key proto.InternalMessageInfo diff --git a/vendor/google.golang.org/appengine/datastore/key.go b/vendor/google.golang.org/appengine/datastore/key.go index 6ab83eaf6..fd598dc96 100644 --- a/vendor/google.golang.org/appengine/datastore/key.go +++ b/vendor/google.golang.org/appengine/datastore/key.go @@ -254,6 +254,10 @@ func DecodeKey(encoded string) (*Key, error) { ref := new(pb.Reference) if err := proto.Unmarshal(b, ref); err != nil { + // Couldn't decode it as an App Engine key, try decoding it as a key encoded by cloud.google.com/go/datastore. 
+ if k := decodeCloudKey(encoded); k != nil { + return k, nil + } return nil, err } diff --git a/vendor/google.golang.org/appengine/datastore/keycompat.go b/vendor/google.golang.org/appengine/datastore/keycompat.go new file mode 100644 index 000000000..371a64eee --- /dev/null +++ b/vendor/google.golang.org/appengine/datastore/keycompat.go @@ -0,0 +1,89 @@ +// Copyright 2019 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package datastore + +import ( + "sync" + + "golang.org/x/net/context" + + "google.golang.org/appengine/datastore/internal/cloudkey" + "google.golang.org/appengine/internal" +) + +var keyConversion struct { + mu sync.RWMutex + appID string // read using getKeyConversionAppID +} + +// EnableKeyConversion enables encoded key compatibility with the Cloud +// Datastore client library (cloud.google.com/go/datastore). Encoded keys +// generated by the Cloud Datastore client library will be decoded into App +// Engine datastore keys. +// +// The context provided must be an App Engine context if running in App Engine +// first generation runtime. This can be called in the /_ah/start handler. It is +// safe to call multiple times, and is cheap to call, so can also be inserted as +// middleware. +// +// Enabling key compatibility does not affect the encoding format used by +// Key.Encode, it only expands the type of keys that are able to be decoded with +// DecodeKey. +func EnableKeyConversion(ctx context.Context) { + // Only attempt to set appID if it's unset. + // If already set, ignore. + if getKeyConversionAppID() != "" { + return + } + + keyConversion.mu.Lock() + // Check again to avoid race where another goroutine set appID between the call + // to getKeyConversionAppID above and taking the write lock. 
+ if keyConversion.appID == "" { + keyConversion.appID = internal.FullyQualifiedAppID(ctx) + } + keyConversion.mu.Unlock() +} + +func getKeyConversionAppID() string { + keyConversion.mu.RLock() + appID := keyConversion.appID + keyConversion.mu.RUnlock() + return appID +} + +// decodeCloudKey attempts to decode the given encoded key generated by the +// Cloud Datastore client library (cloud.google.com/go/datastore), returning nil +// if the key couldn't be decoded. +func decodeCloudKey(encoded string) *Key { + appID := getKeyConversionAppID() + if appID == "" { + return nil + } + + k, err := cloudkey.DecodeKey(encoded) + if err != nil { + return nil + } + return convertCloudKey(k, appID) +} + +// convertCloudKey converts a Cloud Datastore key and converts it to an App +// Engine Datastore key. Cloud Datastore keys don't include the project/app ID, +// so we must add it back in. +func convertCloudKey(key *cloudkey.Key, appID string) *Key { + if key == nil { + return nil + } + k := &Key{ + intID: key.ID, + kind: key.Kind, + namespace: key.Namespace, + parent: convertCloudKey(key.Parent, appID), + stringID: key.Name, + appID: appID, + } + return k +} diff --git a/vendor/google.golang.org/appengine/datastore/keycompat_test.go b/vendor/google.golang.org/appengine/datastore/keycompat_test.go new file mode 100644 index 000000000..923fdac9a --- /dev/null +++ b/vendor/google.golang.org/appengine/datastore/keycompat_test.go @@ -0,0 +1,89 @@ +// Copyright 2019 Google Inc. All Rights Reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
+ +package datastore + +import ( + "reflect" + "testing" +) + +func TestKeyConversion(t *testing.T) { + var tests = []struct { + desc string + key *Key + encodedKey string + }{ + { + desc: "A control test for legacy to legacy key conversion int as the key", + key: &Key{ + kind: "Person", + intID: 1, + appID: "glibrary", + }, + encodedKey: "aghnbGlicmFyeXIMCxIGUGVyc29uGAEM", + }, + { + desc: "A control test for legacy to legacy key conversion string as the key", + key: &Key{ + kind: "Graph", + stringID: "graph:7-day-active", + appID: "glibrary", + }, + encodedKey: "aghnbGlicmFyeXIdCxIFR3JhcGgiEmdyYXBoOjctZGF5LWFjdGl2ZQw", + }, + + // These are keys encoded with cloud.google.com/go/datastore + // Standard int as the key + { + desc: "Convert new key format to old key with int id", + key: &Key{ + kind: "WordIndex", + intID: 1033, + appID: "glibrary", + }, + encodedKey: "Eg4KCVdvcmRJbmRleBCJCA", + }, + // These are keys encoded with cloud.google.com/go/datastore + // Standard string + { + desc: "Convert new key format to old key with string id", + key: &Key{ + kind: "WordIndex", + stringID: "IAmAnID", + appID: "glibrary", + }, + encodedKey: "EhQKCVdvcmRJbmRleBoHSUFtQW5JRA", + }, + + // These are keys encoded with cloud.google.com/go/datastore + // ID String with parent as string + { + desc: "Convert new key format to old key with string id with a parent", + key: &Key{ + kind: "WordIndex", + stringID: "IAmAnID", + appID: "glibrary", + parent: &Key{ + kind: "LetterIndex", + stringID: "IAmAnotherID", + appID: "glibrary", + }, + }, + encodedKey: "EhsKC0xldHRlckluZGV4GgxJQW1Bbm90aGVySUQSFAoJV29yZEluZGV4GgdJQW1BbklE", + }, + } + + // Simulate the key converter enablement + keyConversion.appID = "glibrary" + for _, tc := range tests { + dk, err := DecodeKey(tc.encodedKey) + if err != nil { + t.Fatalf("DecodeKey: %v", err) + } + if !reflect.DeepEqual(dk, tc.key) { + t.Errorf("%s: got %+v, want %+v", tc.desc, dk, tc.key) + } + } +} diff --git 
a/vendor/google.golang.org/appengine/file/file.go b/vendor/google.golang.org/appengine/file/file.go index c3cd58baf..e63a4aca9 100644 --- a/vendor/google.golang.org/appengine/file/file.go +++ b/vendor/google.golang.org/appengine/file/file.go @@ -22,7 +22,7 @@ func DefaultBucketName(c context.Context) (string, error) { err := internal.Call(c, "app_identity_service", "GetDefaultGcsBucketName", req, res) if err != nil { - return "", fmt.Errorf("file: no default bucket name returned in RPC response: %v", res) + return "", fmt.Errorf("file: no default bucket name returned in RPC response: %v", err) } return res.GetDefaultGcsBucketName(), nil } diff --git a/vendor/google.golang.org/appengine/go.mod b/vendor/google.golang.org/appengine/go.mod index f449359d2..451592798 100644 --- a/vendor/google.golang.org/appengine/go.mod +++ b/vendor/google.golang.org/appengine/go.mod @@ -1,7 +1,10 @@ module google.golang.org/appengine require ( - github.com/golang/protobuf v1.2.0 - golang.org/x/net v0.0.0-20180724234803-3673e40ba225 - golang.org/x/text v0.3.0 + github.com/golang/protobuf v1.3.1 + golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 // indirect + golang.org/x/net v0.0.0-20190603091049-60506f45cf65 + golang.org/x/sys v0.0.0-20190606165138-5da285871e9c // indirect + golang.org/x/text v0.3.2 + golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b // indirect ) diff --git a/vendor/google.golang.org/appengine/go.sum b/vendor/google.golang.org/appengine/go.sum index 1a221c089..cb3232556 100644 --- a/vendor/google.golang.org/appengine/go.sum +++ b/vendor/google.golang.org/appengine/go.sum @@ -1,6 +1,22 @@ github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +golang.org/x/crypto 
v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/net v0.0.0-20180724234803-3673e40ba225 h1:kNX+jCowfMYzvlSvJu5pQWEmyWFrBXJ3PBy10xKMXK8= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65 h1:+rhAzEzT3f4JtomfC371qB+0Ola2caSKcY69NUBZrRQ= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=