diff --git a/Gopkg.lock b/Gopkg.lock index 01214d040e..8cfae0ec18 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -477,15 +477,16 @@ version = "v1.4.2" [[projects]] - digest = "1:bec734dc3474457e6cfa4b2723db944c81d2b354ca3a55200ee551d7ad5d1eea" + branch = "release-4.1" + digest = "1:56398f7981973b24d73965fa1255c8a728d6a44a578e84e374e899217b83cc20" name = "github.com/openshift/api" packages = ["config/v1"] pruneopts = "T" - revision = "8e476cb7322e59919cbb6482fd076ec5a214df25" + revision = "9ea19f9dd57858bb3fd8ec7051a99ca5e1ae88d6" [[projects]] - branch = "openshift-4.0-cluster-api-0.0.0-alpha.4" - digest = "1:4700944f85b629a06b45a541430c0e6d3e7fe8bf1d757511448e27c7b0e5f701" + branch = "openshift-4.1-cluster-api-0.0.0-alpha.4" + digest = "1:73619f9a971bb6b4c577902ea24c5c708807154170e45aaeec15f0f9404df21f" name = "github.com/openshift/cluster-api" packages = [ "pkg/apis", @@ -501,15 +502,16 @@ "pkg/controller/machine", "pkg/controller/node", "pkg/controller/noderefutil", + "pkg/drain", "pkg/errors", "pkg/util", ] pruneopts = "T" - revision = "0b649f443f830353db113f92aa86db2d4d34943a" + revision = "53b696be18ad63f7a21f603a4311044a9697807c" [[projects]] - branch = "master" - digest = "1:8ac51ce0543106c1f7ae943463db52e48e816420351886a14fcf799b5fd14be9" + branch = "release-4.1" + digest = "1:4e34274122a8409db992f8048735b5ade4a0310874d8a10f5fa5958e5d75a05f" name = "github.com/openshift/cluster-api-actuator-pkg" packages = [ "pkg/e2e/autoscaler", @@ -520,11 +522,11 @@ "pkg/types", ] pruneopts = "" - revision = "dc2d4c7f4e839792f76b43e4844a5f87a09300b2" + revision = "4c3a3f8d76e964f8ab60f7b639fb833271cc087b" [[projects]] - branch = "master" - digest = "1:4e2b5e8a9ae67c0c8c25c6a93bbf5b710baac24b20e6843a81ee75f18ac57a85" + branch = "release-4.1" + digest = "1:3f5e1154f354133b5ed16a3c84f2cd629954c5e42b281bc9085bf395954c1c65" name = "github.com/openshift/cluster-autoscaler-operator" packages = [ "pkg/apis", @@ -532,15 +534,7 @@ "pkg/apis/autoscaling/v1beta1", ] pruneopts = "T" - revision = "17350a84b40565851a3f1bd21c6f8a6763b8eacb" - -[[projects]] - branch = "master" - digest = "1:b79de44700bc7fc32b4f898e283754fa027616f87074be1cf65a7be5a961cc85" - name = "github.com/openshift/kubernetes-drain" - packages = ["."] - pruneopts = "T" - revision = "4b061affbd00bfc62036a5cd3a57493db6c94151" + revision = "dfdc9d81b23009362ba8b280df8e039466bf6f9f" [[projects]] branch = "master" @@ -1020,6 +1014,52 @@ packages = [ "discovery", "dynamic", + "informers", + "informers/admissionregistration", + "informers/admissionregistration/v1alpha1", + "informers/admissionregistration/v1beta1", + "informers/apps", + "informers/apps/v1", + "informers/apps/v1beta1", + "informers/apps/v1beta2", + "informers/auditregistration", + "informers/auditregistration/v1alpha1", + "informers/autoscaling", + "informers/autoscaling/v1", + "informers/autoscaling/v2beta1", + "informers/autoscaling/v2beta2", + "informers/batch", + "informers/batch/v1", + "informers/batch/v1beta1", + "informers/batch/v2alpha1", + "informers/certificates", + "informers/certificates/v1beta1", + "informers/coordination", + "informers/coordination/v1beta1", + "informers/core", + "informers/core/v1", + "informers/events", + "informers/events/v1beta1", + "informers/extensions", + "informers/extensions/v1beta1", + "informers/internalinterfaces", + "informers/networking", + "informers/networking/v1", + "informers/policy", + "informers/policy/v1beta1", + "informers/rbac", + "informers/rbac/v1", + "informers/rbac/v1alpha1", + "informers/rbac/v1beta1", + "informers/scheduling", 
+ "informers/scheduling/v1alpha1", + "informers/scheduling/v1beta1", + "informers/settings", + "informers/settings/v1alpha1", + "informers/storage", + "informers/storage/v1", + "informers/storage/v1alpha1", + "informers/storage/v1beta1", "kubernetes", "kubernetes/scheme", "kubernetes/typed/admissionregistration/v1alpha1", @@ -1054,6 +1094,34 @@ "kubernetes/typed/storage/v1", "kubernetes/typed/storage/v1alpha1", "kubernetes/typed/storage/v1beta1", + "listers/admissionregistration/v1alpha1", + "listers/admissionregistration/v1beta1", + "listers/apps/v1", + "listers/apps/v1beta1", + "listers/apps/v1beta2", + "listers/auditregistration/v1alpha1", + "listers/autoscaling/v1", + "listers/autoscaling/v2beta1", + "listers/autoscaling/v2beta2", + "listers/batch/v1", + "listers/batch/v1beta1", + "listers/batch/v2alpha1", + "listers/certificates/v1beta1", + "listers/coordination/v1beta1", + "listers/core/v1", + "listers/events/v1beta1", + "listers/extensions/v1beta1", + "listers/networking/v1", + "listers/policy/v1beta1", + "listers/rbac/v1", + "listers/rbac/v1alpha1", + "listers/rbac/v1beta1", + "listers/scheduling/v1alpha1", + "listers/scheduling/v1beta1", + "listers/settings/v1alpha1", + "listers/storage/v1", + "listers/storage/v1alpha1", + "listers/storage/v1beta1", "pkg/apis/clientauthentication", "pkg/apis/clientauthentication/v1alpha1", "pkg/apis/clientauthentication/v1beta1", @@ -1258,7 +1326,6 @@ "github.com/openshift/cluster-api-actuator-pkg/pkg/types", "github.com/openshift/cluster-api/pkg/apis", "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1", - "github.com/openshift/cluster-api/pkg/apis/machine/common", "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1", "github.com/openshift/cluster-api/pkg/controller/error", "github.com/openshift/cluster-api/pkg/controller/machine", @@ -1274,16 +1341,13 @@ "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", "k8s.io/apimachinery/pkg/api/equality", "k8s.io/apimachinery/pkg/apis/meta/v1", - "k8s.io/apimachinery/pkg/apis/meta/v1/validation", "k8s.io/apimachinery/pkg/labels", "k8s.io/apimachinery/pkg/runtime", "k8s.io/apimachinery/pkg/runtime/schema", "k8s.io/apimachinery/pkg/runtime/serializer", "k8s.io/apimachinery/pkg/types", "k8s.io/apimachinery/pkg/util/errors", - "k8s.io/apimachinery/pkg/util/intstr", "k8s.io/apimachinery/pkg/util/uuid", - "k8s.io/apimachinery/pkg/util/validation/field", "k8s.io/apimachinery/pkg/util/wait", "k8s.io/client-go/kubernetes", "k8s.io/client-go/kubernetes/scheme", diff --git a/Gopkg.toml b/Gopkg.toml index c6a27bc094..75cda220e6 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -30,11 +30,11 @@ required = [ [[override]] name = "github.com/openshift/cluster-api" - branch = "openshift-4.0-cluster-api-0.0.0-alpha.4" + branch = "openshift-4.1-cluster-api-0.0.0-alpha.4" [[override]] name = "github.com/openshift/cluster-api-actuator-pkg" - branch = "master" + branch = "release-4.1" [[override]] name = "k8s.io/code-generator" diff --git a/vendor/github.com/openshift/api/Dockerfile.build b/vendor/github.com/openshift/api/Dockerfile.build new file mode 100644 index 0000000000..1b01504405 --- /dev/null +++ b/vendor/github.com/openshift/api/Dockerfile.build @@ -0,0 +1,13 @@ +FROM fedora:29 + +ENV GOPATH=/go +ENV PATH=/go/bin:$PATH + +RUN dnf -y install make git unzip golang wget +RUN go get -u -v golang.org/x/tools/cmd/... 
+RUN wget https://github.com/google/protobuf/releases/download/v3.0.2/protoc-3.0.2-linux-x86_64.zip && \ + mkdir protoc && \ + unzip protoc-3.0.2-linux-x86_64.zip -d protoc/ && \ + mv protoc/bin/protoc /usr/bin && \ + rm -rf protoc + diff --git a/vendor/github.com/openshift/api/Makefile b/vendor/github.com/openshift/api/Makefile index 9454458743..1667fa5f9d 100644 --- a/vendor/github.com/openshift/api/Makefile +++ b/vendor/github.com/openshift/api/Makefile @@ -1,6 +1,9 @@ all: build .PHONY: all +RUNTIME ?= podman +RUNTIME_IMAGE_NAME ?= openshift-api-generator + build: go build github.com/openshift/api/... .PHONY: build @@ -19,6 +22,10 @@ update-deps: hack/update-deps.sh .PHONY: update-deps +generate-with-container: Dockerfile.build + $(RUNTIME) build -t $(RUNTIME_IMAGE_NAME) -f Dockerfile.build . + $(RUNTIME) run -ti --rm -v $(PWD):/go/src/github.com/openshift/api:z -w /go/src/github.com/openshift/api $(RUNTIME_IMAGE_NAME) make generate + generate: hack/update-deepcopy.sh hack/update-protobuf.sh diff --git a/vendor/github.com/openshift/api/OWNERS b/vendor/github.com/openshift/api/OWNERS index e47019b22b..70b782d9e5 100644 --- a/vendor/github.com/openshift/api/OWNERS +++ b/vendor/github.com/openshift/api/OWNERS @@ -9,4 +9,5 @@ approvers: - jwforres - knobunc - sjenning - - soltysh \ No newline at end of file + - soltysh + - bparees diff --git a/vendor/github.com/openshift/api/authorization/v1/generated.proto b/vendor/github.com/openshift/api/authorization/v1/generated.proto index 12c2d27885..9816838461 100644 --- a/vendor/github.com/openshift/api/authorization/v1/generated.proto +++ b/vendor/github.com/openshift/api/authorization/v1/generated.proto @@ -116,9 +116,11 @@ message GroupRestriction { // Groups is a list of groups used to match against an individual user's // groups. If the user is a member of one of the whitelisted groups, the user // is allowed to be bound to a role. + // +nullable repeated string groups = 1; // Selectors specifies a list of label selectors over group labels. + // +nullable repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labels = 2; } @@ -470,9 +472,11 @@ message UserRestriction { repeated string users = 1; // Groups specifies a list of literal group names. + // +nullable repeated string groups = 2; // Selectors specifies a list of label selectors over user labels. + // +nullable repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labels = 3; } diff --git a/vendor/github.com/openshift/api/authorization/v1/types.go b/vendor/github.com/openshift/api/authorization/v1/types.go index 3e619df0a7..94b4aed0ce 100644 --- a/vendor/github.com/openshift/api/authorization/v1/types.go +++ b/vendor/github.com/openshift/api/authorization/v1/types.go @@ -493,9 +493,11 @@ type UserRestriction struct { Users []string `json:"users" protobuf:"bytes,1,rep,name=users"` // Groups specifies a list of literal group names. + // +nullable Groups []string `json:"groups" protobuf:"bytes,2,rep,name=groups"` // Selectors specifies a list of label selectors over user labels. + // +nullable Selectors []metav1.LabelSelector `json:"labels" protobuf:"bytes,3,rep,name=labels"` } @@ -505,9 +507,11 @@ type GroupRestriction struct { // Groups is a list of groups used to match against an individual user's // groups. If the user is a member of one of the whitelisted groups, the user // is allowed to be bound to a role. + // +nullable Groups []string `json:"groups" protobuf:"bytes,1,rep,name=groups"` // Selectors specifies a list of label selectors over group labels. 
+ // +nullable Selectors []metav1.LabelSelector `json:"labels" protobuf:"bytes,2,rep,name=labels"` } diff --git a/vendor/github.com/openshift/api/build/v1/generated.proto b/vendor/github.com/openshift/api/build/v1/generated.proto index 31cedc6553..cb374e22e4 100644 --- a/vendor/github.com/openshift/api/build/v1/generated.proto +++ b/vendor/github.com/openshift/api/build/v1/generated.proto @@ -110,11 +110,13 @@ message BuildConfigSpec { optional CommonSpec commonSpec = 3; // successfulBuildsHistoryLimit is the number of old successful builds to retain. - // If not specified, all successful builds are retained. + // When a BuildConfig is created, the 5 most recent successful builds are retained unless this value is set. + // If removed after the BuildConfig has been created, all successful builds are retained. optional int32 successfulBuildsHistoryLimit = 4; // failedBuildsHistoryLimit is the number of old failed builds to retain. - // If not specified, all failed builds are retained. + // When a BuildConfig is created, the 5 most recent failed builds are retained unless this value is set. + // If removed after the BuildConfig has been created, all failed builds are retained. optional int32 failedBuildsHistoryLimit = 5; } diff --git a/vendor/github.com/openshift/api/build/v1/types.go b/vendor/github.com/openshift/api/build/v1/types.go index 391da409ed..82bb7883ad 100644 --- a/vendor/github.com/openshift/api/build/v1/types.go +++ b/vendor/github.com/openshift/api/build/v1/types.go @@ -902,11 +902,13 @@ type BuildConfigSpec struct { CommonSpec `json:",inline" protobuf:"bytes,3,opt,name=commonSpec"` // successfulBuildsHistoryLimit is the number of old successful builds to retain. - // If not specified, all successful builds are retained. + // When a BuildConfig is created, the 5 most recent successful builds are retained unless this value is set. + // If removed after the BuildConfig has been created, all successful builds are retained. SuccessfulBuildsHistoryLimit *int32 `json:"successfulBuildsHistoryLimit,omitempty" protobuf:"varint,4,opt,name=successfulBuildsHistoryLimit"` // failedBuildsHistoryLimit is the number of old failed builds to retain. - // If not specified, all failed builds are retained. + // When a BuildConfig is created, the 5 most recent failed builds are retained unless this value is set. + // If removed after the BuildConfig has been created, all failed builds are retained. FailedBuildsHistoryLimit *int32 `json:"failedBuildsHistoryLimit,omitempty" protobuf:"varint,5,opt,name=failedBuildsHistoryLimit"` } diff --git a/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go index cc926d4431..1436e65543 100644 --- a/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go @@ -80,8 +80,8 @@ var map_BuildConfigSpec = map[string]string{ "": "BuildConfigSpec describes when and how builds are created", "triggers": "triggers determine how new Builds can be launched from a BuildConfig. If no triggers are defined, a new build can only occur as a result of an explicit client build creation.", "runPolicy": "RunPolicy describes how the new build created from this build configuration will be scheduled for execution. 
This is optional, if not specified we default to \"Serial\".",
-	"successfulBuildsHistoryLimit": "successfulBuildsHistoryLimit is the number of old successful builds to retain. If not specified, all successful builds are retained.",
-	"failedBuildsHistoryLimit":     "failedBuildsHistoryLimit is the number of old failed builds to retain. If not specified, all failed builds are retained.",
+	"successfulBuildsHistoryLimit": "successfulBuildsHistoryLimit is the number of old successful builds to retain. When a BuildConfig is created, the 5 most recent successful builds are retained unless this value is set. If removed after the BuildConfig has been created, all successful builds are retained.",
+	"failedBuildsHistoryLimit":     "failedBuildsHistoryLimit is the number of old failed builds to retain. When a BuildConfig is created, the 5 most recent failed builds are retained unless this value is set. If removed after the BuildConfig has been created, all failed builds are retained.",
 }

 func (BuildConfigSpec) SwaggerDoc() map[string]string {
diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go
index 9a13b85274..52018d20bb 100644
--- a/vendor/github.com/openshift/api/config/v1/types_apiserver.go
+++ b/vendor/github.com/openshift/api/config/v1/types_apiserver.go
@@ -33,15 +33,6 @@ type APIServerSpec struct {
 }

 type APIServerServingCerts struct {
-	// defaultServingCertificate references a kubernetes.io/tls type secret containing the default TLS cert info for
-	// serving secure traffic. If no named certificates match the server name as understood by a client, this default
-	// certificate will be used. If defaultServingCertificate is not specified, then a operator managed certificate will
-	// be used.
-	// The secret must exist in the openshift-config namespace and contain the following required fields:
-	// - Secret.Data["tls.key"] - TLS private key.
-	// - Secret.Data["tls.crt"] - TLS certificate.
-	// +optional
-	DefaultServingCertificate SecretNameReference `json:"defaultServingCertificate"`
 	// namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames.
 	// If no named certificates are provided, or no named certificates match the server name as understood by a client,
 	// the defaultServingCertificate will be used.
diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
index 6adb4fc151..8508b5cd07 100644
--- a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
+++ b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
@@ -127,9 +127,6 @@ const (
 	// operator (eg: openshift-apiserver for the openshift-apiserver-operator).
 	OperatorProgressing ClusterStatusConditionType = "Progressing"

-	// OperatorFailing is DEPRECATED
-	OperatorFailing ClusterStatusConditionType = "Failing"
-
 	// Degraded indicates that the operand is not functioning completely. An example of a degraded state
 	// would be if there should be 5 copies of the operand running but only 4 are running. It may still be available,
 	// but it is degraded
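With the deprecated OperatorFailing constant removed above, consumers of ClusterOperator status should match on the Degraded condition type instead. A minimal sketch of that check against the vendored config/v1 package, assuming the OperatorDegraded and ConditionTrue constants that package already defines:

```go
package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

// isDegraded reports whether the operator currently sets Degraded=True.
// Callers that previously matched the removed "Failing" type should
// match "Degraded" instead.
func isDegraded(co *configv1.ClusterOperator) bool {
	for _, c := range co.Status.Conditions {
		if c.Type == configv1.OperatorDegraded && c.Status == configv1.ConditionTrue {
			return true
		}
	}
	return false
}

func main() {
	co := &configv1.ClusterOperator{}
	fmt.Println(isDegraded(co)) // false: no conditions are set yet
}
```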
diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go
index 8d4cb7776e..6ab92365b9 100644
--- a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go
+++ b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go
@@ -104,7 +104,7 @@ type ClusterVersionStatus struct {
 	// conditions provides information about the cluster version. The condition
 	// "Available" is set to true if the desiredUpdate has been reached. The
 	// condition "Progressing" is set to true if an update is being applied.
-	// The condition "Failing" is set to true if an update is currently blocked
+	// The condition "Degraded" is set to true if an update is currently blocked
 	// by a temporary or permanent error. Conditions are only valid for the
 	// current desiredUpdate when metadata.generation is equal to
 	// status.generation.
@@ -158,6 +158,9 @@ type UpdateHistory struct {
 	// image is a container image location that contains the update. This value
 	// is always populated.
 	Image string `json:"image"`
+	// verified indicates whether the provided update was properly verified
+	// before it was installed. If this is false, the cluster may not be trusted.
+	Verified bool `json:"verified"`
 }

 // ClusterID is string RFC4122 uuid.
@@ -202,6 +205,19 @@ type Update struct {
 	//
 	// +optional
 	Image string `json:"image"`
+	// force allows an administrator to update to an image that has failed
+	// verification, does not appear in the availableUpdates list, or otherwise
+	// would be blocked by normal protections on update. This option should only
+	// be used when the authenticity of the provided image has been verified out
+	// of band because the provided image will run with full administrative access
+	// to the cluster. Do not use this flag with images that come from unknown
+	// or potentially malicious sources.
+	//
+	// This flag does not override other forms of consistency checking that are
+	// required before a new update is deployed.
+	//
+	// +optional
+	Force bool `json:"force"`
 }

 // RetrievedUpdates reports whether available updates have been retrieved from
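The new Force and Verified fields pair up: Force requests an update that failed verification, and Verified records whether a payload was checked before install. A hedged sketch of client-side usage against the vendored config/v1 types; the image reference below is a placeholder, not a real payload:

```go
package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	// Request an update to a payload that failed signature verification.
	// Setting Force acknowledges the image was verified out of band.
	cv := configv1.ClusterVersion{
		Spec: configv1.ClusterVersionSpec{
			DesiredUpdate: &configv1.Update{
				Image: "registry.example.com/release:placeholder", // placeholder reference
				Force: true,
			},
		},
	}

	// Once applied, each history entry records whether the payload was
	// verified before installation.
	for _, h := range cv.Status.History {
		if !h.Verified {
			fmt.Printf("update %s was installed without verification\n", h.Image)
		}
	}
}
```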
diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
index 40e3f2c279..c59dc39c1e 100644
--- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
+++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
@@ -37,25 +37,31 @@ type InfrastructureStatus struct {
 	// alphanumeric or hyphen characters.
 	InfrastructureName string `json:"infrastructureName"`

-	// platform is the underlying infrastructure provider for the cluster. This
-	// value controls whether infrastructure automation such as service load
-	// balancers, dynamic volume provisioning, machine creation and deletion, and
-	// other integrations are enabled. If None, no infrastructure automation is
-	// enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt",
-	// "OpenStack", "VSphere", and "None". Individual components may not support
-	// all platforms, and must handle unrecognized platforms as None if they do
-	// not support that platform.
+	// platform is the underlying infrastructure provider for the cluster.
+	//
+	// Deprecated: Use platformStatus.type instead.
 	Platform PlatformType `json:"platform,omitempty"`

+	// platformStatus holds status information specific to the underlying
+	// infrastructure provider.
+	// +optional
+	PlatformStatus *PlatformStatus `json:"platformStatus,omitempty"`
+
 	// etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering
 	// etcd servers and clients.
 	// For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery
 	EtcdDiscoveryDomain string `json:"etcdDiscoveryDomain"`

-	// apiServerURL is a valid URL with scheme(http/https), address and port.
-	// apiServerURL can be used by components like kubelet on machines, to contact the `apisever`
-	// using the infrastructure provider rather than the kubernetes networking.
+	// apiServerURL is a valid URI with scheme(http/https), address and
+	// port. apiServerURL can be used by components like the web console
+	// to tell users where to find the Kubernetes API.
 	APIServerURL string `json:"apiServerURL"`
+
+	// apiServerInternalURL is a valid URI with scheme(http/https),
+	// address and port. apiServerInternalURL can be used by components
+	// like kubelets, to contact the Kubernetes API server using the
+	// infrastructure provider rather than Kubernetes networking.
+	APIServerInternalURL string `json:"apiServerInternalURI"`
 }

 // PlatformType is a specific supported infrastructure provider.
@@ -87,6 +93,31 @@ const (
 	VSpherePlatformType PlatformType = "VSphere"
 )

+// PlatformStatus holds the current status specific to the underlying infrastructure provider
+// of the current cluster. Since these are used at status-level for the underlying cluster, it
+// is expected that only one of the status structs is set.
+type PlatformStatus struct {
+	// type is the underlying infrastructure provider for the cluster. This
+	// value controls whether infrastructure automation such as service load
+	// balancers, dynamic volume provisioning, machine creation and deletion, and
+	// other integrations are enabled. If None, no infrastructure automation is
+	// enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt",
+	// "OpenStack", "VSphere", and "None". Individual components may not support
+	// all platforms, and must handle unrecognized platforms as None if they do
+	// not support that platform.
+	Type PlatformType `json:"type"`
+
+	// AWS contains settings specific to the Amazon Web Services infrastructure provider.
+	// +optional
+	AWS *AWSPlatformStatus `json:"aws,omitempty"`
+}
+
+// AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.
+type AWSPlatformStatus struct {
+	// region holds the default AWS region for new AWS resources created by the cluster.
+	Region string `json:"region"`
+}
+
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

 // InfrastructureList is
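Because platform is now deprecated in favor of platformStatus, consumers should prefer the new field and fall back to the old one for status objects written by older clusters. A minimal sketch, assuming the config/v1 types defined above:

```go
package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

// platformType prefers the new status-level field and falls back to the
// deprecated top-level Platform for clusters that predate platformStatus.
func platformType(status *configv1.InfrastructureStatus) configv1.PlatformType {
	if status.PlatformStatus != nil {
		return status.PlatformStatus.Type
	}
	return status.Platform // Deprecated field, kept for older clusters.
}

func main() {
	status := &configv1.InfrastructureStatus{
		PlatformStatus: &configv1.PlatformStatus{
			Type: configv1.AWSPlatformType,
			AWS:  &configv1.AWSPlatformStatus{Region: "us-east-1"},
		},
	}
	fmt.Println(platformType(status)) // AWS
	if status.PlatformStatus.AWS != nil {
		fmt.Println(status.PlatformStatus.AWS.Region)
	}
}
```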
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
index 3ad614c5e3..2addbc310e 100644
--- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
+++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go
@@ -95,7 +95,6 @@ func (in *APIServerNamedServingCert) DeepCopy() *APIServerNamedServingCert {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *APIServerServingCerts) DeepCopyInto(out *APIServerServingCerts) {
 	*out = *in
-	out.DefaultServingCertificate = in.DefaultServingCertificate
 	if in.NamedCertificates != nil {
 		in, out := &in.NamedCertificates, &out.NamedCertificates
 		*out = make([]APIServerNamedServingCert, len(*in))
@@ -150,6 +149,22 @@ func (in *APIServerStatus) DeepCopy() *APIServerStatus {
 	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSPlatformStatus) DeepCopyInto(out *AWSPlatformStatus) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSPlatformStatus.
+func (in *AWSPlatformStatus) DeepCopy() *AWSPlatformStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(AWSPlatformStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *AdmissionConfig) DeepCopyInto(out *AdmissionConfig) {
 	*out = *in
@@ -1613,7 +1628,7 @@ func (in *Infrastructure) DeepCopyInto(out *Infrastructure) {
 	out.TypeMeta = in.TypeMeta
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
 	out.Spec = in.Spec
-	out.Status = in.Status
+	in.Status.DeepCopyInto(&out.Status)
 	return
 }

@@ -1688,6 +1703,11 @@ func (in *InfrastructureSpec) DeepCopy() *InfrastructureSpec {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *InfrastructureStatus) DeepCopyInto(out *InfrastructureStatus) {
 	*out = *in
+	if in.PlatformStatus != nil {
+		in, out := &in.PlatformStatus, &out.PlatformStatus
+		*out = new(PlatformStatus)
+		(*in).DeepCopyInto(*out)
+	}
 	return
 }

@@ -2271,6 +2291,27 @@ func (in *OperandVersion) DeepCopy() *OperandVersion {
 	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PlatformStatus) DeepCopyInto(out *PlatformStatus) {
+	*out = *in
+	if in.AWS != nil {
+		in, out := &in.AWS, &out.AWS
+		*out = new(AWSPlatformStatus)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformStatus.
+func (in *PlatformStatus) DeepCopy() *PlatformStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(PlatformStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *Project) DeepCopyInto(out *Project) {
 	*out = *in
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
index 234a4211ee..67c118265d 100644
--- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
+++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go
@@ -262,8 +262,7 @@ func (APIServerNamedServingCert) SwaggerDoc() map[string]string {
 }

 var map_APIServerServingCerts = map[string]string{
-	"defaultServingCertificate": "defaultServingCertificate references a kubernetes.io/tls type secret containing the default TLS cert info for serving secure traffic. If no named certificates match the server name as understood by a client, this default certificate will be used. If defaultServingCertificate is not specified, then a operator managed certificate will be used.
The secret must exist in the openshift-config namespace and contain the following required fields: - Secret.Data[\"tls.key\"] - TLS private key. - Secret.Data[\"tls.crt\"] - TLS certificate.", - "namedCertificates": "namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames. If no named certificates are provided, or no named certificates match the server name as understood by a client, the defaultServingCertificate will be used.", + "namedCertificates": "namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames. If no named certificates are provided, or no named certificates match the server name as understood by a client, the defaultServingCertificate will be used.", } func (APIServerServingCerts) SwaggerDoc() map[string]string { @@ -492,7 +491,7 @@ var map_ClusterVersionStatus = map[string]string{ "history": "history contains a list of the most recent versions applied to the cluster. This value may be empty during cluster startup, and then will be updated when a new update is being applied. The newest update is first in the list and it is ordered by recency. Updates in the history have state Completed if the rollout completed - if an update was failing or halfway applied the state will be Partial. Only a limited amount of update history is preserved.", "observedGeneration": "observedGeneration reports which version of the spec is being synced. If this value is not equal to metadata.generation, then the desired and conditions fields may represent from a previous version.", "versionHash": "versionHash is a fingerprint of the content that the cluster will be updated with. It is used by the operator to avoid unnecessary work and is for internal use only.", - "conditions": "conditions provides information about the cluster version. The condition \"Available\" is set to true if the desiredUpdate has been reached. The condition \"Progressing\" is set to true if an update is being applied. The condition \"Failing\" is set to true if an update is currently blocked by a temporary or permanent error. Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation.", + "conditions": "conditions provides information about the cluster version. The condition \"Available\" is set to true if the desiredUpdate has been reached. The condition \"Progressing\" is set to true if an update is being applied. The condition \"Degraded\" is set to true if an update is currently blocked by a temporary or permanent error. Conditions are only valid for the current desiredUpdate when metadata.generation is equal to status.generation.", "availableUpdates": "availableUpdates contains the list of updates that are appropriate for this cluster. This list may be empty if no updates are recommended, if the update service is unavailable, or if an invalid channel has been specified.", } @@ -517,6 +516,7 @@ var map_Update = map[string]string{ "": "Update represents a release of the ClusterVersionOperator, referenced by the Image member.", "version": "version is a semantic versioning identifying the update version. When this field is part of spec, version is optional if image is specified.", "image": "image is a container image location that contains the update. 
When this field is part of spec, image is optional if version is specified and the availableUpdates field contains a matching version.",
+	"force":   "force allows an administrator to update to an image that has failed verification, does not appear in the availableUpdates list, or otherwise would be blocked by normal protections on update. This option should only be used when the authenticity of the provided image has been verified out of band because the provided image will run with full administrative access to the cluster. Do not use this flag with images that come from unknown or potentially malicious sources.\n\nThis flag does not override other forms of consistency checking that are required before a new update is deployed.",
 }

 func (Update) SwaggerDoc() map[string]string {
@@ -530,6 +530,7 @@ var map_UpdateHistory = map[string]string{
 	"completionTime": "completionTime, if set, is when the update was fully applied. The update that is currently being applied will have a null completion time. Completion time will always be set for entries that are not the current update (usually to the started time of the next update).",
 	"version":        "version is a semantic versioning identifying the update version. If the requested image does not define a version, or if a failure occurs retrieving the image, this value may be empty.",
 	"image":          "image is a container image location that contains the update. This value is always populated.",
+	"verified":       "verified indicates whether the provided update was properly verified before it was installed. If this is false, the cluster may not be trusted.",
 }

 func (UpdateHistory) SwaggerDoc() map[string]string {
@@ -697,6 +698,15 @@ func (RegistrySources) SwaggerDoc() map[string]string {
 	return map_RegistrySources
 }

+var map_AWSPlatformStatus = map[string]string{
+	"":       "AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider.",
+	"region": "region holds the default AWS region for new AWS resources created by the cluster.",
+}
+
+func (AWSPlatformStatus) SwaggerDoc() map[string]string {
+	return map_AWSPlatformStatus
+}
+
 var map_Infrastructure = map[string]string{
 	"":         "Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster`",
 	"metadata": "Standard object's metadata.",
@@ -727,17 +737,29 @@ func (InfrastructureSpec) SwaggerDoc() map[string]string {
 }

 var map_InfrastructureStatus = map[string]string{
-	"":                   "InfrastructureStatus describes the infrastructure the cluster is leveraging.",
-	"infrastructureName": "infrastructureName uniquely identifies a cluster with a human friendly name. Once set it should not be changed. Must be of max length 27 and must have only alphanumeric or hyphen characters.",
-	"platform":           "platform is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.",
-	"etcdDiscoveryDomain": "etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery",
-	"apiServerURL":        "apiServerURL is a valid URL with scheme(http/https), address and port. apiServerURL can be used by components like kubelet on machines, to contact the `apisever` using the infrastructure provider rather than the kubernetes networking.",
+	"":                     "InfrastructureStatus describes the infrastructure the cluster is leveraging.",
+	"infrastructureName":   "infrastructureName uniquely identifies a cluster with a human friendly name. Once set it should not be changed. Must be of max length 27 and must have only alphanumeric or hyphen characters.",
+	"platform":             "platform is the underlying infrastructure provider for the cluster.\n\nDeprecated: Use platformStatus.type instead.",
+	"platformStatus":       "platformStatus holds status information specific to the underlying infrastructure provider.",
+	"etcdDiscoveryDomain":  "etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery",
+	"apiServerURL":         "apiServerURL is a valid URI with scheme(http/https), address and port. apiServerURL can be used by components like the web console to tell users where to find the Kubernetes API.",
+	"apiServerInternalURI": "apiServerInternalURL is a valid URI with scheme(http/https), address and port. apiServerInternalURL can be used by components like kubelets, to contact the Kubernetes API server using the infrastructure provider rather than Kubernetes networking.",
 }

 func (InfrastructureStatus) SwaggerDoc() map[string]string {
 	return map_InfrastructureStatus
 }

+var map_PlatformStatus = map[string]string{
+	"":     "PlatformStatus holds the current status specific to the underlying infrastructure provider of the current cluster. Since these are used at status-level for the underlying cluster, it is expected that only one of the status structs is set.",
+	"type": "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.",
+	"aws":  "AWS contains settings specific to the Amazon Web Services infrastructure provider.",
+}
+
+func (PlatformStatus) SwaggerDoc() map[string]string {
+	return map_PlatformStatus
+}
+
 var map_Ingress = map[string]string{
 	"":         "Ingress holds cluster-wide information about Ingress. The canonical name is `cluster`",
 	"metadata": "Standard object's metadata.",
diff --git a/vendor/github.com/openshift/api/hack/update-protobuf.sh b/vendor/github.com/openshift/api/hack/update-protobuf.sh
index 1fca572cd6..93c4473047 100755
--- a/vendor/github.com/openshift/api/hack/update-protobuf.sh
+++ b/vendor/github.com/openshift/api/hack/update-protobuf.sh
@@ -11,6 +11,7 @@ install the platform appropriate Protobuf package for your OS:
   https://github.com/google/protobuf/releases
 To skip protobuf generation, set \$PROTO_OPTIONAL."
+  exit 1
 fi

 rm -rf go-to-protobuf
diff --git a/vendor/github.com/openshift/api/operator/v1/types.go b/vendor/github.com/openshift/api/operator/v1/types.go
index d0eb81189f..3f5af652f6 100644
--- a/vendor/github.com/openshift/api/operator/v1/types.go
+++ b/vendor/github.com/openshift/api/operator/v1/types.go
@@ -133,8 +133,8 @@ var (
 	OperatorStatusTypeAvailable = "Available"
 	// Progressing indicates that the operator is trying to transition the operand to a different state
 	OperatorStatusTypeProgressing = "Progressing"
-	// Failing indicates that the operator (not the operand) is unable to fulfill the user intent
-	OperatorStatusTypeFailing = "Failing"
+	// Degraded indicates that the operator (not the operand) is unable to fulfill the user intent
+	OperatorStatusTypeDegraded = "Degraded"
 	// PrereqsSatisfied indicates that the things this operator depends on are present and at levels compatible with the
 	// current and desired states.
 	OperatorStatusTypePrereqsSatisfied = "PrereqsSatisfied"
diff --git a/vendor/github.com/openshift/api/operator/v1/types_etcd.go b/vendor/github.com/openshift/api/operator/v1/types_etcd.go
index b1e1adda68..83376ba4f9 100644
--- a/vendor/github.com/openshift/api/operator/v1/types_etcd.go
+++ b/vendor/github.com/openshift/api/operator/v1/types_etcd.go
@@ -21,11 +21,6 @@ type Etcd struct {

 type EtcdSpec struct {
 	StaticPodOperatorSpec `json:",inline"`
-
-	// forceRedeploymentReason can be used to force the redeployment of the kube-apiserver by providing a unique string.
-	// This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work
-	// this time instead of failing again on the same config.
-	ForceRedeploymentReason string `json:"forceRedeploymentReason"`
 }

 type EtcdStatus struct {
diff --git a/vendor/github.com/openshift/api/operator/v1/types_network.go b/vendor/github.com/openshift/api/operator/v1/types_network.go
index 53052e8b2a..8ba638ce9d 100644
--- a/vendor/github.com/openshift/api/operator/v1/types_network.go
+++ b/vendor/github.com/openshift/api/operator/v1/types_network.go
@@ -108,6 +108,10 @@ type AdditionalNetworkDefinition struct {
 	// This must be unique.
 	Name string `json:"name"`

+	// namespace is the namespace of the network. This will be populated in the resulting CRD.
+	// If not given, the network will be created in the default namespace.
+	Namespace string `json:"namespace,omitempty"`
+
 	// rawCNIConfig is the raw CNI configuration json to create in the
 	// NetworkAttachmentDefinition CRD
 	RawCNIConfig string `json:"rawCNIConfig"`
diff --git a/vendor/github.com/openshift/api/operator/v1/types_serviceca.go b/vendor/github.com/openshift/api/operator/v1/types_serviceca.go
index e6bb242aa2..6d7f7dd453 100644
--- a/vendor/github.com/openshift/api/operator/v1/types_serviceca.go
+++ b/vendor/github.com/openshift/api/operator/v1/types_serviceca.go
@@ -13,11 +13,11 @@ type ServiceCA struct {
 	metav1.TypeMeta `json:",inline"`
 	metav1.ObjectMeta `json:"metadata"`

-	// +required
 	//spec holds user settable values for configuration
+	// +required
 	Spec ServiceCASpec `json:"spec"`

-	// +optional
 	// status holds observed values from the cluster. They may not be overridden.
+ // +optional Status ServiceCAStatus `json:"status"` } diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go index da167ba6d2..5296f8dc23 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -188,14 +188,6 @@ func (EtcdList) SwaggerDoc() map[string]string { return map_EtcdList } -var map_EtcdSpec = map[string]string{ - "forceRedeploymentReason": "forceRedeploymentReason can be used to force the redeployment of the kube-apiserver by providing a unique string. This provides a mechanism to kick a previously failed deployment and provide a reason why you think it will work this time instead of failing again on the same config.", -} - -func (EtcdSpec) SwaggerDoc() map[string]string { - return map_EtcdSpec -} - var map_EndpointPublishingStrategy = map[string]string{ "": "EndpointPublishingStrategy is a way to publish the endpoints of an IngressController, and represents the type and any additional configuration for a specific type.", "type": "type is the publishing strategy to use. Valid values are:\n\n* LoadBalancerService\n\nPublishes the ingress controller using a Kubernetes LoadBalancer Service.\n\nIn this configuration, the ingress controller deployment uses container networking. A LoadBalancer Service is created to publish the deployment.\n\nSee: https://kubernetes.io/docs/concepts/services-networking/#loadbalancer\n\nIf domain is set, a wildcard DNS record will be managed to point at the LoadBalancer Service's external name. DNS records are managed only in DNS zones defined by dns.config.openshift.io/cluster .spec.publicZone and .spec.privateZone.\n\nWildcard DNS management is currently supported only on the AWS platform.\n\n* HostNetwork\n\nPublishes the ingress controller on node ports where the ingress controller is deployed.\n\nIn this configuration, the ingress controller deployment uses host networking, bound to node ports 80 and 443. The user is responsible for configuring an external load balancer to publish the ingress controller via the node ports.\n\n* Private\n\nDoes not publish the ingress controller.\n\nIn this configuration, the ingress controller deployment uses container networking, and is not explicitly published. The user must manually publish the ingress controller.", @@ -301,6 +293,7 @@ var map_AdditionalNetworkDefinition = map[string]string{ "": "AdditionalNetworkDefinition configures an extra network that is available but not created by default. Instead, pods must request them by name. type must be specified, along with exactly one \"Config\" that matches the type.", "type": "type is the type of network The only supported value is NetworkTypeRaw", "name": "name is the name of the network. This will be populated in the resulting CRD This must be unique.", + "namespace": "namespace is the namespace of the network. 
This will be populated in the resulting CRD If not given the network will be created in the default namespace.", "rawCNIConfig": "rawCNIConfig is the raw CNI configuration json to create in the NetworkAttachmentDefinition CRD", } diff --git a/vendor/github.com/openshift/api/route/v1/generated.pb.go b/vendor/github.com/openshift/api/route/v1/generated.pb.go index b0ad233717..dde33a9943 100644 --- a/vendor/github.com/openshift/api/route/v1/generated.pb.go +++ b/vendor/github.com/openshift/api/route/v1/generated.pb.go @@ -360,6 +360,10 @@ func (m *RouteSpec) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.WildcardPolicy))) i += copy(dAtA[i:], m.WildcardPolicy) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subdomain))) + i += copy(dAtA[i:], m.Subdomain) return i, nil } @@ -598,6 +602,8 @@ func (m *RouteSpec) Size() (n int) { } l = len(m.WildcardPolicy) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Subdomain) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -740,6 +746,7 @@ func (this *RouteSpec) String() string { `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "RoutePort", "RoutePort", 1) + `,`, `TLS:` + strings.Replace(fmt.Sprintf("%v", this.TLS), "TLSConfig", "TLSConfig", 1) + `,`, `WildcardPolicy:` + fmt.Sprintf("%v", this.WildcardPolicy) + `,`, + `Subdomain:` + fmt.Sprintf("%v", this.Subdomain) + `,`, `}`, }, "") return s @@ -1770,6 +1777,35 @@ func (m *RouteSpec) Unmarshal(dAtA []byte) error { } m.WildcardPolicy = WildcardPolicyType(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subdomain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subdomain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2442,77 +2478,78 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 1146 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0x8f, 0xff, 0xa6, 0x1e, 0xb7, 0x85, 0x0c, 0x94, 0xba, 0x91, 0x62, 0xa7, 0x7b, 0x40, 0x29, - 0x2a, 0xbb, 0x24, 0x14, 0xa8, 0x84, 0x38, 0xd4, 0x29, 0x82, 0x34, 0x4e, 0x1a, 0x8d, 0x2d, 0x2a, - 0xaa, 0x1e, 0x98, 0xec, 0x8e, 0xd7, 0x83, 0xed, 0x99, 0x65, 0x66, 0x9c, 0xe2, 0x0b, 0xaa, 0xc4, - 0x17, 0x28, 0x7c, 0x1a, 0x3e, 0x42, 0x8e, 0x3d, 0xf6, 0x80, 0x2c, 0x62, 0x8e, 0x7c, 0x83, 0x9c, - 0xd0, 0xcc, 0x8e, 0xbd, 0xeb, 0xc4, 0x49, 0x5d, 0xb8, 0xed, 0xbc, 0xf7, 0x7e, 0xbf, 0xf7, 0xe6, - 0xbd, 0xb7, 0xbf, 0x01, 0x9b, 0x21, 0x55, 0x9d, 0xc1, 0xa1, 0xeb, 0xf3, 0xbe, 0xc7, 0x23, 0xc2, - 0x64, 0x87, 0xb6, 0x95, 0x87, 0x23, 0xea, 0x09, 0x3e, 0x50, 0xc4, 0x3b, 0xda, 0xf4, 0x42, 0xc2, - 0x88, 0xc0, 0x8a, 0x04, 0x6e, 0x24, 0xb8, 0xe2, 0xf0, 0x76, 0x02, 0x71, 0xa7, 0x10, 0x17, 0x47, - 0xd4, 0x35, 0x10, 0xf7, 0x68, 0x73, 0xf5, 0xe3, 0x14, 0x6b, 0xc8, 0x43, 0xee, 0x19, 0xe4, 0xe1, - 0xa0, 0x6d, 0x4e, 0xe6, 0x60, 0xbe, 0x62, 0xc6, 0x55, 0xa7, 0x7b, 0x5f, 0xba, 0x94, 0x9b, 0xb4, - 0x3e, 0x17, 0xf3, 0xb2, 0xae, 0xde, 0x4b, 0x62, 0xfa, 
0xd8, 0xef, 0x50, 0x46, 0xc4, 0xd0, 0x8b, - 0xba, 0xa1, 0x36, 0x48, 0xaf, 0x4f, 0x14, 0x9e, 0x87, 0xfa, 0xfc, 0x22, 0x94, 0x18, 0x30, 0x45, - 0xfb, 0xc4, 0x93, 0x7e, 0x87, 0xf4, 0xf1, 0x39, 0xdc, 0xa7, 0x17, 0xe1, 0x06, 0x8a, 0xf6, 0x3c, - 0xca, 0x94, 0x54, 0xe2, 0x2c, 0xc8, 0xf9, 0x2d, 0x0b, 0x0a, 0x48, 0xb7, 0x00, 0xfe, 0x00, 0xae, - 0xe8, 0x8a, 0x02, 0xac, 0x70, 0x25, 0xb3, 0x9e, 0xd9, 0x28, 0x6f, 0x7d, 0xe2, 0xc6, 0x8c, 0x6e, - 0x9a, 0xd1, 0x8d, 0xba, 0xa1, 0x36, 0x48, 0x57, 0x47, 0xbb, 0x47, 0x9b, 0xee, 0xe3, 0xc3, 0x1f, - 0x89, 0xaf, 0xf6, 0x88, 0xc2, 0x75, 0x78, 0x3c, 0xaa, 0x2d, 0x8d, 0x47, 0x35, 0x90, 0xd8, 0xd0, - 0x94, 0x15, 0xee, 0x83, 0xbc, 0x8c, 0x88, 0x5f, 0xc9, 0x1a, 0xf6, 0xbb, 0xee, 0x1b, 0x67, 0xe2, - 0x9a, 0xca, 0x9a, 0x11, 0xf1, 0xeb, 0x57, 0x2d, 0x73, 0x5e, 0x9f, 0x90, 0xe1, 0x81, 0xdf, 0x81, - 0xa2, 0x54, 0x58, 0x0d, 0x64, 0x25, 0x67, 0x18, 0xdd, 0x85, 0x19, 0x0d, 0xaa, 0x7e, 0xdd, 0x72, - 0x16, 0xe3, 0x33, 0xb2, 0x6c, 0xce, 0xaf, 0x39, 0x70, 0xd5, 0xc4, 0xed, 0xb0, 0x50, 0x10, 0x29, - 0xe1, 0x3a, 0xc8, 0x77, 0xb8, 0x54, 0xa6, 0x2d, 0xa5, 0xa4, 0x94, 0x6f, 0xb9, 0x54, 0xc8, 0x78, - 0xe0, 0x16, 0x00, 0x26, 0x85, 0xd8, 0xc7, 0x7d, 0x62, 0x2e, 0x58, 0x4a, 0x9a, 0x81, 0xa6, 0x1e, - 0x94, 0x8a, 0x82, 0x3d, 0x00, 0x7c, 0xce, 0x02, 0xaa, 0x28, 0x67, 0xfa, 0x0a, 0xb9, 0x8d, 0xf2, - 0xd6, 0xfd, 0x45, 0xaf, 0x60, 0x4b, 0xdb, 0x9e, 0x10, 0x24, 0xd9, 0xa6, 0x26, 0x89, 0x52, 0xfc, - 0xb0, 0x05, 0xae, 0x3f, 0xa7, 0xbd, 0xc0, 0xc7, 0x22, 0x38, 0xe0, 0x3d, 0xea, 0x0f, 0x2b, 0x79, - 0x53, 0xe5, 0x5d, 0x8b, 0xbb, 0xfe, 0x64, 0xc6, 0x7b, 0x3a, 0xaa, 0xc1, 0x59, 0x4b, 0x6b, 0x18, - 0x11, 0x74, 0x86, 0x03, 0x7e, 0x0f, 0x6e, 0xc6, 0x37, 0xda, 0xc6, 0x8c, 0x33, 0xea, 0xe3, 0x9e, - 0x6e, 0x0a, 0xd3, 0x4d, 0x28, 0x18, 0xfa, 0x9a, 0xa5, 0xbf, 0x89, 0xe6, 0x87, 0xa1, 0x8b, 0xf0, - 0xce, 0x3f, 0x59, 0x70, 0x63, 0xee, 0x55, 0xe1, 0x57, 0x20, 0xaf, 0x86, 0x11, 0xb1, 0xe3, 0xb8, - 0x33, 0x19, 0x87, 0x2e, 0xf0, 0x74, 0x54, 0xbb, 0x35, 0x17, 0x64, 0xaa, 0x37, 0x30, 0xd8, 0x98, - 0xae, 0x4d, 0x3c, 0xa7, 0x7b, 0xb3, 0x6b, 0x70, 0x3a, 0xaa, 0xcd, 0xf9, 0xb7, 0xdd, 0x29, 0xd3, - 0xec, 0xb2, 0xc0, 0x0f, 0x41, 0x51, 0x10, 0x2c, 0x39, 0x33, 0x4b, 0x58, 0x4a, 0x96, 0x0a, 0x19, - 0x2b, 0xb2, 0x5e, 0x78, 0x07, 0x2c, 0xf7, 0x89, 0x94, 0x38, 0x24, 0xb6, 0xf1, 0xef, 0xd8, 0xc0, - 0xe5, 0xbd, 0xd8, 0x8c, 0x26, 0x7e, 0x28, 0x00, 0xec, 0x61, 0xa9, 0x5a, 0x02, 0x33, 0x19, 0x17, - 0x4f, 0x6d, 0x3f, 0xcb, 0x5b, 0x1f, 0x2d, 0xf6, 0x4f, 0x6a, 0x44, 0xfd, 0x83, 0xf1, 0xa8, 0x06, - 0x1b, 0xe7, 0x98, 0xd0, 0x1c, 0x76, 0xe7, 0x8f, 0x0c, 0x28, 0x99, 0xc6, 0x35, 0xa8, 0x54, 0xf0, - 0xd9, 0x39, 0x2d, 0x70, 0x17, 0xcb, 0xab, 0xd1, 0x46, 0x09, 0xde, 0xb5, 0xb7, 0xbb, 0x32, 0xb1, - 0xa4, 0x74, 0x60, 0x0f, 0x14, 0xa8, 0x22, 0x7d, 0xdd, 0x7f, 0xbd, 0xf3, 0x1b, 0x8b, 0xee, 0x7c, - 0xfd, 0x9a, 0x25, 0x2d, 0xec, 0x68, 0x38, 0x8a, 0x59, 0x9c, 0x9f, 0x6c, 0xe5, 0x07, 0x5c, 0x28, - 0x18, 0x00, 0xa0, 0xb0, 0x08, 0x89, 0xd2, 0xa7, 0x37, 0xea, 0x98, 0x56, 0x46, 0x37, 0x56, 0x46, - 0x77, 0x87, 0xa9, 0xc7, 0xa2, 0xa9, 0x04, 0x65, 0x61, 0xf2, 0x33, 0xb5, 0xa6, 0x5c, 0x28, 0xc5, - 0xeb, 0xfc, 0x9e, 0xb7, 0x39, 0xb5, 0x1a, 0x2d, 0x20, 0x0f, 0xeb, 0x20, 0x1f, 0x61, 0xd5, 0xb1, - 0x0b, 0x37, 0x8d, 0x38, 0xc0, 0xaa, 0x83, 0x8c, 0x07, 0x36, 0x41, 0x56, 0x71, 0xab, 0x63, 0x5f, - 0x2c, 0xda, 0x90, 0xb8, 0x3a, 0x44, 0xda, 0x44, 0x10, 0xe6, 0x93, 0x3a, 0xb0, 0xc4, 0xd9, 0x16, - 0x47, 0x59, 0xc5, 0xe1, 0x8b, 0x0c, 0x58, 0xc1, 0x3d, 0x45, 0x04, 0xc3, 0x8a, 0xd4, 0xb1, 0xdf, - 0x25, 0x2c, 0x90, 0x95, 0xbc, 0xe9, 0xfa, 0x7f, 0x4e, 0x72, 0xcb, 0x26, 0x59, 
0x79, 0x70, 0x96, - 0x19, 0x9d, 0x4f, 0x06, 0x1f, 0x81, 0x7c, 0xa4, 0x27, 0x51, 0x78, 0x3b, 0xcd, 0xd7, 0x5d, 0xae, - 0x5f, 0x31, 0x3d, 0xd2, 0xbd, 0x37, 0x1c, 0xf0, 0x1b, 0x90, 0x53, 0x3d, 0x59, 0x29, 0x2e, 0x4c, - 0xd5, 0x6a, 0x34, 0xb7, 0x39, 0x6b, 0xd3, 0xb0, 0xbe, 0x3c, 0x1e, 0xd5, 0x72, 0xad, 0x46, 0x13, - 0x69, 0x86, 0x39, 0x5a, 0xb8, 0xfc, 0xff, 0xb5, 0xd0, 0xa1, 0xa0, 0x9c, 0x7a, 0x5d, 0xe0, 0x53, - 0xb0, 0x4c, 0x63, 0x11, 0xaa, 0x64, 0x4c, 0xc7, 0xbd, 0xb7, 0xd4, 0xf6, 0x44, 0x21, 0xac, 0x01, - 0x4d, 0x08, 0x9d, 0x5f, 0xc0, 0xfb, 0xf3, 0x66, 0xa3, 0xf7, 0xac, 0x4b, 0x59, 0x70, 0x76, 0x13, - 0x77, 0x29, 0x0b, 0x90, 0xf1, 0xe8, 0x08, 0x96, 0x3c, 0x51, 0xd3, 0x08, 0xf3, 0x38, 0x19, 0x0f, - 0x74, 0x40, 0xf1, 0x39, 0xa1, 0x61, 0x47, 0x99, 0x6d, 0x2c, 0xd4, 0x81, 0x16, 0xb3, 0x27, 0xc6, - 0x82, 0xac, 0xc7, 0xe1, 0xf6, 0xaa, 0xa2, 0xd9, 0xc1, 0x22, 0x80, 0x1e, 0x28, 0x49, 0xfd, 0x61, - 0x1e, 0xbf, 0x38, 0xf7, 0x8a, 0x65, 0x2e, 0x35, 0x27, 0x0e, 0x94, 0xc4, 0x68, 0x40, 0xc0, 0x64, - 0x73, 0xd0, 0x6e, 0xd3, 0x9f, 0x6d, 0x29, 0x53, 0xc0, 0xc3, 0xfd, 0x66, 0xec, 0x40, 0x49, 0x8c, - 0xf3, 0x67, 0x0e, 0x94, 0xa6, 0xd3, 0x84, 0xbb, 0xa0, 0xac, 0x88, 0xe8, 0x53, 0x86, 0xb5, 0x7e, - 0x9d, 0x79, 0x07, 0xca, 0xad, 0xc4, 0xa5, 0x27, 0xd7, 0x6a, 0x34, 0x53, 0x16, 0x33, 0xb9, 0x34, - 0x1a, 0x7e, 0x06, 0xca, 0x3e, 0x11, 0x8a, 0xb6, 0xa9, 0x8f, 0xd5, 0xa4, 0x31, 0xef, 0x4d, 0xc8, - 0xb6, 0x13, 0x17, 0x4a, 0xc7, 0xc1, 0x35, 0x90, 0xeb, 0x92, 0xa1, 0x15, 0xfd, 0xb2, 0x0d, 0xcf, - 0xed, 0x92, 0x21, 0xd2, 0x76, 0xf8, 0x25, 0xb8, 0xe6, 0xe3, 0x14, 0xd8, 0x8a, 0xfe, 0x0d, 0x1b, - 0x78, 0x6d, 0xfb, 0x41, 0x9a, 0x79, 0x36, 0x16, 0x3e, 0x03, 0x95, 0x80, 0x48, 0x65, 0x2b, 0x9c, - 0x09, 0xb5, 0xcf, 0xea, 0xba, 0xe5, 0xa9, 0x3c, 0xbc, 0x20, 0x0e, 0x5d, 0xc8, 0x00, 0x5f, 0x66, - 0xc0, 0x1a, 0x65, 0x92, 0xf8, 0x03, 0x41, 0xbe, 0x0e, 0x42, 0x92, 0xea, 0x8e, 0xfd, 0x1b, 0x8a, - 0x26, 0xc7, 0x23, 0x9b, 0x63, 0x6d, 0xe7, 0xb2, 0xe0, 0xd3, 0x51, 0xed, 0xf6, 0xa5, 0x01, 0xa6, - 0xe3, 0x97, 0x27, 0xac, 0x6f, 0x1c, 0x9f, 0x54, 0x97, 0x5e, 0x9d, 0x54, 0x97, 0x5e, 0x9f, 0x54, - 0x97, 0x5e, 0x8c, 0xab, 0x99, 0xe3, 0x71, 0x35, 0xf3, 0x6a, 0x5c, 0xcd, 0xbc, 0x1e, 0x57, 0x33, - 0x7f, 0x8d, 0xab, 0x99, 0x97, 0x7f, 0x57, 0x97, 0x9e, 0x66, 0x8f, 0x36, 0xff, 0x0d, 0x00, 0x00, - 0xff, 0xff, 0x3b, 0x9e, 0x66, 0x71, 0xfc, 0x0b, 0x00, 0x00, + // 1164 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xcf, 0xfa, 0x5f, 0xe2, 0x71, 0x1b, 0xc8, 0x40, 0xa9, 0x1b, 0x29, 0x76, 0xba, 0x07, 0x94, + 0xa2, 0xb2, 0x4b, 0x42, 0x81, 0x4a, 0x88, 0x43, 0x9d, 0x22, 0x48, 0xe3, 0xa4, 0xd1, 0xd8, 0xa2, + 0xa2, 0xea, 0x81, 0xc9, 0xee, 0x78, 0x3d, 0xd8, 0x9e, 0x5d, 0x66, 0xc6, 0x29, 0xbe, 0xa0, 0x4a, + 0x7c, 0x81, 0xf2, 0x6d, 0xb8, 0x73, 0xc9, 0xb1, 0xc7, 0x1e, 0x90, 0x45, 0xcc, 0x91, 0x6f, 0x90, + 0x13, 0x9a, 0xd9, 0xb1, 0x77, 0xed, 0x38, 0xa9, 0x8b, 0xb8, 0xed, 0xbc, 0xf7, 0xfb, 0xfd, 0xde, + 0x9b, 0xf7, 0xde, 0xbe, 0x01, 0xdb, 0x01, 0x95, 0xed, 0xfe, 0xb1, 0xe3, 0x85, 0x3d, 0x37, 0x8c, + 0x08, 0x13, 0x6d, 0xda, 0x92, 0x2e, 0x8e, 0xa8, 0xcb, 0xc3, 0xbe, 0x24, 0xee, 0xc9, 0xb6, 0x1b, + 0x10, 0x46, 0x38, 0x96, 0xc4, 0x77, 0x22, 0x1e, 0xca, 0x10, 0xde, 0x4e, 0x28, 0xce, 0x84, 0xe2, + 0xe0, 0x88, 0x3a, 0x9a, 0xe2, 0x9c, 0x6c, 0xaf, 0x7f, 0x9c, 0x52, 0x0d, 0xc2, 0x20, 0x74, 0x35, + 0xf3, 0xb8, 0xdf, 0xd2, 0x27, 0x7d, 0xd0, 0x5f, 0xb1, 0xe2, 0xba, 0xdd, 0xb9, 0x2f, 0x1c, 0x1a, + 0xea, 0xb0, 0x5e, 0xc8, 0xe7, 0x45, 0x5d, 0xbf, 0x97, 0x60, 0x7a, 0xd8, 0x6b, 0x53, 0x46, 
0xf8, + 0xc0, 0x8d, 0x3a, 0x81, 0x32, 0x08, 0xb7, 0x47, 0x24, 0x9e, 0xc7, 0xfa, 0xfc, 0x32, 0x16, 0xef, + 0x33, 0x49, 0x7b, 0xc4, 0x15, 0x5e, 0x9b, 0xf4, 0xf0, 0x05, 0xde, 0xa7, 0x97, 0xf1, 0xfa, 0x92, + 0x76, 0x5d, 0xca, 0xa4, 0x90, 0x7c, 0x96, 0x64, 0xff, 0x96, 0x01, 0x79, 0xa4, 0x4a, 0x00, 0x7f, + 0x00, 0x2b, 0x2a, 0x23, 0x1f, 0x4b, 0x5c, 0xb6, 0x36, 0xad, 0xad, 0xd2, 0xce, 0x27, 0x4e, 0xac, + 0xe8, 0xa4, 0x15, 0x9d, 0xa8, 0x13, 0x28, 0x83, 0x70, 0x14, 0xda, 0x39, 0xd9, 0x76, 0x1e, 0x1f, + 0xff, 0x48, 0x3c, 0x79, 0x40, 0x24, 0xae, 0xc1, 0xd3, 0x61, 0x75, 0x69, 0x34, 0xac, 0x82, 0xc4, + 0x86, 0x26, 0xaa, 0xf0, 0x10, 0xe4, 0x44, 0x44, 0xbc, 0x72, 0x46, 0xab, 0xdf, 0x75, 0xde, 0xd8, + 0x13, 0x47, 0x67, 0xd6, 0x88, 0x88, 0x57, 0xbb, 0x66, 0x94, 0x73, 0xea, 0x84, 0xb4, 0x0e, 0xfc, + 0x0e, 0x14, 0x84, 0xc4, 0xb2, 0x2f, 0xca, 0x59, 0xad, 0xe8, 0x2c, 0xac, 0xa8, 0x59, 0xb5, 0x55, + 0xa3, 0x59, 0x88, 0xcf, 0xc8, 0xa8, 0xd9, 0xbf, 0x66, 0xc1, 0x35, 0x8d, 0xdb, 0x63, 0x01, 0x27, + 0x42, 0xc0, 0x4d, 0x90, 0x6b, 0x87, 0x42, 0xea, 0xb2, 0x14, 0x93, 0x54, 0xbe, 0x0d, 0x85, 0x44, + 0xda, 0x03, 0x77, 0x00, 0xd0, 0x21, 0xf8, 0x21, 0xee, 0x11, 0x7d, 0xc1, 0x62, 0x52, 0x0c, 0x34, + 0xf1, 0xa0, 0x14, 0x0a, 0x76, 0x01, 0xf0, 0x42, 0xe6, 0x53, 0x49, 0x43, 0xa6, 0xae, 0x90, 0xdd, + 0x2a, 0xed, 0xdc, 0x5f, 0xf4, 0x0a, 0x26, 0xb5, 0xdd, 0xb1, 0x40, 0x12, 0x6d, 0x62, 0x12, 0x28, + 0xa5, 0x0f, 0x9b, 0x60, 0xf5, 0x39, 0xed, 0xfa, 0x1e, 0xe6, 0xfe, 0x51, 0xd8, 0xa5, 0xde, 0xa0, + 0x9c, 0xd3, 0x59, 0xde, 0x35, 0xbc, 0xd5, 0x27, 0x53, 0xde, 0xf3, 0x61, 0x15, 0x4e, 0x5b, 0x9a, + 0x83, 0x88, 0xa0, 0x19, 0x0d, 0xf8, 0x3d, 0xb8, 0x19, 0xdf, 0x68, 0x17, 0xb3, 0x90, 0x51, 0x0f, + 0x77, 0x55, 0x51, 0x98, 0x2a, 0x42, 0x5e, 0xcb, 0x57, 0x8d, 0xfc, 0x4d, 0x34, 0x1f, 0x86, 0x2e, + 0xe3, 0xdb, 0xff, 0x64, 0xc0, 0x8d, 0xb9, 0x57, 0x85, 0x5f, 0x81, 0x9c, 0x1c, 0x44, 0xc4, 0xb4, + 0xe3, 0xce, 0xb8, 0x1d, 0x2a, 0xc1, 0xf3, 0x61, 0xf5, 0xd6, 0x5c, 0x92, 0xce, 0x5e, 0xd3, 0x60, + 0x7d, 0x32, 0x36, 0x71, 0x9f, 0xee, 0x4d, 0x8f, 0xc1, 0xf9, 0xb0, 0x3a, 0xe7, 0xdf, 0x76, 0x26, + 0x4a, 0xd3, 0xc3, 0x02, 0x3f, 0x04, 0x05, 0x4e, 0xb0, 0x08, 0x99, 0x1e, 0xc2, 0x62, 0x32, 0x54, + 0x48, 0x5b, 0x91, 0xf1, 0xc2, 0x3b, 0x60, 0xb9, 0x47, 0x84, 0xc0, 0x01, 0x31, 0x85, 0x7f, 0xc7, + 0x00, 0x97, 0x0f, 0x62, 0x33, 0x1a, 0xfb, 0x21, 0x07, 0xb0, 0x8b, 0x85, 0x6c, 0x72, 0xcc, 0x44, + 0x9c, 0x3c, 0x35, 0xf5, 0x2c, 0xed, 0x7c, 0xb4, 0xd8, 0x3f, 0xa9, 0x18, 0xb5, 0x0f, 0x46, 0xc3, + 0x2a, 0xac, 0x5f, 0x50, 0x42, 0x73, 0xd4, 0xed, 0xdf, 0x2d, 0x50, 0xd4, 0x85, 0xab, 0x53, 0x21, + 0xe1, 0xb3, 0x0b, 0xbb, 0xc0, 0x59, 0x2c, 0xae, 0x62, 0xeb, 0x4d, 0xf0, 0xae, 0xb9, 0xdd, 0xca, + 0xd8, 0x92, 0xda, 0x03, 0x07, 0x20, 0x4f, 0x25, 0xe9, 0xa9, 0xfa, 0xab, 0x99, 0xdf, 0x5a, 0x74, + 0xe6, 0x6b, 0xd7, 0x8d, 0x68, 0x7e, 0x4f, 0xd1, 0x51, 0xac, 0x62, 0xff, 0x64, 0x32, 0x3f, 0x0a, + 0xb9, 0x84, 0x3e, 0x00, 0x12, 0xf3, 0x80, 0x48, 0x75, 0x7a, 0xe3, 0x1e, 0x53, 0x9b, 0xd1, 0x89, + 0x37, 0xa3, 0xb3, 0xc7, 0xe4, 0x63, 0xde, 0x90, 0x9c, 0xb2, 0x20, 0xf9, 0x99, 0x9a, 0x13, 0x2d, + 0x94, 0xd2, 0xb5, 0xff, 0xc8, 0x99, 0x98, 0x6a, 0x1b, 0x2d, 0xb0, 0x1e, 0x36, 0x41, 0x2e, 0xc2, + 0xb2, 0x6d, 0x06, 0x6e, 0x82, 0x38, 0xc2, 0xb2, 0x8d, 0xb4, 0x07, 0x36, 0x40, 0x46, 0x86, 0x66, + 0x8f, 0x7d, 0xb1, 0x68, 0x41, 0xe2, 0xec, 0x10, 0x69, 0x11, 0x4e, 0x98, 0x47, 0x6a, 0xc0, 0x08, + 0x67, 0x9a, 0x21, 0xca, 0xc8, 0x10, 0xbe, 0xb0, 0xc0, 0x1a, 0xee, 0x4a, 0xc2, 0x19, 0x96, 0xa4, + 0x86, 0xbd, 0x0e, 0x61, 0xbe, 0x28, 0xe7, 0x74, 0xd5, 0xff, 0x73, 0x90, 0x5b, 0x26, 0xc8, 0xda, + 0x83, 0x59, 0x65, 
0x74, 0x31, 0x18, 0x7c, 0x04, 0x72, 0x91, 0xea, 0x44, 0xfe, 0xed, 0x76, 0xbe, + 0xaa, 0x72, 0x6d, 0x45, 0xd7, 0x48, 0xd5, 0x5e, 0x6b, 0xc0, 0x6f, 0x40, 0x56, 0x76, 0x45, 0xb9, + 0xb0, 0xb0, 0x54, 0xb3, 0xde, 0xd8, 0x0d, 0x59, 0x8b, 0x06, 0xb5, 0xe5, 0xd1, 0xb0, 0x9a, 0x6d, + 0xd6, 0x1b, 0x48, 0x29, 0xcc, 0xd9, 0x85, 0xcb, 0xff, 0xc3, 0x2e, 0x74, 0x41, 0x51, 0xf4, 0x8f, + 0xfd, 0xb0, 0x87, 0x29, 0x2b, 0xaf, 0x68, 0xc1, 0x35, 0x23, 0x58, 0x6c, 0x8c, 0x1d, 0x28, 0xc1, + 0xd8, 0x14, 0x94, 0x52, 0xcf, 0x11, 0x7c, 0x0a, 0x96, 0x69, 0xbc, 0xb5, 0xca, 0x96, 0x6e, 0x91, + 0xfb, 0x96, 0x8f, 0x41, 0xb2, 0x52, 0x8c, 0x01, 0x8d, 0x05, 0xed, 0x5f, 0xc0, 0xfb, 0xf3, 0x9a, + 0xa9, 0x06, 0xb3, 0x43, 0x99, 0x3f, 0x3b, 0xba, 0xfb, 0x94, 0xf9, 0x48, 0x7b, 0x14, 0x82, 0x25, + 0x6f, 0xda, 0x04, 0xa1, 0x5f, 0x33, 0xed, 0x81, 0x36, 0x28, 0x3c, 0x27, 0x34, 0x68, 0x4b, 0x3d, + 0xbe, 0xf9, 0x1a, 0x50, 0xdb, 0xef, 0x89, 0xb6, 0x20, 0xe3, 0xb1, 0x43, 0x73, 0x55, 0xde, 0x68, + 0x63, 0xee, 0xeb, 0x52, 0xa9, 0x0f, 0xfd, 0x5a, 0x5a, 0x33, 0xa5, 0x1a, 0x3b, 0x50, 0x82, 0x51, + 0x04, 0x9f, 0x89, 0x46, 0xbf, 0xd5, 0xa2, 0x3f, 0x9b, 0x54, 0x26, 0x84, 0x87, 0x87, 0x8d, 0xd8, + 0x81, 0x12, 0x8c, 0xfd, 0x67, 0x16, 0x14, 0x27, 0xed, 0x87, 0xfb, 0xa0, 0x24, 0x09, 0xef, 0x51, + 0x86, 0xd5, 0xc2, 0x9b, 0x79, 0x38, 0x4a, 0xcd, 0xc4, 0xa5, 0x5a, 0xdd, 0xac, 0x37, 0x52, 0x16, + 0xdd, 0xea, 0x34, 0x1b, 0x7e, 0x06, 0x4a, 0x1e, 0xe1, 0x92, 0xb6, 0xa8, 0x87, 0xe5, 0xb8, 0x30, + 0xef, 0x8d, 0xc5, 0x76, 0x13, 0x17, 0x4a, 0xe3, 0xe0, 0x06, 0xc8, 0x76, 0xc8, 0xc0, 0xbc, 0x12, + 0x25, 0x03, 0xcf, 0xee, 0x93, 0x01, 0x52, 0x76, 0xf8, 0x25, 0xb8, 0xee, 0xe1, 0x14, 0xd9, 0xbc, + 0x12, 0x37, 0x0c, 0xf0, 0xfa, 0xee, 0x83, 0xb4, 0xf2, 0x34, 0x16, 0x3e, 0x03, 0x65, 0x9f, 0x08, + 0x69, 0x32, 0x9c, 0x82, 0x9a, 0x77, 0x78, 0xd3, 0xe8, 0x94, 0x1f, 0x5e, 0x82, 0x43, 0x97, 0x2a, + 0xc0, 0x97, 0x16, 0xd8, 0xa0, 0x4c, 0x10, 0xaf, 0xcf, 0xc9, 0xd7, 0x7e, 0x40, 0x52, 0xd5, 0x31, + 0xbf, 0x4f, 0x41, 0xc7, 0x78, 0x64, 0x62, 0x6c, 0xec, 0x5d, 0x05, 0x3e, 0x1f, 0x56, 0x6f, 0x5f, + 0x09, 0xd0, 0x15, 0xbf, 0x3a, 0x60, 0x6d, 0xeb, 0xf4, 0xac, 0xb2, 0xf4, 0xea, 0xac, 0xb2, 0xf4, + 0xfa, 0xac, 0xb2, 0xf4, 0x62, 0x54, 0xb1, 0x4e, 0x47, 0x15, 0xeb, 0xd5, 0xa8, 0x62, 0xbd, 0x1e, + 0x55, 0xac, 0xbf, 0x46, 0x15, 0xeb, 0xe5, 0xdf, 0x95, 0xa5, 0xa7, 0x99, 0x93, 0xed, 0x7f, 0x03, + 0x00, 0x00, 0xff, 0xff, 0x6b, 0xe5, 0x21, 0x65, 0x2d, 0x0c, 0x00, 0x00, } diff --git a/vendor/github.com/openshift/api/route/v1/generated.proto b/vendor/github.com/openshift/api/route/v1/generated.proto index c75f8bb025..00d9751333 100644 --- a/vendor/github.com/openshift/api/route/v1/generated.proto +++ b/vendor/github.com/openshift/api/route/v1/generated.proto @@ -121,7 +121,23 @@ message RouteSpec { // Must follow DNS952 subdomain conventions. optional string host = 1; - // Path that the router watches for, to route traffic for to the service. Optional + // subdomain is a DNS subdomain that is requested within the ingress controller's + // domain (as a subdomain). If host is set this field is ignored. An ingress + // controller may choose to ignore this suggested name, in which case the controller + // will report the assigned name in the status.ingress array or refuse to admit the + // route. If this value is set and the server does not support this field host will + // be populated automatically. Otherwise host is left empty. The field may have + // multiple parts separated by a dot, but not all ingress controllers may honor + // the request. 
This field may not be changed after creation except by a user with + // the update routes/custom-host permission. + // + // Example: subdomain `frontend` automatically receives the router subdomain + // `apps.mycluster.com` to have a full hostname `frontend.apps.mycluster.com`. + // + // +optional + optional string subdomain = 8; + + // path that the router watches for, to route traffic for to the service. Optional optional string path = 2; // to is an object the route should use as the primary backend. Only the Service kind diff --git a/vendor/github.com/openshift/api/route/v1/types.go b/vendor/github.com/openshift/api/route/v1/types.go index 6c14ae7187..b7cee760ac 100644 --- a/vendor/github.com/openshift/api/route/v1/types.go +++ b/vendor/github.com/openshift/api/route/v1/types.go @@ -70,7 +70,23 @@ type RouteSpec struct { // chosen. // Must follow DNS952 subdomain conventions. Host string `json:"host" protobuf:"bytes,1,opt,name=host"` - // Path that the router watches for, to route traffic for to the service. Optional + // subdomain is a DNS subdomain that is requested within the ingress controller's + // domain (as a subdomain). If host is set this field is ignored. An ingress + // controller may choose to ignore this suggested name, in which case the controller + // will report the assigned name in the status.ingress array or refuse to admit the + // route. If this value is set and the server does not support this field host will + // be populated automatically. Otherwise host is left empty. The field may have + // multiple parts separated by a dot, but not all ingress controllers may honor + // the request. This field may not be changed after creation except by a user with + // the update routes/custom-host permission. + // + // Example: subdomain `frontend` automatically receives the router subdomain + // `apps.mycluster.com` to have a full hostname `frontend.apps.mycluster.com`. + // + // +optional + Subdomain string `json:"subdomain" protobuf:"bytes,8,opt,name=subdomain"` + + // path that the router watches for, to route traffic for to the service. Optional Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` // to is an object the route should use as the primary backend. Only the Service kind diff --git a/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go index f0727d93fd..7d0cb5e2b4 100644 --- a/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go @@ -70,7 +70,8 @@ func (RoutePort) SwaggerDoc() map[string]string { var map_RouteSpec = map[string]string{ "": "RouteSpec describes the hostname or path the route exposes, any security information, and one to four backends (services) the route points to. Requests are distributed among the backends depending on the weights assigned to each backend. When using roundrobin scheduling the portion of requests that go to each backend is the backend weight divided by the sum of all of the backend weights. When the backend has more than one endpoint the requests that end up on the backend are roundrobin distributed among the endpoints. Weights are between 0 and 256 with default 1. Weight 0 causes no requests to the backend. 
If all weights are zero the route will be considered to have no backends and return a standard 503 response.\n\nThe `tls` field is optional and allows specific certificates or behavior for the route. Routers typically configure a default certificate on a wildcard domain to terminate routes without explicit certificates, but custom hostnames usually must choose passthrough (send traffic directly to the backend via the TLS Server-Name- Indication field) or provide a certificate.", "host": "host is an alias/DNS that points to the service. Optional. If not specified a route name will typically be automatically chosen. Must follow DNS952 subdomain conventions.", - "path": "Path that the router watches for, to route traffic for to the service. Optional", + "subdomain": "subdomain is a DNS subdomain that is requested within the ingress controller's domain (as a subdomain). If host is set this field is ignored. An ingress controller may choose to ignore this suggested name, in which case the controller will report the assigned name in the status.ingress array or refuse to admit the route. If this value is set and the server does not support this field host will be populated automatically. Otherwise host is left empty. The field may have multiple parts separated by a dot, but not all ingress controllers may honor the request. This field may not be changed after creation except by a user with the update routes/custom-host permission.\n\nExample: subdomain `frontend` automatically receives the router subdomain `apps.mycluster.com` to have a full hostname `frontend.apps.mycluster.com`.", + "path": "path that the router watches for, to route traffic for to the service. Optional", "to": "to is an object the route should use as the primary backend. Only the Service kind is allowed, and it will be defaulted to Service. If the weight field (0-256 default 1) is set to zero, no traffic will be sent to this backend.", "alternateBackends": "alternateBackends allows up to 3 additional backends to be assigned to the route. Only the Service kind is allowed, and it will be defaulted to Service. Use the weight field in RouteTargetReference object to specify relative preference.", "port": "If specified, the port to be used by the router. Most routers will use all endpoints exposed by the service by default - set this value to instruct routers which port to use.", diff --git a/vendor/github.com/openshift/api/security/v1/generated.proto b/vendor/github.com/openshift/api/security/v1/generated.proto index 0df3a30533..0a16d746f5 100644 --- a/vendor/github.com/openshift/api/security/v1/generated.proto +++ b/vendor/github.com/openshift/api/security/v1/generated.proto @@ -178,6 +178,7 @@ message SELinuxContextStrategyOptions { // That exposure is deprecated and will be removed in a future release - users // should instead use the security.openshift.io group to manage // SecurityContextConstraints. +// +kubebuilder:singular=securitycontextconstraint message SecurityContextConstraints { // Standard object's metadata. // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata @@ -189,6 +190,7 @@ message SecurityContextConstraints { // for multiple SCCs are equal they will be sorted from most restrictive to // least restrictive. If both priorities and restrictions are equal the // SCCs will be sorted by name. + // +nullable optional int32 priority = 2; // AllowPrivilegedContainer determines if a container can request to be run as privileged. 
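Aside on the new `subdomain` field introduced above: a minimal, hypothetical sketch of a client requesting a router-generated hostname instead of pinning `host`. All names below are placeholders, and, per the doc comment, an ingress controller may ignore the suggestion or refuse to admit the route; the assigned name is reported in `status.ingress`.

package example

import (
	routev1 "github.com/openshift/api/route/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newFrontendRoute asks the router for the "frontend" subdomain under
// its own domain (e.g. frontend.apps.mycluster.com) rather than fixing
// a full hostname. If host were set, subdomain would be ignored.
func newFrontendRoute() *routev1.Route {
	weight := int32(100)
	return &routev1.Route{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "frontend", // hypothetical
			Namespace: "demo",     // hypothetical
		},
		Spec: routev1.RouteSpec{
			Subdomain: "frontend",
			To: routev1.RouteTargetReference{
				Kind:   "Service",
				Name:   "frontend",
				Weight: &weight,
			},
		},
	}
}

Note that the field complements host rather than replacing it: when host is set, subdomain is ignored.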
@@ -197,16 +199,19 @@ message SecurityContextConstraints { // DefaultAddCapabilities is the default set of capabilities that will be added to the container // unless the pod spec specifically drops the capability. You may not list a capabiility in both // DefaultAddCapabilities and RequiredDropCapabilities. + // +nullable repeated string defaultAddCapabilities = 4; // RequiredDropCapabilities are the capabilities that will be dropped from the container. These // are required to be dropped and cannot be added. + // +nullable repeated string requiredDropCapabilities = 5; // AllowedCapabilities is a list of capabilities that can be requested to add to the container. // Capabilities in this field maybe added at the pod author's discretion. // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. // To allow all capabilities you may use '*'. + // +nullable repeated string allowedCapabilities = 6; // AllowHostDirVolumePlugin determines if the policy allow containers to use the HostDir volume plugin @@ -216,12 +221,14 @@ message SecurityContextConstraints { // Volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names // of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use "*". // To allow no volumes, set to ["none"]. + // +nullable repeated string volumes = 8; // AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes // is allowed in the "Volumes" field. // +optional + // +nullable repeated AllowedFlexVolume allowedFlexVolumes = 21; // AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec. @@ -239,23 +246,29 @@ message SecurityContextConstraints { // DefaultAllowPrivilegeEscalation controls the default setting for whether a // process can gain more privileges than its parent process. // +optional + // +nullable optional bool defaultAllowPrivilegeEscalation = 22; // AllowPrivilegeEscalation determines if a pod can request to allow // privilege escalation. If unspecified, defaults to true. // +optional + // +nullable optional bool allowPrivilegeEscalation = 23; // SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext. + // +nullable optional SELinuxContextStrategyOptions seLinuxContext = 13; // RunAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext. + // +nullable optional RunAsUserStrategyOptions runAsUser = 14; // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. + // +nullable optional SupplementalGroupsStrategyOptions supplementalGroups = 15; // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. + // +nullable optional FSGroupStrategyOptions fsGroup = 16; // ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file @@ -267,10 +280,12 @@ message SecurityContextConstraints { // The users who have permissions to use this security context constraints // +optional + // +nullable repeated string users = 18; // The groups that have permission to use this security context constraints // +optional + // +nullable repeated string groups = 19; // SeccompProfiles lists the allowed profiles that may be set for the pod or @@ -278,6 +293,7 @@ message SecurityContextConstraints { // be specifid by the pod or container. 
The wildcard '*' may be used to allow all profiles. When // used to generate a value for a pod the first non-wildcard profile will be used as // the default. + // +nullable repeated string seccompProfiles = 20; // AllowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. @@ -289,6 +305,7 @@ message SecurityContextConstraints { // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. // +optional + // +nullable repeated string allowedUnsafeSysctls = 24; // ForbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. @@ -299,6 +316,7 @@ message SecurityContextConstraints { // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. // +optional + // +nullable repeated string forbiddenSysctls = 25; } diff --git a/vendor/github.com/openshift/api/security/v1/types.go b/vendor/github.com/openshift/api/security/v1/types.go index ad3a3ca805..811a1820f8 100644 --- a/vendor/github.com/openshift/api/security/v1/types.go +++ b/vendor/github.com/openshift/api/security/v1/types.go @@ -20,6 +20,7 @@ var AllowAllCapabilities corev1.Capability = "*" // That exposure is deprecated and will be removed in a future release - users // should instead use the security.openshift.io group to manage // SecurityContextConstraints. +// +kubebuilder:singular=securitycontextconstraint type SecurityContextConstraints struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -32,6 +33,7 @@ type SecurityContextConstraints struct { // for multiple SCCs are equal they will be sorted from most restrictive to // least restrictive. If both priorities and restrictions are equal the // SCCs will be sorted by name. + // +nullable Priority *int32 `json:"priority" protobuf:"varint,2,opt,name=priority"` // AllowPrivilegedContainer determines if a container can request to be run as privileged. @@ -39,14 +41,17 @@ type SecurityContextConstraints struct { // DefaultAddCapabilities is the default set of capabilities that will be added to the container // unless the pod spec specifically drops the capability. You may not list a capabiility in both // DefaultAddCapabilities and RequiredDropCapabilities. + // +nullable DefaultAddCapabilities []corev1.Capability `json:"defaultAddCapabilities" protobuf:"bytes,4,rep,name=defaultAddCapabilities,casttype=Capability"` // RequiredDropCapabilities are the capabilities that will be dropped from the container. These // are required to be dropped and cannot be added. + // +nullable RequiredDropCapabilities []corev1.Capability `json:"requiredDropCapabilities" protobuf:"bytes,5,rep,name=requiredDropCapabilities,casttype=Capability"` // AllowedCapabilities is a list of capabilities that can be requested to add to the container. // Capabilities in this field maybe added at the pod author's discretion. // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. // To allow all capabilities you may use '*'. + // +nullable AllowedCapabilities []corev1.Capability `json:"allowedCapabilities" protobuf:"bytes,6,rep,name=allowedCapabilities,casttype=Capability"` // AllowHostDirVolumePlugin determines if the policy allow containers to use the HostDir volume plugin // +k8s:conversion-gen=false @@ -54,11 +59,13 @@ type SecurityContextConstraints struct { // Volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names // of a VolumeSource (azureFile, configMap, emptyDir). 
To allow all volumes you may use "*". // To allow no volumes, set to ["none"]. + // +nullable Volumes []FSType `json:"volumes" protobuf:"bytes,8,rep,name=volumes,casttype=FSType"` // AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes // is allowed in the "Volumes" field. // +optional + // +nullable AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,21,rep,name=allowedFlexVolumes"` // AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec. AllowHostNetwork bool `json:"allowHostNetwork" protobuf:"varint,9,opt,name=allowHostNetwork"` @@ -71,18 +78,24 @@ type SecurityContextConstraints struct { // DefaultAllowPrivilegeEscalation controls the default setting for whether a // process can gain more privileges than its parent process. // +optional + // +nullable DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,22,rep,name=defaultAllowPrivilegeEscalation"` // AllowPrivilegeEscalation determines if a pod can request to allow // privilege escalation. If unspecified, defaults to true. // +optional + // +nullable AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,23,rep,name=allowPrivilegeEscalation"` // SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext. + // +nullable SELinuxContext SELinuxContextStrategyOptions `json:"seLinuxContext,omitempty" protobuf:"bytes,13,opt,name=seLinuxContext"` // RunAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext. + // +nullable RunAsUser RunAsUserStrategyOptions `json:"runAsUser,omitempty" protobuf:"bytes,14,opt,name=runAsUser"` // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. + // +nullable SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups,omitempty" protobuf:"bytes,15,opt,name=supplementalGroups"` // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. + // +nullable FSGroup FSGroupStrategyOptions `json:"fsGroup,omitempty" protobuf:"bytes,16,opt,name=fsGroup"` // ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file // system. If the container specifically requests to run with a non-read only root file system @@ -93,9 +106,11 @@ type SecurityContextConstraints struct { // The users who have permissions to use this security context constraints // +optional + // +nullable Users []string `json:"users" protobuf:"bytes,18,rep,name=users"` // The groups that have permission to use this security context constraints // +optional + // +nullable Groups []string `json:"groups" protobuf:"bytes,19,rep,name=groups"` // SeccompProfiles lists the allowed profiles that may be set for the pod or @@ -103,6 +118,7 @@ type SecurityContextConstraints struct { // be specifid by the pod or container. The wildcard '*' may be used to allow all profiles. When // used to generate a value for a pod the first non-wildcard profile will be used as // the default. + // +nullable SeccompProfiles []string `json:"seccompProfiles,omitempty" protobuf:"bytes,20,opt,name=seccompProfiles"` // AllowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. @@ -114,6 +130,7 @@ type SecurityContextConstraints struct { // e.g. 
"foo/*" allows "foo/bar", "foo/baz", etc. // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. // +optional + // +nullable AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty" protobuf:"bytes,24,rep,name=allowedUnsafeSysctls"` // ForbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered @@ -123,6 +140,7 @@ type SecurityContextConstraints struct { // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. // +optional + // +nullable ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty" protobuf:"bytes,25,rep,name=forbiddenSysctls"` } diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/Gopkg.lock b/vendor/github.com/openshift/cluster-api-actuator-pkg/Gopkg.lock index 33b0a07797..84d60d8bd6 100644 --- a/vendor/github.com/openshift/cluster-api-actuator-pkg/Gopkg.lock +++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/Gopkg.lock @@ -364,12 +364,12 @@ version = "v1.4.2" [[projects]] - branch = "master" - digest = "1:1267d3ab59b1de9f72b0bd74615c0926814806fbf2a6136cffc0fe1ee66a22f4" + branch = "release-4.1" + digest = "1:77bab7528f0bce010caeea4ead88253b4b69cbbba5c371d96e59f34d1c64fa83" name = "github.com/openshift/api" packages = ["config/v1"] pruneopts = "" - revision = "77b8897ec79a562e85920134fc65b63300c4d27a" + revision = "9ea19f9dd57858bb3fd8ec7051a99ca5e1ae88d6" [[projects]] branch = "openshift-4.0-cluster-api-0.0.0-alpha.4" @@ -392,8 +392,8 @@ revision = "0b649f443f830353db113f92aa86db2d4d34943a" [[projects]] - branch = "master" - digest = "1:6d48343e436f36fdfacd82c48d37b891f44e44cbcdbbb57d42b8a333927716a4" + branch = "release-4.1" + digest = "1:08a57d4e03b56f4c058da4970ee3d4b711d24035a070d487231adf9795681ae8" name = "github.com/openshift/cluster-autoscaler-operator" packages = [ "pkg/apis", @@ -401,7 +401,7 @@ "pkg/apis/autoscaling/v1beta1", ] pruneopts = "" - revision = "17350a84b40565851a3f1bd21c6f8a6763b8eacb" + revision = "dfdc9d81b23009362ba8b280df8e039466bf6f9f" [[projects]] branch = "master" @@ -847,6 +847,52 @@ packages = [ "discovery", "dynamic", + "informers", + "informers/admissionregistration", + "informers/admissionregistration/v1alpha1", + "informers/admissionregistration/v1beta1", + "informers/apps", + "informers/apps/v1", + "informers/apps/v1beta1", + "informers/apps/v1beta2", + "informers/auditregistration", + "informers/auditregistration/v1alpha1", + "informers/autoscaling", + "informers/autoscaling/v1", + "informers/autoscaling/v2beta1", + "informers/autoscaling/v2beta2", + "informers/batch", + "informers/batch/v1", + "informers/batch/v1beta1", + "informers/batch/v2alpha1", + "informers/certificates", + "informers/certificates/v1beta1", + "informers/coordination", + "informers/coordination/v1beta1", + "informers/core", + "informers/core/v1", + "informers/events", + "informers/events/v1beta1", + "informers/extensions", + "informers/extensions/v1beta1", + "informers/internalinterfaces", + "informers/networking", + "informers/networking/v1", + "informers/policy", + "informers/policy/v1beta1", + "informers/rbac", + "informers/rbac/v1", + "informers/rbac/v1alpha1", + "informers/rbac/v1beta1", + "informers/scheduling", + "informers/scheduling/v1alpha1", + "informers/scheduling/v1beta1", + "informers/settings", + "informers/settings/v1alpha1", + "informers/storage", + "informers/storage/v1", + "informers/storage/v1alpha1", + "informers/storage/v1beta1", "kubernetes", 
"kubernetes/scheme", "kubernetes/typed/admissionregistration/v1alpha1", @@ -881,6 +927,34 @@ "kubernetes/typed/storage/v1", "kubernetes/typed/storage/v1alpha1", "kubernetes/typed/storage/v1beta1", + "listers/admissionregistration/v1alpha1", + "listers/admissionregistration/v1beta1", + "listers/apps/v1", + "listers/apps/v1beta1", + "listers/apps/v1beta2", + "listers/auditregistration/v1alpha1", + "listers/autoscaling/v1", + "listers/autoscaling/v2beta1", + "listers/autoscaling/v2beta2", + "listers/batch/v1", + "listers/batch/v1beta1", + "listers/batch/v2alpha1", + "listers/certificates/v1beta1", + "listers/coordination/v1beta1", + "listers/core/v1", + "listers/events/v1beta1", + "listers/extensions/v1beta1", + "listers/networking/v1", + "listers/policy/v1beta1", + "listers/rbac/v1", + "listers/rbac/v1alpha1", + "listers/rbac/v1beta1", + "listers/scheduling/v1alpha1", + "listers/scheduling/v1beta1", + "listers/settings/v1alpha1", + "listers/storage/v1", + "listers/storage/v1alpha1", + "listers/storage/v1beta1", "pkg/apis/clientauthentication", "pkg/apis/clientauthentication/v1alpha1", "pkg/apis/clientauthentication/v1beta1", @@ -1097,6 +1171,7 @@ "k8s.io/apimachinery/pkg/util/wait", "k8s.io/client-go/discovery", "k8s.io/client-go/dynamic", + "k8s.io/client-go/informers", "k8s.io/client-go/kubernetes", "k8s.io/client-go/kubernetes/scheme", "k8s.io/client-go/plugin/pkg/client/auth/gcp", diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/Gopkg.toml b/vendor/github.com/openshift/cluster-api-actuator-pkg/Gopkg.toml index 184352c5c6..99a0d6623f 100644 --- a/vendor/github.com/openshift/cluster-api-actuator-pkg/Gopkg.toml +++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/Gopkg.toml @@ -43,7 +43,7 @@ required = [ [[constraint]] name = "github.com/openshift/api" - branch = "master" + branch = "release-4.1" [[constraint]] name = "github.com/openshift/cluster-api" @@ -51,7 +51,7 @@ required = [ [[constraint]] name = "github.com/openshift/cluster-autoscaler-operator" - branch = "master" + branch = "release-4.1" [[constraint]] name = "github.com/openshift/machine-api-operator" diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/autoscaler.go b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/autoscaler.go index 012cd89f71..33a8528294 100644 --- a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/autoscaler.go +++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/autoscaler.go @@ -3,6 +3,8 @@ package autoscaler import ( "context" "fmt" + "path" + "strings" "time" "github.com/golang/glog" @@ -16,21 +18,22 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/uuid" - "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/pointer" runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) const ( - autoscalingTestLabel = "test.autoscaling.label" + autoscalingTestLabel = "test.autoscaling.label" + clusterAutoscalerComponent = "cluster-autoscaler" + clusterAutoscalerObjectKind = "ConfigMap" + clusterAutoscalerScaledUpGroup = "ScaledUpGroup" + clusterAutoscalerScaleDownEmpty = "ScaleDownEmpty" + clusterAutoscalerMaxNodesTotalReached = "MaxNodesTotalReached" + pollingInterval = 3 * time.Second ) func newWorkLoad() *batchv1.Job { - backoffLimit := int32(4) - completions := int32(50) - parallelism := int32(50) return 
&batchv1.Job{ ObjectMeta: metav1.ObjectMeta{ Name: "workload", @@ -72,59 +75,15 @@ func newWorkLoad() *batchv1.Job { }, }, }, - BackoffLimit: &backoffLimit, - Completions: &completions, - Parallelism: ¶llelism, + BackoffLimit: pointer.Int32Ptr(4), + Completions: pointer.Int32Ptr(50), + Parallelism: pointer.Int32Ptr(50), }, } } -func labelMachineSetNodes(client runtimeclient.Client, ms *mapiv1beta1.MachineSet, nodeTestLabel string) error { - return wait.PollImmediate(e2e.RetryMedium, e2e.WaitShort, func() (bool, error) { - scaledMachines := mapiv1beta1.MachineList{} - if err := client.List(context.TODO(), runtimeclient.MatchingLabels(ms.Spec.Selector.MatchLabels), &scaledMachines); err != nil { - glog.Errorf("Error querying api for machineset object: %v, retrying...", err) - return false, nil - } - - // get all linked nodes and label them - for _, machine := range scaledMachines.Items { - if machine.Status.NodeRef == nil { - glog.Errorf("Machine %q does not have node reference set", machine.Name) - return false, nil - } - node := corev1.Node{} - if err := client.Get(context.TODO(), types.NamespacedName{Name: machine.Status.NodeRef.Name}, &node); err != nil { - glog.Errorf("error querying api for node object: %v, retrying...", err) - return false, nil - } - - labelNode := false - if node.Labels == nil { - labelNode = true - } else if _, exists := node.Labels[nodeTestLabel]; !exists { - labelNode = true - } - - if labelNode { - nodeCopy := node.DeepCopy() - if nodeCopy.Labels == nil { - nodeCopy.Labels = make(map[string]string) - } - nodeCopy.Labels[nodeTestLabel] = "" - if err := client.Update(context.TODO(), nodeCopy); err != nil { - glog.Errorf("error updating api for node object: %v, retrying...", err) - return false, nil - } - glog.Infof("Labeling node %q with %q label", nodeCopy.Name, nodeTestLabel) - } - } - return true, nil - }) -} - // Build default CA resource to allow fast scaling up and down -func clusterAutoscalerResource() *caov1.ClusterAutoscaler { +func clusterAutoscalerResource(maxNodesTotal int) *caov1.ClusterAutoscaler { tenSecondString := "10s" return &caov1.ClusterAutoscaler{ ObjectMeta: metav1.ObjectMeta{ @@ -146,6 +105,9 @@ func clusterAutoscalerResource() *caov1.ClusterAutoscaler { DelayAfterFailure: &tenSecondString, UnneededTime: &tenSecondString, }, + ResourceLimits: &caov1.ResourceLimits{ + MaxNodesTotal: pointer.Int32Ptr(int32(maxNodesTotal)), + }, }, } } @@ -176,165 +138,182 @@ func machineAutoscalerResource(targetMachineSet *mapiv1beta1.MachineSet, minRepl } } -var _ = g.Describe("[Feature:Machines] Autoscaler should", func() { - defer g.GinkgoRecover() - scaleUpFunc := func(client runtimeclient.Client, targetMachineSet mapiv1beta1.MachineSet, nodeTestLabel string, initialNumberOfReplicas int32, expectedReplicas int32) { - g.By(fmt.Sprintf("Creating MachineAutoscaler object for targetMachineSet %q", targetMachineSet.Name)) - machineAutoscaler := machineAutoscalerResource(&targetMachineSet, 1, expectedReplicas) - err := client.Create(context.TODO(), machineAutoscaler) - o.Expect(err).NotTo(o.HaveOccurred()) - - g.By(fmt.Sprintf("Waiting for cluster to scale out number of replicas of targetMachineSet %q", targetMachineSet.Name)) - err = wait.PollImmediate(5*time.Second, e2e.WaitLong, func() (bool, error) { - ms, err := e2e.GetMachineSet(context.TODO(), client, targetMachineSet.Name) - if err != nil { - glog.Errorf("error getting machineset object: %v, retrying...", err) - return false, nil - } - glog.Infof("MachineSet %q. Initial number of replicas: %d. 
Current number of replicas: %d", targetMachineSet.Name, initialNumberOfReplicas, pointer.Int32PtrDerefOr(ms.Spec.Replicas, e2e.DefaultMachineSetReplicas)) - return pointer.Int32PtrDerefOr(ms.Spec.Replicas, e2e.DefaultMachineSetReplicas) == expectedReplicas, nil - }) - o.Expect(err).NotTo(o.HaveOccurred()) +func newScaleUpCounter(w *eventWatcher, v uint32, scaledGroups map[string]bool) *eventCounter { + isAutoscalerScaleUpEvent := func(event *corev1.Event) bool { + return event.Source.Component == clusterAutoscalerComponent && + event.Reason == clusterAutoscalerScaledUpGroup && + event.InvolvedObject.Kind == clusterAutoscalerObjectKind && + strings.HasPrefix(event.Message, "Scale-up: setting group") + } - g.By(fmt.Sprintf("Waiting for cluster to scale up nodes for targetMachineSet %q", targetMachineSet.Name)) - err = wait.PollImmediate(5*time.Second, e2e.WaitLong, func() (bool, error) { - scaledMachines := mapiv1beta1.MachineList{} - if err := client.List(context.TODO(), runtimeclient.MatchingLabels(targetMachineSet.Spec.Selector.MatchLabels), &scaledMachines); err != nil { - glog.Errorf("Error querying api for machineset object: %v, retrying...", err) - return false, nil + matchGroup := func(event *corev1.Event) bool { + if !isAutoscalerScaleUpEvent(event) { + return false + } + for k := range scaledGroups { + if !scaledGroups[k] && strings.HasPrefix(event.Message, fmt.Sprintf("Scale-up: group %s size set to", k)) { + scaledGroups[k] = true } + } + return true + } - // get all linked nodes and label them - nodeCounter := 0 - for _, machine := range scaledMachines.Items { - if machine.Status.NodeRef == nil { - glog.Errorf("Machine %q does not have node reference set", machine.Name) - return false, nil - } - glog.Infof("Machine %q is linked to node %q", machine.Name, machine.Status.NodeRef.Name) - nodeCounter++ - } + c := newEventCounter(w, matchGroup, v, increment) + c.enable() - glog.Infof("Expecting %d nodes. Current number of nodes in the group: %d", expectedReplicas, nodeCounter) - return nodeCounter == int(expectedReplicas), nil - }) - o.Expect(err).NotTo(o.HaveOccurred()) + return c +} - g.By(fmt.Sprintf("Labeling all nodes belonging to the machineset %q (after scale up phase)", targetMachineSet.Name)) - err = labelMachineSetNodes(client, &targetMachineSet, nodeTestLabel) - o.Expect(err).NotTo(o.HaveOccurred()) +func newScaleDownCounter(w *eventWatcher, v uint32) *eventCounter { + isAutoscalerScaleDownEvent := func(event *corev1.Event) bool { + return event.Source.Component == clusterAutoscalerComponent && + event.Reason == clusterAutoscalerScaleDownEmpty && + event.InvolvedObject.Kind == clusterAutoscalerObjectKind && + strings.HasPrefix(event.Message, "Scale-down: empty node") } - scaleDownFunc := func(client runtimeclient.Client, targetMachineSet string, nodeTestLabel string, initialNumberOfReplicas int32) { - g.By(fmt.Sprintf("Waiting for machineset %q to have at most initial number of replicas", targetMachineSet)) - err := wait.PollImmediate(5*time.Second, e2e.WaitLong, func() (bool, error) { - ms, err := e2e.GetMachineSet(context.TODO(), client, targetMachineSet) - if err != nil { - glog.Errorf("error getting machineset object: %v, retrying...", err) - return false, nil - } - msReplicas := pointer.Int32PtrDerefOr(ms.Spec.Replicas, e2e.DefaultMachineSetReplicas) - glog.Infof("Initial number of replicas: %d. 
Current number of replicas: %d", initialNumberOfReplicas, msReplicas) - if msReplicas > initialNumberOfReplicas { - return false, nil - } + c := newEventCounter(w, isAutoscalerScaleDownEvent, v, decrement) + c.enable() + return c +} - // Make sure all scaled down nodes are really gone (so they don't affect tests to be run next) - scaledNodes := corev1.NodeList{} - if err := client.List(context.TODO(), runtimeclient.MatchingLabels(map[string]string{nodeTestLabel: ""}), &scaledNodes); err != nil { - glog.Errorf("Error querying api for node objects: %v, retrying...", err) - return false, nil - } - scaledNodesLen := int32(len(scaledNodes.Items)) - glog.Infof("Current number of replicas: %d. Current number of nodes: %d", msReplicas, scaledNodesLen) - return scaledNodesLen <= msReplicas && scaledNodesLen <= initialNumberOfReplicas, nil - }) - o.Expect(err).NotTo(o.HaveOccurred()) - } +func remaining(t time.Time) time.Duration { + return t.Sub(time.Now()).Round(time.Second) +} - g.It("scale out", func() { - var err error - client, err := e2e.LoadClient() +var _ = g.Describe("[Feature:Machines][Serial] Autoscaler should", func() { + g.It("scale up and down", func() { + defer g.GinkgoRecover() + + clientset, err := e2e.LoadClientset() o.Expect(err).NotTo(o.HaveOccurred()) - nodeTestLabel0 := fmt.Sprintf("machine.openshift.io/autoscaling-test-%v", string(uuid.NewUUID())) - nodeTestLabel1 := fmt.Sprintf("machine.openshift.io/autoscaling-test-%v", string(uuid.NewUUID())) + var client runtimeclient.Client + client, err = e2e.LoadClient() + o.Expect(err).NotTo(o.HaveOccurred()) - // We want to clean up these objects on any subsequent error. + // Anything we create we must cleanup + var cleanupObjects []runtime.Object defer func() { - err = e2e.DeleteObjectsByLabels(context.TODO(), client, map[string]string{autoscalingTestLabel: ""}, &batchv1.JobList{}) - if err != nil { - // if this one fails, there are still other resources to be deleted. - glog.Warning(err) - } else { - glog.Info("Deleted workload object") - } - - err = e2e.DeleteObjectsByLabels(context.TODO(), client, map[string]string{autoscalingTestLabel: ""}, &caov1beta1.MachineAutoscalerList{}) - if err != nil { - // if this one fails, there are still other resources to be deleted. 
- glog.Warning(err) - } else { - glog.Info("Deleted machineAutoscaler object") - } - - err = e2e.DeleteObjectsByLabels(context.TODO(), client, map[string]string{autoscalingTestLabel: ""}, &caov1.ClusterAutoscalerList{}) - if err != nil { - // if this one fails, there is no point of returning an error as this is the last resource deletion action - glog.Warning(err) - } else { - glog.Info("Deleted clusterAutoscaler object") + cascadeDelete := metav1.DeletePropagationForeground + for _, obj := range cleanupObjects { + if err = client.Delete(context.TODO(), obj, func(opt *runtimeclient.DeleteOptions) { + opt.PropagationPolicy = &cascadeDelete + }); err != nil { + glog.Errorf("error deleting object: %v", err) + } } }() - g.By("Getting target machineSet") - machinesets, err := e2e.GetMachineSets(context.TODO(), client) + g.By("Getting machinesets") + machineSets, err := e2e.GetMachineSets(context.TODO(), client) o.Expect(err).NotTo(o.HaveOccurred()) - o.Expect(len(machinesets)).To(o.BeNumerically(">", 1)) + o.Expect(len(machineSets)).To(o.BeNumerically(">=", 2)) - targetMachineSet0 := machinesets[0] - glog.Infof("Target machineSet0 %q", targetMachineSet0.Name) - targetMachineSet1 := machinesets[1] - glog.Infof("Target machineSet1 %q", targetMachineSet1.Name) - - // When we add support for machineDeployments on the installer, cluster-autoscaler and cluster-autoscaler-operator - // we need to test against deployments instead so we skip this test. - if ownerReferences0 := targetMachineSet0.GetOwnerReferences(); len(ownerReferences0) > 0 { - // glog.Infof("MachineSet %s is owned by a machineDeployment. Please run tests against machineDeployment instead", targetMachineSet.Name) - g.Skip(fmt.Sprintf("MachineSet %q is owned by a machineDeployment. Please run tests against machineDeployment instead", targetMachineSet0.Name)) - } - - if ownerReferences1 := targetMachineSet1.GetOwnerReferences(); len(ownerReferences1) > 0 { - g.Skip(fmt.Sprintf("MachineSet %q is owned by a machineDeployment. Please run tests against machineDeployment instead", targetMachineSet1.Name)) - } - - g.By("Create ClusterAutoscaler object") - clusterAutoscaler := clusterAutoscalerResource() - err = client.Create(context.TODO(), clusterAutoscaler) + g.By("Getting nodes") + nodes, err := e2e.GetNodes(client) o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(len(nodes)).To(o.BeNumerically(">=", 1)) + + g.By(fmt.Sprintf("Creating %v machineautoscalers", len(machineSets))) + var clusterExpansionSize int + for i := range machineSets { + min := pointer.Int32PtrDerefOr(machineSets[i].Spec.Replicas, 1) + // We only want each machineautoscaler + // resource to be able to grow by one + // additional node. + max := min + 1 + clusterExpansionSize += 1 + + glog.Infof("Create MachineAutoscaler backed by MachineSet %s/%s - min:%v, max:%v", machineSets[i].Namespace, machineSets[i].Name, min, max) + asr := machineAutoscalerResource(&machineSets[i], min, max) + o.Expect(client.Create(context.TODO(), asr)).Should(o.Succeed()) + cleanupObjects = append(cleanupObjects, runtime.Object(asr)) + } + o.Expect(clusterExpansionSize).To(o.BeNumerically(">", 1)) + + // We want to scale out to max-cluster-size-1. We + // choose max-1 because we want to test that + // maxNodesTotal is respected by the + // cluster-autoscaler. If maxNodesTotal == + // max-cluster-size then no MaxNodesTotalReached + // event will be generated. 
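To make the sizing comment above concrete before the code resumes, a worked sketch with assumed numbers; the test itself derives the same quantities from the live cluster rather than hard-coding them.

package example

import "fmt"

// Assume three machinesets, each allowed by its MachineAutoscaler to
// grow by exactly one node, and six nodes at test start.
func sizingSketch() {
	initialNodes := 6         // assumed len(nodes)
	clusterExpansionSize := 3 // one extra node per machineset

	// Cap the cluster one node short of full expansion so the final
	// scale-up must be refused and MaxNodesTotalReached is observable.
	maxNodesTotal := initialNodes + clusterExpansionSize - 1
	fmt.Println(maxNodesTotal) // 8: room for two of the three extra nodes
}

If maxNodesTotal equaled the fully expanded size, every group could scale and no cap event would ever fire, which is exactly what the comment warns about.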
+ maxNodesTotal := len(nodes) + clusterExpansionSize - 1 + + eventWatcher := newEventWatcher(clientset) + o.Expect(eventWatcher.run()).Should(o.BeTrue()) + defer eventWatcher.stop() + + // Log cluster-autoscaler events + eventWatcher.onEvent(matchAnyEvent, func(e *corev1.Event) { + if e.Source.Component == clusterAutoscalerComponent { + glog.Infof("%s: %s", e.InvolvedObject.Name, e.Message) + } + }).enable() - initialNumberOfReplicas0 := pointer.Int32PtrDerefOr(targetMachineSet0.Spec.Replicas, e2e.DefaultMachineSetReplicas) - initialNumberOfReplicas1 := pointer.Int32PtrDerefOr(targetMachineSet1.Spec.Replicas, e2e.DefaultMachineSetReplicas) - glog.Infof("initialNumberOfReplicas0 %d, initialNumberOfReplicas1 %d", initialNumberOfReplicas0, initialNumberOfReplicas1) - - g.By("Creating workload") - err = client.Create(context.TODO(), newWorkLoad()) - o.Expect(err).NotTo(o.HaveOccurred()) + g.By(fmt.Sprintf("Creating ClusterAutoscaler configured with maxNodesTotal:%v", maxNodesTotal)) + clusterAutoscaler := clusterAutoscalerResource(maxNodesTotal) + o.Expect(client.Create(context.TODO(), clusterAutoscaler)).Should(o.Succeed()) + cleanupObjects = append(cleanupObjects, runtime.Object(clusterAutoscaler)) - scaleUpFunc(client, targetMachineSet1, nodeTestLabel1, initialNumberOfReplicas1, initialNumberOfReplicas1+2) - scaleUpFunc(client, targetMachineSet0, nodeTestLabel0, initialNumberOfReplicas0, initialNumberOfReplicas1+1) + g.By("Creating scale-out workload") + scaledGroups := map[string]bool{} + for i := range machineSets { + scaledGroups[path.Join(machineSets[i].Namespace, machineSets[i].Name)] = false + } + scaleUpCounter := newScaleUpCounter(eventWatcher, 0, scaledGroups) + workload := newWorkLoad() + o.Expect(client.Create(context.TODO(), workload)).Should(o.Succeed()) + cleanupObjects = append(cleanupObjects, runtime.Object(workload)) + testDuration := time.Now().Add(time.Duration(e2e.WaitLong)) + o.Eventually(func() bool { + v := scaleUpCounter.get() + glog.Infof("[%s remaining] Expecting %v %q events; observed %v", + remaining(testDuration), clusterExpansionSize-1, clusterAutoscalerScaledUpGroup, v) + return v == uint32(clusterExpansionSize-1) + }, e2e.WaitLong, pollingInterval).Should(o.BeTrue()) + + // The cluster-autoscaler can keep on generating + // ScaledUpGroup events but in this scenario we are + // expecting no more as we explicitly capped the + // cluster size with maxNodesTotal (i.e., + // clusterExpansionSize -1). We run for a period of + // time asserting that the cluster does not exceed the + // capped size. + // + // TODO(frobware): switch to matching on + // MaxNodesTotalReached when that is available in the + // cluster-autoscaler image. + testDuration = time.Now().Add(time.Duration(e2e.WaitShort)) + o.Consistently(func() bool { + v := scaleUpCounter.get() + glog.Infof("[%s remaining] At max cluster size and expecting no more %q events; currently have %v, max=%v", + remaining(testDuration), clusterAutoscalerScaledUpGroup, v, clusterExpansionSize-1) + return v == uint32(clusterExpansionSize-1) + }, e2e.WaitShort, pollingInterval).Should(o.BeTrue()) - // Delete workload g.By("Deleting workload") - err = e2e.DeleteObjectsByLabels(context.TODO(), client, map[string]string{autoscalingTestLabel: ""}, &batchv1.JobList{}) - o.Expect(err).NotTo(o.HaveOccurred()) - - // As we have just deleted the workload the autoscaler will - // start to scale down the unneeded nodes. We wait for that - // condition; if successful we assert that (a smoke test of) - // scale down is functional. 
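The Eventually-then-Consistently pairing above is a reusable two-phase idiom: poll until the expected ScaledUpGroup count is reached, then hold the same predicate for a shorter window to prove the cap is never exceeded. A hedged, self-contained sketch of the idiom follows; the counter, timings, and simulated event source are placeholders, not the test's real values.

package example

import (
	"sync/atomic"
	"time"

	g "github.com/onsi/ginkgo"
	o "github.com/onsi/gomega"
)

var _ = g.Describe("two-phase count assertion (sketch)", func() {
	g.It("reaches the target and never overshoots", func() {
		var events uint32 // normally incremented by an event handler
		want := uint32(2)

		// Stand-in event source: two matching events arrive shortly
		// after the test starts.
		go func() {
			time.Sleep(2 * time.Second)
			atomic.AddUint32(&events, 1)
			atomic.AddUint32(&events, 1)
		}()

		// Phase 1: poll until the observed count reaches the target.
		o.Eventually(func() uint32 {
			return atomic.LoadUint32(&events)
		}, time.Minute, time.Second).Should(o.Equal(want))

		// Phase 2: keep checking for a while to show the count stays
		// at the cap instead of growing past it.
		o.Consistently(func() uint32 {
			return atomic.LoadUint32(&events)
		}, 10*time.Second, time.Second).Should(o.Equal(want))
	})
})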
- scaleDownFunc(client, targetMachineSet0.Name, nodeTestLabel0, initialNumberOfReplicas0) - scaleDownFunc(client, targetMachineSet1.Name, nodeTestLabel1, initialNumberOfReplicas1) + scaleDownCounter := newScaleDownCounter(eventWatcher, uint32(clusterExpansionSize-1)) + o.Expect(e2e.DeleteObjectsByLabels(context.TODO(), client, map[string]string{autoscalingTestLabel: ""}, &batchv1.JobList{})).Should(o.Succeed()) + if len(cleanupObjects) > 1 && cleanupObjects[len(cleanupObjects)-1] == workload { + cleanupObjects = cleanupObjects[:len(cleanupObjects)-1] + } + testDuration = time.Now().Add(time.Duration(e2e.WaitLong)) + o.Eventually(func() uint32 { + v := scaleDownCounter.get() + glog.Infof("[%s remaining] Waiting for %s to generate %v more %q events", + remaining(testDuration), clusterAutoscalerComponent, v, clusterAutoscalerScaleDownEmpty) + return v + }, e2e.WaitLong, pollingInterval).Should(o.BeZero()) + + g.By("Waiting for scaled up nodes to be deleted") + testDuration = time.Now().Add(time.Duration(e2e.WaitMedium)) + o.Eventually(func() int { + currentNodes, err := e2e.GetNodes(client) + o.Expect(err).NotTo(o.HaveOccurred()) + glog.Infof("[%s remaining] Waiting for cluster to reach original node count of %v; currently have %v", + remaining(testDuration), len(nodes), len(currentNodes)) + return len(currentNodes) + }, e2e.WaitMedium, pollingInterval).Should(o.Equal(len(nodes))) }) - }) diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/counter.go b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/counter.go new file mode 100644 index 0000000000..340df55a58 --- /dev/null +++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/counter.go @@ -0,0 +1,19 @@ +package autoscaler + +import ( + "sync/atomic" +) + +type u32counter uint32 + +func (c *u32counter) increment() uint32 { + return atomic.AddUint32((*uint32)(c), 1) +} + +func (c *u32counter) decrement() uint32 { + return atomic.AddUint32((*uint32)(c), ^uint32(0)) +} + +func (c *u32counter) get() uint32 { + return atomic.LoadUint32((*uint32)(c)) +} diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/event.go b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/event.go new file mode 100644 index 0000000000..873540c58d --- /dev/null +++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler/event.go @@ -0,0 +1,130 @@ +package autoscaler + +import ( + "sync" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" +) + +const ( + increment counterType = iota + decrement +) + +type counterType int + +type eventCounter struct { + counter u32counter + handler *eventHandler +} + +type matchEventFunc func(event *corev1.Event) bool +type eventHandlerFunc func(event *corev1.Event) + +type eventWatcher struct { + stopCh chan struct{} + informerFactory informers.SharedInformerFactory + eventInformer cache.SharedIndexInformer + startTime metav1.Time + + eventHandlerLock sync.Mutex + eventHandlers []*eventHandler +} + +type eventHandler struct { + sync.Mutex + + matcher matchEventFunc + handler eventHandlerFunc + enabled bool +} + +func newEventWatcher(clientset kubernetes.Interface) *eventWatcher { + w := eventWatcher{ + stopCh: make(chan struct{}), + startTime: metav1.Now(), + informerFactory: informers.NewSharedInformerFactory(clientset, 0), + } + + w.eventInformer = 
w.informerFactory.Core().V1().Events().Informer() + w.eventInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + event := obj.(*corev1.Event) + if event.CreationTimestamp.Before(&w.startTime) { + return + } + + w.eventHandlerLock.Lock() + defer w.eventHandlerLock.Unlock() + + for _, h := range w.eventHandlers { + h.Lock() + if h.enabled && h.matcher(event) { + h.handler(event) + } + h.Unlock() + } + }, + }) + + return &w +} + +func (w *eventWatcher) run() bool { + w.informerFactory.Start(w.stopCh) + return cache.WaitForCacheSync(w.stopCh, w.eventInformer.HasSynced) +} + +func (w *eventWatcher) stop() { + close(w.stopCh) +} + +func (w *eventWatcher) onEvent(matcher matchEventFunc, handler eventHandlerFunc) *eventHandler { + h := &eventHandler{ + matcher: matcher, + handler: handler, + } + + w.eventHandlerLock.Lock() + defer w.eventHandlerLock.Unlock() + w.eventHandlers = append(w.eventHandlers, h) + + return h +} + +func (h *eventHandler) enable() { + h.Lock() + defer h.Unlock() + h.enabled = true +} + +func matchAnyEvent(_ *corev1.Event) bool { + return true +} + +func newEventCounter(w *eventWatcher, matcher matchEventFunc, val uint32, t counterType) *eventCounter { + c := &eventCounter{ + counter: u32counter(val), + } + c.handler = w.onEvent(matcher, func(e *corev1.Event) { + switch t { + case increment: + c.counter.increment() + case decrement: + c.counter.decrement() + } + }) + return c +} + +func (c *eventCounter) get() uint32 { + return c.counter.get() +} + +func (c *eventCounter) enable() { + c.handler.enable() +} diff --git a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/framework/framework.go b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/framework/framework.go index 1c8667ed32..ba7a95de16 100644 --- a/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/framework/framework.go +++ b/vendor/github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/framework/framework.go @@ -426,6 +426,14 @@ func LoadClient() (runtimeclient.Client, error) { return runtimeclient.New(config, runtimeclient.Options{}) } +func LoadClientset() (*kubernetes.Clientset, error) { + config, err := LoadConfig() + if err != nil { + return nil, fmt.Errorf("error creating client: %v", err.Error()) + } + return kubernetes.NewForConfig(config) +} + func IsNodeReady(node *corev1.Node) bool { for _, c := range node.Status.Conditions { if c.Type == corev1.NodeReady { diff --git a/vendor/github.com/openshift/cluster-api/Gopkg.lock b/vendor/github.com/openshift/cluster-api/Gopkg.lock index d979caf4cb..7813fae683 100644 --- a/vendor/github.com/openshift/cluster-api/Gopkg.lock +++ b/vendor/github.com/openshift/cluster-api/Gopkg.lock @@ -106,10 +106,11 @@ [[projects]] branch = "master" - digest = "1:eaa7c96baf38f6abde2f720aac540a49dfc2229b74c3c591c3f84d2ff7e84269" + digest = "1:ebbb7da8a60c10db59cbc3bb5a874b0a6a9f441bc8bef66b90085111f71c5b49" name = "github.com/go-log/log" packages = [ ".", + "capture", "info", ] pruneopts = "UT" @@ -375,14 +376,6 @@ revision = "65fb64232476ad9046e57c26cd0bff3d3a8dc6cd" version = "v1.4.3" -[[projects]] - branch = "master" - digest = "1:f7646c654e93258958dba300641f8f674d5a9ed015c11119793ba1156e2acbe9" - name = "github.com/openshift/kubernetes-drain" - packages = ["."] - pruneopts = "UT" - revision = "c2e51be1758efa30d71a4d30dc4e2db86b70a4df" - [[projects]] digest = "1:e5d0bd87abc2781d14e274807a470acd180f0499f8bf5bb18606e9ec22ad9de9" name = "github.com/pborman/uuid" @@ -915,7 +908,7 @@ revision = 
"b6aa1175dafa586b8042c7bfdcd1585f9ecfaa08" [[projects]] - digest = "1:9e2617b6420a04f67a7b63938a238f43dd401f90a6c4d0f1f2470cd066e38928" + digest = "1:6fce300f4b657bf253c88e2cf18b550a18e421bda6c641249aa868e2ff8441a9" name = "k8s.io/client-go" packages = [ "discovery", @@ -968,39 +961,72 @@ "informers/storage/v1alpha1", "informers/storage/v1beta1", "kubernetes", + "kubernetes/fake", "kubernetes/scheme", "kubernetes/typed/admissionregistration/v1alpha1", + "kubernetes/typed/admissionregistration/v1alpha1/fake", "kubernetes/typed/admissionregistration/v1beta1", + "kubernetes/typed/admissionregistration/v1beta1/fake", "kubernetes/typed/apps/v1", + "kubernetes/typed/apps/v1/fake", "kubernetes/typed/apps/v1beta1", + "kubernetes/typed/apps/v1beta1/fake", "kubernetes/typed/apps/v1beta2", + "kubernetes/typed/apps/v1beta2/fake", "kubernetes/typed/auditregistration/v1alpha1", + "kubernetes/typed/auditregistration/v1alpha1/fake", "kubernetes/typed/authentication/v1", + "kubernetes/typed/authentication/v1/fake", "kubernetes/typed/authentication/v1beta1", + "kubernetes/typed/authentication/v1beta1/fake", "kubernetes/typed/authorization/v1", + "kubernetes/typed/authorization/v1/fake", "kubernetes/typed/authorization/v1beta1", + "kubernetes/typed/authorization/v1beta1/fake", "kubernetes/typed/autoscaling/v1", + "kubernetes/typed/autoscaling/v1/fake", "kubernetes/typed/autoscaling/v2beta1", + "kubernetes/typed/autoscaling/v2beta1/fake", "kubernetes/typed/autoscaling/v2beta2", + "kubernetes/typed/autoscaling/v2beta2/fake", "kubernetes/typed/batch/v1", + "kubernetes/typed/batch/v1/fake", "kubernetes/typed/batch/v1beta1", + "kubernetes/typed/batch/v1beta1/fake", "kubernetes/typed/batch/v2alpha1", + "kubernetes/typed/batch/v2alpha1/fake", "kubernetes/typed/certificates/v1beta1", + "kubernetes/typed/certificates/v1beta1/fake", "kubernetes/typed/coordination/v1beta1", + "kubernetes/typed/coordination/v1beta1/fake", "kubernetes/typed/core/v1", + "kubernetes/typed/core/v1/fake", "kubernetes/typed/events/v1beta1", + "kubernetes/typed/events/v1beta1/fake", "kubernetes/typed/extensions/v1beta1", + "kubernetes/typed/extensions/v1beta1/fake", "kubernetes/typed/networking/v1", + "kubernetes/typed/networking/v1/fake", "kubernetes/typed/policy/v1beta1", + "kubernetes/typed/policy/v1beta1/fake", "kubernetes/typed/rbac/v1", + "kubernetes/typed/rbac/v1/fake", "kubernetes/typed/rbac/v1alpha1", + "kubernetes/typed/rbac/v1alpha1/fake", "kubernetes/typed/rbac/v1beta1", + "kubernetes/typed/rbac/v1beta1/fake", "kubernetes/typed/scheduling/v1alpha1", + "kubernetes/typed/scheduling/v1alpha1/fake", "kubernetes/typed/scheduling/v1beta1", + "kubernetes/typed/scheduling/v1beta1/fake", "kubernetes/typed/settings/v1alpha1", + "kubernetes/typed/settings/v1alpha1/fake", "kubernetes/typed/storage/v1", + "kubernetes/typed/storage/v1/fake", "kubernetes/typed/storage/v1alpha1", + "kubernetes/typed/storage/v1alpha1/fake", "kubernetes/typed/storage/v1beta1", + "kubernetes/typed/storage/v1beta1/fake", "listers/admissionregistration/v1alpha1", "listers/admissionregistration/v1beta1", "listers/apps/v1", @@ -1220,10 +1246,11 @@ input-imports = [ "github.com/davecgh/go-spew/spew", "github.com/emicklei/go-restful", + "github.com/go-log/log", + "github.com/go-log/log/capture", "github.com/go-log/log/info", "github.com/onsi/ginkgo", "github.com/onsi/gomega", - "github.com/openshift/kubernetes-drain", "github.com/pkg/errors", "github.com/sergi/go-diff/diffmatchpatch", "github.com/spf13/cobra", @@ -1231,7 +1258,10 @@ "golang.org/x/net/context", 
"k8s.io/api/apps/v1", "k8s.io/api/autoscaling/v1", + "k8s.io/api/batch/v1", "k8s.io/api/core/v1", + "k8s.io/api/extensions/v1beta1", + "k8s.io/api/policy/v1beta1", "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", "k8s.io/apimachinery/pkg/api/equality", "k8s.io/apimachinery/pkg/api/errors", @@ -1239,15 +1269,18 @@ "k8s.io/apimachinery/pkg/apis/meta/v1", "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", "k8s.io/apimachinery/pkg/apis/meta/v1/validation", + "k8s.io/apimachinery/pkg/fields", "k8s.io/apimachinery/pkg/labels", "k8s.io/apimachinery/pkg/runtime", "k8s.io/apimachinery/pkg/runtime/schema", "k8s.io/apimachinery/pkg/runtime/serializer", "k8s.io/apimachinery/pkg/types", + "k8s.io/apimachinery/pkg/util/errors", "k8s.io/apimachinery/pkg/util/intstr", "k8s.io/apimachinery/pkg/util/json", "k8s.io/apimachinery/pkg/util/rand", "k8s.io/apimachinery/pkg/util/runtime", + "k8s.io/apimachinery/pkg/util/sets", "k8s.io/apimachinery/pkg/util/validation/field", "k8s.io/apimachinery/pkg/util/wait", "k8s.io/apimachinery/pkg/util/yaml", @@ -1257,8 +1290,11 @@ "k8s.io/client-go/discovery/fake", "k8s.io/client-go/informers", "k8s.io/client-go/kubernetes", + "k8s.io/client-go/kubernetes/fake", "k8s.io/client-go/kubernetes/scheme", + "k8s.io/client-go/kubernetes/typed/apps/v1", "k8s.io/client-go/kubernetes/typed/core/v1", + "k8s.io/client-go/kubernetes/typed/policy/v1beta1", "k8s.io/client-go/plugin/pkg/client/auth", "k8s.io/client-go/plugin/pkg/client/auth/gcp", "k8s.io/client-go/rest", diff --git a/vendor/github.com/openshift/cluster-api/README.md b/vendor/github.com/openshift/cluster-api/README.md index 6f7ab30986..957d5c8793 100644 --- a/vendor/github.com/openshift/cluster-api/README.md +++ b/vendor/github.com/openshift/cluster-api/README.md @@ -1,73 +1,17 @@ # Cluster API -## What is the Cluster API? -The Cluster API is a Kubernetes project to bring declarative, Kubernetes-style -APIs to cluster creation, configuration, and management. It provides optional, -additive functionality on top of core Kubernetes. +Cluster API provides the ability to manage Kubernetes supportable hosts in the +context of OpenShift. -Note that Cluster API effort is still in the prototype stage while we get -feedback on the API types themselves. All of the code here is to experiment with -the API and demo its abilities, in order to drive more technical feedback to the -API design. Because of this, all of the prototype code is rapidly changing. +This branch contains an implementation of a machineset-controller and +machine-controller as well as their supporting libraries. -![Cluster API Architecture](./docs/book/common_code/architecture.svg "Cluster API Architecture") +Each of these controllers is deployed by the +[machine-api-operator](https://github.com/openshift/machine-api-operator) -To learn more, see the [Cluster API KEP][cluster-api-kep]. +# Upstream Implementation +Other branches of this repository may choose to track the upstream +Kubernetes [Cluster-API project](https://github.com/kubernetes-sigs/cluster-api) -## Get involved! - -* Join the [sig-cluster-lifecycle](https://groups.google.com/forum/#!forum/kubernetes-sig-cluster-lifecycle) -Google Group for access to documents and calendars. 
- -* Join our Cluster API working group sessions - * Weekly on Wednesdays @ 10:00 PT on [Zoom][zoomMeeting] - * Previous meetings: \[ [notes][notes] | [recordings][recordings] \] - -* Provider implementer office hours - * Weekly on Tuesdays @ 12:00 PT ([Zoom][providerZoomMeetingTues]) and Wednesdays @ 15:00 CET ([Zoom][providerZoomMeetingWed]) - * Previous meetings: \[ [notes][implementerNotes] \] - -* Chat with us on [Slack](http://slack.k8s.io/): #cluster-api - -## Provider Implementations - -The code in this repository is independent of any specific deployment environment. -Provider specific code is being developed in separate repositories, some of which -are also sponsored by SIG-cluster-lifecycle: - - * AWS, https://github.com/kubernetes-sigs/cluster-api-provider-aws - * AWS/Openshift, https://github.com/openshift/cluster-operator - * Azure, https://github.com/kubernetes-sigs/cluster-api-provider-azure - * Baidu Cloud, https://github.com/baidu/cluster-api-provider-baiducloud - * Bare Metal, https://github.com/metalkube/cluster-api-provider-baremetal - * DigitalOcean, https://github.com/kubernetes-sigs/cluster-api-provider-digitalocean - * GCE, https://github.com/kubernetes-sigs/cluster-api-provider-gcp - * OpenStack, https://github.com/kubernetes-sigs/cluster-api-provider-openstack - * Tencent Cloud, https://github.com/TencentCloud/cluster-api-provider-tencent - * vSphere, https://github.com/kubernetes-sigs/cluster-api-provider-vsphere - -## API Adoption - -Following are the implementations managed by third-parties adopting the standard cluster-api and/or machine-api being developed here. - - * Kubermatic machine-controller, https://github.com/kubermatic/machine-controller/tree/master - * Machine API Operator, https://github.com/openshift/machine-api-operator/tree/master - * Machine-controller-manager, https://github.com/gardener/machine-controller-manager/tree/cluster-api - -## Getting Started - -### Resources - -* GitBook: [kubernetes-sigs.github.io/cluster-api](https://kubernetes-sigs.github.io/cluster-api) - -### Prerequisites -* `kubectl` is required, see [here](http://kubernetes.io/docs/user-guide/prereqs/). -* `clusterctl` is a SIG-cluster-lifecycle sponsored tool to manage Cluster API clusters. See [here](cmd/clusterctl) - -[cluster-api-kep]: https://github.com/kubernetes/enhancements/blob/master/keps/sig-cluster-lifecycle/0003-cluster-api.md -[notes]: https://docs.google.com/document/d/1Ys-DOR5UsgbMEeciuG0HOgDQc8kZsaWIWJeKJ1-UfbY/edit -[recordings]: https://www.youtube.com/playlist?list=PL69nYSiGNLP29D0nYgAGWt1ZFqS9Z7lw4 -[zoomMeeting]: https://zoom.us/j/861487554 -[implementerNotes]: https://docs.google.com/document/d/1IZ2-AZhe4r3CYiJuttyciS7bGZTTx4iMppcA8_Pr3xE/edit -[providerZoomMeetingTues]: https://zoom.us/j/140808484 -[providerZoomMeetingWed]: https://zoom.us/j/424743530 +In the future, we may align the master branch with the upstream project as it +stabilizes within the community. 
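For context on what "managing hosts" means in practice here: each host is modeled as a `Machine` object in the vendored `github.com/openshift/cluster-api/pkg/apis/machine/v1beta1` API group, and the machine-controller reconciles those objects against backing nodes. Below is a minimal, hypothetical sketch of a consumer listing Machines, assuming the controller-runtime v0.1.x client API pinned by this dependency set; the scheme wiring and `main` scaffolding are illustrative, not code from this diff.

```go
// Hypothetical sketch (not part of this diff): list Machine objects using the
// vendored cluster-api types. Assumes the controller-runtime v0.1.x client
// signatures, e.g. List(ctx, *ListOptions, list).
package main

import (
	"context"
	"fmt"

	machinev1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
)

func main() {
	scheme := runtime.NewScheme()
	_ = clientgoscheme.AddToScheme(scheme) // core Kubernetes types
	_ = machinev1.AddToScheme(scheme)      // Machine, MachineSet, ...

	cfg, err := config.GetConfig() // kubeconfig or in-cluster config
	if err != nil {
		panic(err)
	}
	c, err := client.New(cfg, client.Options{Scheme: scheme})
	if err != nil {
		panic(err)
	}

	machines := &machinev1.MachineList{}
	if err := c.List(context.TODO(), &client.ListOptions{}, machines); err != nil {
		panic(err)
	}
	for _, m := range machines.Items {
		// Status.NodeRef is the link the machine-controller follows when it
		// drains a node before deleting the underlying host (see below).
		fmt.Printf("%s/%s -> nodeRef: %v\n", m.Namespace, m.Name, m.Status.NodeRef)
	}
}
```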
diff --git a/vendor/github.com/openshift/cluster-api/pkg/controller/machine/BUILD.bazel b/vendor/github.com/openshift/cluster-api/pkg/controller/machine/BUILD.bazel index 20a5e72841..5ce3bd7f60 100644 --- a/vendor/github.com/openshift/cluster-api/pkg/controller/machine/BUILD.bazel +++ b/vendor/github.com/openshift/cluster-api/pkg/controller/machine/BUILD.bazel @@ -13,9 +13,9 @@ go_library( "//pkg/apis/cluster/v1alpha1:go_default_library", "//pkg/apis/machine/v1beta1:go_default_library", "//pkg/controller/error:go_default_library", + "//pkg/drain:go_default_library", "//pkg/util:go_default_library", "//vendor/github.com/go-log/log/info:go_default_library", - "//vendor/github.com/openshift/kubernetes-drain:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/vendor/github.com/openshift/cluster-api/pkg/controller/machine/controller.go b/vendor/github.com/openshift/cluster-api/pkg/controller/machine/controller.go index 3809d8673c..6ead790729 100644 --- a/vendor/github.com/openshift/cluster-api/pkg/controller/machine/controller.go +++ b/vendor/github.com/openshift/cluster-api/pkg/controller/machine/controller.go @@ -26,8 +26,8 @@ import ( clusterv1 "github.com/openshift/cluster-api/pkg/apis/cluster/v1alpha1" machinev1 "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1" controllerError "github.com/openshift/cluster-api/pkg/controller/error" + kubedrain "github.com/openshift/cluster-api/pkg/drain" "github.com/openshift/cluster-api/pkg/util" - kubedrain "github.com/openshift/kubernetes-drain" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -199,6 +199,7 @@ func (r *ReconcileMachine) Reconcile(request reconcile.Request) (reconcile.Resul // deleted without a manual intervention. if _, exists := m.ObjectMeta.Annotations[ExcludeNodeDrainingAnnotation]; !exists && m.Status.NodeRef != nil { if err := r.drainNode(m); err != nil { + klog.Errorf("Failed to drain node for machine %q: %v", name, err) return delayIfRequeueAfterError(err) } } @@ -259,6 +260,11 @@ func (r *ReconcileMachine) drainNode(machine *machinev1.Machine) error { } node, err := kubeClient.CoreV1().Nodes().Get(machine.Status.NodeRef.Name, metav1.GetOptions{}) if err != nil { + if apierrors.IsNotFound(err) { + // If an admin deletes the node directly, we'll end up here. 
+ klog.Infof("Could not find node from noderef, it may have already been deleted: %v", machine.Status.NodeRef.Name) + return nil + } return fmt.Errorf("unable to get node %q: %v", machine.Status.NodeRef.Name, err) } diff --git a/vendor/github.com/openshift/cluster-api/pkg/drain/BUILD.bazel b/vendor/github.com/openshift/cluster-api/pkg/drain/BUILD.bazel new file mode 100644 index 0000000000..39893e2c11 --- /dev/null +++ b/vendor/github.com/openshift/cluster-api/pkg/drain/BUILD.bazel @@ -0,0 +1,45 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "go_default_library", + srcs = ["drain.go"], + importpath = "github.com/openshift/cluster-api/pkg/drain", + visibility = ["//visibility:public"], + deps = [ + "//vendor/github.com/go-log/log:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/policy/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/client-go/kubernetes:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1:go_default_library", + ], +) + +go_test( + name = "go_default_test", + srcs = ["drain_test.go"], + embed = [":go_default_library"], + deps = [ + "//vendor/github.com/go-log/log/capture:go_default_library", + "//vendor/k8s.io/api/apps/v1:go_default_library", + "//vendor/k8s.io/api/batch/v1:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/api/extensions/v1beta1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library", + ], +) diff --git a/vendor/github.com/openshift/kubernetes-drain/drain.go b/vendor/github.com/openshift/cluster-api/pkg/drain/drain.go similarity index 96% rename from vendor/github.com/openshift/kubernetes-drain/drain.go rename to vendor/github.com/openshift/cluster-api/pkg/drain/drain.go index d3ab141478..3f5b274e53 100644 --- a/vendor/github.com/openshift/kubernetes-drain/drain.go +++ b/vendor/github.com/openshift/cluster-api/pkg/drain/drain.go @@ -37,8 +37,8 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" + typedappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" - typedextensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" typedpolicyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" ) @@ -238,8 +238,8 @@ func (o *DrainOptions) unreplicatedFilter(pod corev1.Pod) (bool, 
*warning, *fata } type DaemonSetFilterOptions struct { - client typedextensionsv1beta1.ExtensionsV1beta1Interface - force bool + client typedappsv1.AppsV1Interface + force bool ignoreDaemonSets bool } @@ -328,8 +328,8 @@ func getPodsForDeletion(client kubernetes.Interface, node *corev1.Node, options fs := podStatuses{} daemonSetOptions := &DaemonSetFilterOptions{ - client: client.ExtensionsV1beta1(), - force: options.Force, + client: client.AppsV1(), + force: options.Force, ignoreDaemonSets: options.IgnoreDaemonsets, } @@ -412,9 +412,17 @@ func deleteOrEvictPods(client kubernetes.Interface, pods []corev1.Pod, options * func evictPods(client typedpolicyv1beta1.PolicyV1beta1Interface, pods []corev1.Pod, policyGroupVersion string, options *DrainOptions, getPodFn func(namespace, name string) (*corev1.Pod, error)) error { returnCh := make(chan error, 1) + stopCh := make(chan struct{}) + // 0 timeout means infinite, we use MaxInt64 to represent it. + var globalTimeout time.Duration + if options.Timeout == 0 { + globalTimeout = time.Duration(math.MaxInt64) + } else { + globalTimeout = options.Timeout + } for _, pod := range pods { - go func(pod corev1.Pod, returnCh chan error) { + go func(pod corev1.Pod, returnCh chan error, stopCh chan struct{}) { var err error for { err = evictPod(client, pod, policyGroupVersion, options.GracePeriodSeconds) @@ -424,33 +432,32 @@ func evictPods(client typedpolicyv1beta1.PolicyV1beta1Interface, pods []corev1.P returnCh <- nil return } else if apierrors.IsTooManyRequests(err) { - logf(options.Logger, "error when evicting pod %q (will retry after 5s): %v", pod.Name, err) - time.Sleep(5 * time.Second) + select { + case <-stopCh: + returnCh <- fmt.Errorf("global timeout reached, skipping eviction retries for pod %q", pod.Name) + return + default: + logf(options.Logger, "error when evicting pod %q (will retry after 5s): %v", pod.Name, err) + time.Sleep(5 * time.Second) + } } else { returnCh <- fmt.Errorf("error when evicting pod %q: %v", pod.Name, err) return } } podArray := []corev1.Pod{pod} - _, err = waitForDelete(podArray, 1*time.Second, time.Duration(math.MaxInt64), true, options.Logger, getPodFn) + _, err = waitForDelete(podArray, 1*time.Second, time.Duration(globalTimeout), true, options.Logger, getPodFn) if err == nil { returnCh <- nil } else { returnCh <- fmt.Errorf("error when waiting for pod %q terminating: %v", pod.Name, err) } - }(pod, returnCh) + }(pod, returnCh, stopCh) } doneCount := 0 var errors []error - // 0 timeout means infinite, we use MaxInt64 to represent it.
- var globalTimeout time.Duration - if options.Timeout == 0 { - globalTimeout = time.Duration(math.MaxInt64) - } else { - globalTimeout = options.Timeout - } globalTimeoutCh := time.After(globalTimeout) numPods := len(pods) for doneCount < numPods { @@ -461,7 +468,8 @@ func evictPods(client typedpolicyv1beta1.PolicyV1beta1Interface, pods []corev1.P errors = append(errors, err) } case <-globalTimeoutCh: - return fmt.Errorf("Drain did not complete within %v", globalTimeout) + logf(options.Logger, "Closing stopCh") + close(stopCh) } } return utilerrors.NewAggregate(errors) diff --git a/vendor/github.com/openshift/cluster-autoscaler-operator/Gopkg.lock b/vendor/github.com/openshift/cluster-autoscaler-operator/Gopkg.lock index cdc4d0fdfb..d753291732 100644 --- a/vendor/github.com/openshift/cluster-autoscaler-operator/Gopkg.lock +++ b/vendor/github.com/openshift/cluster-autoscaler-operator/Gopkg.lock @@ -339,14 +339,14 @@ [[projects]] branch = "master" - digest = "1:65f27352945521c648be4070c006062a990fd6987d536cdc275002f698e12f32" + digest = "1:eb99e2f8bbe2e179c5171dc8e66438179c2ce8ebe901f7d4c1cbe35e071cb03f" name = "github.com/openshift/api" packages = [ "config/v1", "security/v1", ] pruneopts = "NT" - revision = "d2f01e7b77a6fc78b328db20285423838419fef7" + revision = "168fd4e3c55217ad0042c4ee1ec79e9ce40d5c21" [[projects]] branch = "master" @@ -383,8 +383,8 @@ revision = "5e580f96e63e5db1fc8095ca5f587716a4a8d9e9" [[projects]] - branch = "cao-v1beta1" - digest = "1:79a6ed849848dd57a77c4c91bdd58049be2411471449c70806e7e950bd5b3921" + branch = "master" + digest = "1:9f45d3c3183bf475291b11c2f4f22d129cc2abb6bceb32216d6e5135d6526f7d" name = "github.com/openshift/cluster-api-actuator-pkg" packages = [ "pkg/e2e", @@ -397,16 +397,15 @@ "pkg/types", ] pruneopts = "N" - revision = "dfe93de1579dbaf5ca015760f22536f1e12660d0" - source = "https://github.com/bison/cluster-api-actuator-pkg.git" + revision = "dc2d4c7f4e839792f76b43e4844a5f87a09300b2" [[projects]] branch = "master" - digest = "1:3fbb49711ca3622090d276277461ecaf021df10253c0025cfc881bf316f97244" + digest = "1:be4667b1698c8e6a037a0f0ab2301a27be83a3b4b47c34c9dce5130bd0fd422a" name = "github.com/openshift/cluster-version-operator" packages = ["lib/resourcemerge"] pruneopts = "NT" - revision = "797a840d7ea27fa6985b9da4601a6fd1e9286e94" + revision = "23f7ee1372e44643d45c4006e6c102ec0bb7583e" [[projects]] branch = "master" diff --git a/vendor/github.com/openshift/cluster-autoscaler-operator/Gopkg.toml b/vendor/github.com/openshift/cluster-autoscaler-operator/Gopkg.toml index ab59a867c5..25d564ec5d 100644 --- a/vendor/github.com/openshift/cluster-autoscaler-operator/Gopkg.toml +++ b/vendor/github.com/openshift/cluster-autoscaler-operator/Gopkg.toml @@ -22,11 +22,9 @@ required = [ [[override]] name = "github.com/openshift/cluster-autoscaler-operator" -# This is temporary until the version change is merged. 
[[override]] name = "github.com/openshift/cluster-api-actuator-pkg" - source = "https://github.com/bison/cluster-api-actuator-pkg.git" - branch = "cao-v1beta1" + branch = "master" [[override]] name = "k8s.io/code-generator" @@ -57,10 +55,6 @@ required = [ name = "sigs.k8s.io/controller-runtime" version = "v0.1.10" -[[override]] - name = "github.com/openshift/api" - branch = "master" - [[override]] name = "github.com/openshift/client-go" branch = "master" diff --git a/vendor/github.com/openshift/cluster-autoscaler-operator/pkg/operator/status.go b/vendor/github.com/openshift/cluster-autoscaler-operator/pkg/operator/status.go index e905563640..1f203a1cf7 100644 --- a/vendor/github.com/openshift/cluster-autoscaler-operator/pkg/operator/status.go +++ b/vendor/github.com/openshift/cluster-autoscaler-operator/pkg/operator/status.go @@ -167,7 +167,7 @@ func (r *StatusReporter) ApplyStatus(status configv1.ClusterOperatorStatus) erro } // available reports the operator as available, not progressing, and -// not failing -- optionally setting a reason and message. This will +// not degraded -- optionally setting a reason and message. This will // update the reported operator version. It should only be called if // the operands are fully updated and available. func (r *StatusReporter) available(reason, message string) error { @@ -184,7 +184,7 @@ func (r *StatusReporter) available(reason, message string) error { Status: configv1.ConditionFalse, }, { - Type: configv1.OperatorFailing, + Type: configv1.OperatorDegraded, Status: configv1.ConditionFalse, }, }, @@ -201,9 +201,9 @@ func (r *StatusReporter) available(reason, message string) error { return r.ApplyStatus(status) } -// failing reports the operator as failing but available, and not +// degraded reports the operator as degraded but available, and not // progressing -- optionally setting a reason and message. -func (r *StatusReporter) failing(reason, message string) error { +func (r *StatusReporter) degraded(reason, message string) error { status := configv1.ClusterOperatorStatus{ Conditions: []configv1.ClusterOperatorStatusCondition{ { @@ -215,7 +215,7 @@ func (r *StatusReporter) failing(reason, message string) error { Status: configv1.ConditionFalse, }, { - Type: configv1.OperatorFailing, + Type: configv1.OperatorDegraded, Status: configv1.ConditionTrue, Reason: reason, Message: message, @@ -223,13 +223,13 @@ func (r *StatusReporter) failing(reason, message string) error { }, } - klog.Warningf("Operator status failing: %s", message) + klog.Warningf("Operator status degraded: %s", message) return r.ApplyStatus(status) } // progressing reports the operator as progressing but available, and not -// failing -- optionally setting a reason and message. +// degraded -- optionally setting a reason and message. 
func (r *StatusReporter) progressing(reason, message string) error { status := configv1.ClusterOperatorStatus{ Conditions: []configv1.ClusterOperatorStatusCondition{ @@ -244,7 +244,7 @@ func (r *StatusReporter) progressing(reason, message string) error { Message: message, }, { - Type: configv1.OperatorFailing, + Type: configv1.OperatorDegraded, Status: configv1.ConditionFalse, }, }, @@ -284,12 +284,12 @@ func (r *StatusReporter) ReportStatus() (bool, error) { ok, err := r.CheckMachineAPI() if err != nil { msg := fmt.Sprintf("error checking machine-api status: %v", err) - r.failing(ReasonMissingDependency, msg) + r.degraded(ReasonMissingDependency, msg) return false, nil } if !ok { - r.failing(ReasonMissingDependency, "machine-api not ready") + r.degraded(ReasonMissingDependency, "machine-api not ready") return false, nil } @@ -297,7 +297,7 @@ func (r *StatusReporter) ReportStatus() (bool, error) { ok, err = r.CheckClusterAutoscaler() if err != nil { msg := fmt.Sprintf("error checking autoscaler status: %v", err) - r.failing(ReasonCheckAutoscaler, msg) + r.degraded(ReasonCheckAutoscaler, msg) return false, nil } @@ -315,7 +315,7 @@ func (r *StatusReporter) ReportStatus() (bool, error) { // CheckMachineAPI checks the status of the machine-api-operator as // reported to the CVO. It returns true if the operator is available -// and not failing. +// and not degraded. func (r *StatusReporter) CheckMachineAPI() (bool, error) { mao, err := r.configClient.ConfigV1().ClusterOperators(). Get("machine-api", metav1.GetOptions{}) @@ -328,7 +328,7 @@ func (r *StatusReporter) CheckMachineAPI() (bool, error) { conds := mao.Status.Conditions if cvorm.IsOperatorStatusConditionTrue(conds, configv1.OperatorAvailable) && - cvorm.IsOperatorStatusConditionFalse(conds, configv1.OperatorFailing) { + cvorm.IsOperatorStatusConditionFalse(conds, configv1.OperatorDegraded) { return true, nil } diff --git a/vendor/github.com/openshift/kubernetes-drain/LICENSE b/vendor/github.com/openshift/kubernetes-drain/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/openshift/kubernetes-drain/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License.
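The behavioral core of the drain.go changes above is the eviction retry loop. Previously, a pod whose eviction kept returning TooManyRequests was retried every 5s indefinitely, and the global timeout aborted the whole drain with a single error; now the dispatcher closes a shared stopCh when the timeout fires, so each per-pod goroutine stops retrying and reports its own failure through returnCh. The following self-contained sketch isolates that pattern; the names (`evictAll`, the per-item failure schedule) are illustrative stand-ins for the real eviction calls, not code from the diff.

```go
// Standalone sketch of the stop-channel/global-timeout pattern introduced in
// evictPods: workers retry until they succeed or a shared channel is closed.
package main

import (
	"fmt"
	"math"
	"time"
)

func evictAll(items int, timeout time.Duration) []error {
	// As in the patch: a zero timeout means "no timeout", modeled as MaxInt64.
	if timeout == 0 {
		timeout = time.Duration(math.MaxInt64)
	}
	returnCh := make(chan error, items)
	stopCh := make(chan struct{})

	for i := 0; i < items; i++ {
		go func(id int) {
			for attempt := 0; ; attempt++ {
				if attempt >= id { // stand-in: item id "evicts" after id retries
					returnCh <- nil
					return
				}
				select {
				case <-stopCh: // timeout fired: give up instead of retrying forever
					returnCh <- fmt.Errorf("global timeout reached, aborting retries for item %d", id)
					return
				default:
					time.Sleep(50 * time.Millisecond) // stand-in for the 5s retry backoff
				}
			}
		}(i)
	}

	var errs []error
	globalTimeoutCh := time.After(timeout)
	for done := 0; done < items; {
		select {
		case err := <-returnCh:
			done++
			if err != nil {
				errs = append(errs, err)
			}
		case <-globalTimeoutCh:
			// Like the patched code, do not return here: close stopCh so every
			// in-flight retry loop exits and reports through returnCh.
			close(stopCh)
		}
	}
	return errs
}

func main() {
	// With a 120ms budget, items needing more than two retries time out.
	fmt.Println(evictAll(5, 120*time.Millisecond))
}
```

Closing a channel, rather than sending on it, lets the single timeout event fan out to every retry loop at once, which is why the patch threads stopCh into each goroutine instead of reusing returnCh.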