diff --git a/.travis.yml b/.travis.yml
index 3a2b0b836..ee0ee1aad 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -6,16 +6,20 @@ go:
   - 1.10.4
 install:
   - go get -u github.com/vbatts/git-validation
+  - curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl
+  - chmod +x kubectl
+  - mv kubectl ${GOPATH}/bin
+  - curl -Lo ./kind https://github.com/kubernetes-sigs/kind/releases/download/v0.7.0/kind-$(uname)-amd64
+  - chmod +x ./kind
+  - mv ./kind ${GOPATH}/bin
 script:
   - git-validation -run DCO,short-subject
   - go fmt $(go list ./... | grep -v vendor) | wc -l | grep 0
   - make docker-proto
   - git diff $(find . -name "*.pb.*go" -o -name "api.swagger.json" | grep -v vendor) | wc -l | grep "^0"
-  - make sdk-check-version
-  - make install
-  - make vet
+  - git grep -rw GPL vendor | grep LICENSE | egrep -v "yaml.v2" | wc -l | grep "^0"
+  - make install verify
   - bash hack/docker-integration-test.sh
-  - make docker-test
   - if [ "${TRAVIS_PULL_REQUEST}" == "false" ]; then
       echo "${DOCKER_PASS}" | docker login -u "${DOCKER_USER}" --password-stdin;
       make push-mock-sdk-server;
diff --git a/Makefile b/Makefile
index e34465851..0ca995541 100644
--- a/Makefile
+++ b/Makefile
@@ -84,6 +84,8 @@ export GO15VENDOREXPERIMENT=1
     clean \
     generate \
     generate-mockfiles \
+    e2e \
+    verify \
     sdk-check-version
@@ -159,7 +161,7 @@ build: packr
     go build -tags "$(TAGS)" $(BUILDFLAGS) $(PKGS)
 
 install: packr $(OSDSANITY)-install
-    go install -tags "$(TAGS)" $(PKGS)
+    go install -gcflags="all=-N -l" -tags "$(TAGS)" $(PKGS)
     go install github.com/libopenstorage/openstorage/cmd/osd-token-generator
 
 $(OSDSANITY):
@@ -416,3 +418,9 @@ mockgen:
     mockgen -destination=api/mock/mock_volume.go -package=mock github.com/libopenstorage/openstorage/api OpenStorageVolumeServer,OpenStorageVolumeClient
     mockgen -destination=api/mock/mock_fstrim.go -package=mock github.com/libopenstorage/openstorage/api OpenStorageFilesystemTrimServer,OpenStorageFilesystemTrimClient
     mockgen -destination=api/mock/mock_fscheck.go -package=mock github.com/libopenstorage/openstorage/api OpenStorageFilesystemCheckServer,OpenStorageFilesystemCheckClient
+    mockgen -destination=api/server/mock/mock_schedops_k8s.go -package=mock github.com/portworx/sched-ops/k8s/core Ops
+
+e2e: docker-build-osd
+    cd test && ./run.bash
+
+verify: vet sdk-check-version docker-test e2e
\ No newline at end of file
diff --git a/api/api.go b/api/api.go
index c89b18e4e..5643d716b 100644
--- a/api/api.go
+++ b/api/api.go
@@ -169,6 +169,19 @@ const (
     AutoAggregation = math.MaxUint32
 )
 
+// The main goal of the following label keys is to let the Kubernetes in-tree middleware
+// keep track of the source location of the PVC with labels that cannot be modified
+// by the owner of the volume, but only by the storage administrator.
+const (
+    // KubernetesPvcNameKey is a label on the openstorage volume
+    // which tracks the source PVC for the volume.
+    KubernetesPvcNameKey = "openstorage.io/pvc-name"
+
+    // KubernetesPvcNamespaceKey is a label on the openstorage volume
+    // which tracks the source PVC namespace for the volume.
+    KubernetesPvcNamespaceKey = "openstorage.io/pvc-namespace"
+)
+
 // Node describes the state of a node.
 // It includes the current physical state (CPU, memory, storage, network usage) as
 // well as the containers running on the system.
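As an illustration (not part of the patch), here is a minimal Go sketch of how the source-PVC labels above might be read back off a volume. It assumes the auth middleware and create() handler changed below have populated the locator labels; the PVC name and namespace values are hypothetical.

package main

import (
	"fmt"

	"github.com/libopenstorage/openstorage/api"
)

func main() {
	// Hypothetical volume; in practice the create() handler copies these
	// labels onto the volume from headers set by the auth middleware.
	v := &api.Volume{
		Locator: &api.VolumeLocator{
			VolumeLabels: map[string]string{
				api.KubernetesPvcNameKey:      "mysql-data", // hypothetical
				api.KubernetesPvcNamespaceKey: "team-a",     // hypothetical
			},
		},
	}

	labels := v.GetLocator().GetVolumeLabels()
	fmt.Printf("volume provisioned for PVC %s/%s\n",
		labels[api.KubernetesPvcNamespaceKey],
		labels[api.KubernetesPvcNameKey])
}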
@@ -1204,4 +1217,6 @@ func (v *Volume) IsAttached() bool {
 type TokenSecretContext struct {
     SecretName      string
     SecretNamespace string
+    PvcName         string
+    PvcNamespace    string
 }
diff --git a/api/server/middleware_auth.go b/api/server/middleware_auth.go
index f7528b15b..cdd9825b4 100644
--- a/api/server/middleware_auth.go
+++ b/api/server/middleware_auth.go
@@ -12,14 +12,14 @@ import (
     "github.com/gorilla/mux"
     "github.com/libopenstorage/openstorage/api"
     "github.com/libopenstorage/openstorage/pkg/auth"
+    "github.com/libopenstorage/openstorage/pkg/auth/secrets"
     osecrets "github.com/libopenstorage/openstorage/pkg/auth/secrets"
+    "github.com/libopenstorage/openstorage/pkg/util"
     "github.com/libopenstorage/openstorage/volume"
     volumedrivers "github.com/libopenstorage/openstorage/volume/drivers"
     lsecrets "github.com/libopenstorage/secrets"
     "github.com/portworx/sched-ops/k8s/core"
     "github.com/sirupsen/logrus"
-    "google.golang.org/grpc/codes"
-    "google.golang.org/grpc/status"
 )
 
 const (
@@ -29,6 +29,11 @@ const (
     PVCNamespaceLabelKey = "namespace"
 )
 
+var (
+    // OverrideSchedDriverName is set by the osd program to override the schedule driver
+    OverrideSchedDriverName = ""
+)
+
 // NewAuthMiddleware returns a negroni implementation of an http middleware
 // which will intercept the management APIs
 func NewAuthMiddleware() *authMiddleware {
@@ -150,31 +155,42 @@ func (a *authMiddleware) createWithAuth(w http.ResponseWriter, r *http.Request,
     spec := dcReq.GetSpec()
     locator := dcReq.GetLocator()
-    tokenSecretContext, err := a.parseSecret(spec.VolumeLabels, locator.VolumeLabels, true)
+    tokenSecretContext, err := a.parseSecret(spec.VolumeLabels, locator.VolumeLabels)
     if err != nil {
         a.log(locator.Name, fn).WithError(err).Error("failed to parse secret")
         dcRes.VolumeResponse = &api.VolumeResponse{Error: "failed to parse secret: " + err.Error()}
         json.NewEncoder(w).Encode(&dcRes)
         return
-    }
-    if tokenSecretContext.SecretName == "" {
-        errorMessage := "Access denied, no secret found in the annotations of the persistent volume claim" +
-            " or storage class parameters"
-        a.log(locator.Name, fn).Error(errorMessage)
-        dcRes.VolumeResponse = &api.VolumeResponse{Error: errorMessage}
-        json.NewEncoder(w).Encode(&dcRes)
-        w.WriteHeader(http.StatusUnauthorized)
-        return
+    } else if tokenSecretContext == nil {
+        tokenSecretContext = &api.TokenSecretContext{}
     }
 
-    token, err := osecrets.GetToken(tokenSecretContext)
-    if err != nil {
-        a.log(locator.Name, fn).WithError(err).Error("failed to get token")
-        dcRes.VolumeResponse = &api.VolumeResponse{Error: "failed to get token: " + err.Error()}
-        json.NewEncoder(w).Encode(&dcRes)
-        return
+    // If no secret is provided, then the caller is accessing publicly
+    if tokenSecretContext.SecretName != "" {
+        token, err := osecrets.GetToken(tokenSecretContext)
+        if err != nil {
+            a.log(locator.Name, fn).WithError(err).Error("failed to get token")
+            dcRes.VolumeResponse = &api.VolumeResponse{Error: "failed to get token: " + err.Error()}
+            json.NewEncoder(w).Encode(&dcRes)
+            return
+        }
+
+        // Save a reference to the secret.
+        // These values are stored in the request headers because we do not want
+        // to adjust the body of the request in this middleware. When create()
+        // reads these values from the headers, it copies them to the labels of
+        // the volume so that the secret can be tracked in the rest of the
+        // middleware calls.
+        r.Header.Set(secrets.SecretNameKey, tokenSecretContext.SecretName)
+        r.Header.Set(secrets.SecretNamespaceKey, tokenSecretContext.SecretNamespace)
+
+        // If the source PVC was set, save it for the next layer to store in
+        // the labels of the volume
+        if len(tokenSecretContext.PvcName) != 0 && len(tokenSecretContext.PvcNamespace) != 0 {
+            r.Header.Set(api.KubernetesPvcNameKey, tokenSecretContext.PvcName)
+            r.Header.Set(api.KubernetesPvcNamespaceKey, tokenSecretContext.PvcNamespace)
+        }
 
-    } else {
         a.insertToken(r, token)
     }
     next(w, r)
@@ -195,82 +211,20 @@ func (a *authMiddleware) setWithAuth(w http.ResponseWriter, r *http.Request, nex
         return
     }
 
-    requestBody := a.getBody(r)
-    var (
-        req      api.VolumeSetRequest
-        resp     api.VolumeSetResponse
-        isOpDone bool
-    )
-    err = json.NewDecoder(requestBody).Decode(&req)
+    token, err := a.fetchSecretForVolume(d, volumeID)
     if err != nil {
-        a.log(volumeID, fn).WithError(err).Error("Failed to parse the request")
-        next(w, r)
+        volumeResponse := &api.VolumeResponse{}
+        a.log(volumeID, fn).WithError(err).Error("Failed to fetch secret")
+        volumeResponse.Error = err.Error()
+        json.NewEncoder(w).Encode(volumeResponse)
         return
     }
-
-    // Not checking tokens for the following APIs
-    // - Resize
-    // - Attach/Detach
-    // - Mount/Unmount
-
-    if req.Spec != nil && req.Spec.Size > 0 {
-        isOpDone = true
-        err = d.Set(volumeID, req.Locator, req.Spec)
-    }
-
-    for err == nil && req.Action != nil {
-        if req.Action.Attach != api.VolumeActionParam_VOLUME_ACTION_PARAM_NONE {
-            isOpDone = true
-            if req.Action.Attach == api.VolumeActionParam_VOLUME_ACTION_PARAM_ON {
-                _, err = d.Attach(volumeID, req.Options)
-            } else {
-                err = d.Detach(volumeID, req.Options)
-            }
-            if err != nil {
-                break
-            }
-        }
-
-        if req.Action.Mount != api.VolumeActionParam_VOLUME_ACTION_PARAM_NONE {
-            isOpDone = true
-            if req.Action.Mount == api.VolumeActionParam_VOLUME_ACTION_PARAM_ON {
-                if req.Action.MountPath == "" {
-                    err = fmt.Errorf("Invalid mount path")
-                    break
-                }
-                err = d.Mount(volumeID, req.Action.MountPath, req.Options)
-            } else {
-                err = d.Unmount(volumeID, req.Action.MountPath, req.Options)
-            }
-            if err != nil {
-                break
-            }
-        }
-        break
+    if len(token) != 0 {
+        a.insertToken(r, token)
     }
-    if isOpDone {
-        if err != nil {
-            processErrorForVolSetResponse(req.Action, err, &resp)
-        } else {
-            v, err := d.Inspect([]string{volumeID})
-            if err != nil {
-                processErrorForVolSetResponse(req.Action, err, &resp)
-            } else if v == nil || len(v) != 1 {
-                processErrorForVolSetResponse(
-                    req.Action,
-                    status.Errorf(codes.NotFound, "Volume with ID: %s is not found", volumeID),
-                    &resp)
-            } else {
-                v0 := v[0]
-                resp.Volume = v0
-            }
-        }
-        json.NewEncoder(w).Encode(resp)
-        // Not calling the next handler
-        return
-    }
     next(w, r)
+
 }
 
 func (a *authMiddleware) deleteWithAuth(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
@@ -288,39 +242,22 @@ func (a *authMiddleware) deleteWithAuth(w http.ResponseWriter, r *http.Request,
         return
     }
 
+    // Idempotency
     vols, err := d.Inspect([]string{volumeID})
     if err != nil || len(vols) == 0 || vols[0] == nil {
-        a.log(volumeID, fn).WithError(err).Error("Failed to get volume object")
         next(w, r)
         return
     }
 
-    volumeResponse := &api.VolumeResponse{}
-    tokenSecretContext, err := a.parseSecret(vols[0].Spec.VolumeLabels, vols[0].Locator.VolumeLabels, false)
+    token, err := a.fetchSecretForVolume(d, volumeID)
     if err != nil {
-        a.log(volumeID, fn).WithError(err).Error("failed to parse secret")
-        volumeResponse.Error = "failed to parse secret: " + err.Error()
+        volumeResponse := &api.VolumeResponse{}
+        a.log(volumeID, fn).WithError(err).Error("Failed to fetch secret")
+        volumeResponse.Error = err.Error()
+        json.NewEncoder(w).Encode(volumeResponse)
         return
     }
-    if tokenSecretContext.SecretName == "" {
-        errorMessage := fmt.Sprintf("Error, unable to get secret information from the volume."+
-            " You may need to re-add the following keys as volume labels to point to the secret: %s and %s",
-            osecrets.SecretNameKey, osecrets.SecretNamespaceKey)
-        a.log(volumeID, fn).Error(errorMessage)
-        volumeResponse = &api.VolumeResponse{Error: errorMessage}
-        json.NewEncoder(w).Encode(volumeResponse)
-        w.WriteHeader(http.StatusInternalServerError)
-        return
-    }
-
-    token, err := osecrets.GetToken(tokenSecretContext)
-    if err != nil {
-        a.log(volumeID, fn).WithError(err).Error("failed to get token")
-        volumeResponse.Error = "failed to get token: " + err.Error()
-        json.NewEncoder(w).Encode(volumeResponse)
-        return
-    } else {
+    if len(token) != 0 {
         a.insertToken(r, token)
     }
@@ -368,39 +305,15 @@ func (a *authMiddleware) enumerateWithAuth(w http.ResponseWriter, r *http.Reques
     }
     volumeID := volIDs[0]
-    vols, err := d.Inspect([]string{volumeID})
-    if err != nil || len(vols) == 0 || vols[0] == nil {
-        a.log(volumeID, fn).WithError(err).Error("Failed to get volume object")
-        next(w, r)
-        return
-    }
-
-    volumeResponse := &api.VolumeResponse{}
-    tokenSecretContext, err := a.parseSecret(vols[0].Spec.VolumeLabels, vols[0].Locator.VolumeLabels, false)
+    token, err := a.fetchSecretForVolume(d, volumeID)
     if err != nil {
-        a.log(volumeID, fn).WithError(err).Error("failed to parse secret")
-        volumeResponse.Error = "failed to parse secret: " + err.Error()
+        volumeResponse := &api.VolumeResponse{}
+        a.log(volumeID, fn).WithError(err).Error("Failed to fetch secret")
+        volumeResponse.Error = err.Error()
         json.NewEncoder(w).Encode(volumeResponse)
         return
     }
-    if tokenSecretContext.SecretName == "" {
-        errorMessage := fmt.Sprintf("Error, unable to get secret information from the volume."+
-            " You may need to re-add the following keys as volume labels to point to the secret: %s and %s",
-            osecrets.SecretNameKey, osecrets.SecretNamespaceKey)
-        a.log(volumeID, fn).Error(errorMessage)
-        volumeResponse = &api.VolumeResponse{Error: errorMessage}
-        json.NewEncoder(w).Encode(volumeResponse)
-        w.WriteHeader(http.StatusInternalServerError)
-        return
-    }
-
-    token, err := osecrets.GetToken(tokenSecretContext)
-    if err != nil {
-        a.log(volumeID, fn).WithError(err).Error("failed to get token")
-        volumeResponse.Error = "failed to get token: " + err.Error()
-        json.NewEncoder(w).Encode(volumeResponse)
-        return
-    } else {
+    if len(token) != 0 {
         a.insertToken(r, token)
     }
@@ -414,7 +327,11 @@ func (a *authMiddleware) isTokenProcessingRequired(r *http.Request) (volume.Volu
     clientName := strings.Split(userAgent, "/")
     if len(clientName) > 0 {
         if strings.HasSuffix(clientName[0], schedDriverPostFix) {
-            d, err := volumedrivers.Get(clientName[0])
+            driverName := clientName[0]
+            if len(OverrideSchedDriverName) != 0 {
+                driverName = OverrideSchedDriverName
+            }
+            d, err := volumedrivers.Get(driverName)
             if err != nil {
                 return nil, false
             }
@@ -450,44 +367,83 @@ func (a *authMiddleware) parseParam(r *http.Request, param string) (string, erro
     return "", fmt.Errorf("could not parse %s", param)
 }
 
-func (a *authMiddleware) parseSecret(
+// This function makes it possible to secure access to the secret by allowing
+// the definition of secret access to come from the storage class, as done by CSI.
+func (a *authMiddleware) getSecretInformationInKubernetes(
     specLabels, locatorLabels map[string]string,
-    fetchCOLabels bool,
 ) (*api.TokenSecretContext, error) {
-    if lsecrets.Instance().String() == lsecrets.TypeK8s && fetchCOLabels {
-        // For k8s fetch the actual annotations
-        pvcName, ok := locatorLabels[PVCNameLabelKey]
-        if !ok {
-            // best effort to fetch the secret
-            return parseSecretFromLabels(specLabels, locatorLabels)
-        }
-        pvcNamespace, ok := locatorLabels[PVCNamespaceLabelKey]
-        if !ok {
-            // best effort to fetch the secret
-            return parseSecretFromLabels(specLabels, locatorLabels)
-        }
+    // Get pvc location and name
+    // For k8s fetch the actual annotations
+    pvcName, ok := getVolumeLabel(PVCNameLabelKey, specLabels, locatorLabels)
+    if !ok {
+        return nil, fmt.Errorf("Unable to authenticate request: could not determine the name of the PVC from the volume")
+    }
+    pvcNamespace, ok := getVolumeLabel(PVCNamespaceLabelKey, specLabels, locatorLabels)
+    if !ok {
+        return nil, fmt.Errorf("Unable to authenticate request: could not determine the namespace of the PVC from the volume")
+    }
 
-        pvc, err := core.Instance().GetPersistentVolumeClaim(pvcName, pvcNamespace)
-        if err != nil {
-            return nil, err
-        }
-        secretName := pvc.ObjectMeta.Annotations[osecrets.SecretNameKey]
+    // Get pvc object
+    pvc, err := core.Instance().GetPersistentVolumeClaim(pvcName, pvcNamespace)
+    if err != nil {
+        return nil, fmt.Errorf("Unable to get PVC information from Kubernetes: %v", err)
+    }
 
-        if len(secretName) == 0 {
-            return parseSecretFromLabels(specLabels, locatorLabels)
-        }
-        secretNamespace := pvc.ObjectMeta.Annotations[osecrets.SecretNamespaceKey]
+    // Get storageclass for pvc object
+    sc, err := core.Instance().GetStorageClassForPVC(pvc)
+    if err != nil {
+        return nil, fmt.Errorf("Unable to get StorageClass information from Kubernetes: %v", err)
+    }
+
+    // Get secret namespace
+    secretNamespaceValue := sc.Parameters[osecrets.SecretNamespaceKey]
+    secretNameValue := sc.Parameters[osecrets.SecretNameKey]
+    if len(secretNameValue) == 0 && len(secretNamespaceValue) == 0 {
+        return &api.TokenSecretContext{}, nil
+    }
+
+    // Allow ${pvc.namespace} to be set in the storage class
+    namespaceParams := map[string]string{"pvc.namespace": pvc.GetNamespace()}
+    secretNamespace, err := util.ResolveTemplate(secretNamespaceValue, namespaceParams)
+    if err != nil {
+        return nil, err
+    }
+
+    // Get secret name
+    nameParams := make(map[string]string)
+    // Allow ${pvc.annotations['pvcNameKey']} to be set in the storage class
+    // See pkg/auth/secrets/secrets.go for more information
+    for k, v := range pvc.Annotations {
+        nameParams["pvc.annotations['"+k+"']"] = v
+    }
+    secretName, err := util.ResolveTemplate(secretNameValue, nameParams)
+    if err != nil {
+        return nil, err
+    }
+
+    return &api.TokenSecretContext{
+        SecretName:      secretName,
+        SecretNamespace: secretNamespace,
+        PvcName:         pvcName,
+        PvcNamespace:    pvcNamespace,
+    }, nil
+}
 
-        return &api.TokenSecretContext{
-            SecretName:      secretName,
-            SecretNamespace: secretNamespace,
-        }, nil
+func (a *authMiddleware) parseSecret(
+    specLabels, locatorLabels map[string]string,
+) (*api.TokenSecretContext, error) {
+
+    // Check if it is Kubernetes
+    if lsecrets.Instance().String() == lsecrets.TypeK8s {
+        return a.getSecretInformationInKubernetes(specLabels, locatorLabels)
     }
+
+    // Not Kubernetes, try to get secret information from labels
     return parseSecretFromLabels(specLabels, locatorLabels)
 }
 
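As an illustration (not part of the patch), here is a minimal Go sketch of the ${...} template resolution used above. It assumes util.ResolveTemplate substitutes each ${key} with params[key], as the comments in the hunk describe; the StorageClass parameter values and the annotation key are hypothetical.

package main

import (
	"fmt"

	"github.com/libopenstorage/openstorage/pkg/util"
)

func main() {
	// Hypothetical StorageClass parameter values written by a storage admin.
	secretNamespaceValue := "${pvc.namespace}"
	secretNameValue := "${pvc.annotations['example.com/auth-secret-name']}"

	// Values the middleware derives from the PVC object.
	namespaceParams := map[string]string{"pvc.namespace": "team-a"}
	nameParams := map[string]string{
		"pvc.annotations['example.com/auth-secret-name']": "px-user-token",
	}

	secretNamespace, err := util.ResolveTemplate(secretNamespaceValue, namespaceParams)
	if err != nil {
		panic(err)
	}
	secretName, err := util.ResolveTemplate(secretNameValue, nameParams)
	if err != nil {
		panic(err)
	}

	// Expected output: "token secret: team-a/px-user-token"
	fmt.Printf("token secret: %s/%s\n", secretNamespace, secretName)
}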
 func parseSecretFromLabels(specLabels, locatorLabels map[string]string) (*api.TokenSecretContext, error) {
-    // Locator labels take precendence
+    // Locator labels take precedence
     secretName := locatorLabels[osecrets.SecretNameKey]
     secretNamespace := locatorLabels[osecrets.SecretNamespaceKey]
     if secretName == "" {
@@ -524,3 +480,43 @@ func (a *authMiddleware) getBody(r *http.Request) io.ReadCloser {
     r.Body = rdr2
     return rdr1
 }
+
+func getVolumeLabel(key string, specLabels, locatorLabels map[string]string) (string, bool) {
+    if v, ok := locatorLabels[key]; ok {
+        return v, true
+    }
+    v, ok := specLabels[key]
+    return v, ok
+}
+
+func (a *authMiddleware) fetchSecretForVolume(d volume.VolumeDriver, id string) (string, error) {
+    vols, err := d.Inspect([]string{id})
+    if err != nil || len(vols) == 0 || vols[0] == nil {
+        return "", fmt.Errorf("Volume %s does not exist", id)
+    }
+
+    v := vols[0]
+    if v.GetLocator().GetVolumeLabels() == nil {
+        return "", nil
+    }
+
+    tokenSecretContext := &api.TokenSecretContext{
+        SecretName:      v.GetLocator().GetVolumeLabels()[secrets.SecretNameKey],
+        SecretNamespace: v.GetLocator().GetVolumeLabels()[secrets.SecretNamespaceKey],
+    }
+
+    // If no secret is provided, then the caller is accessing publicly
+    if tokenSecretContext.SecretName == "" || tokenSecretContext.SecretNamespace == "" {
+        return "", nil
+    }
+
+    // Retrieve secret
+    token, err := osecrets.GetToken(tokenSecretContext)
+    if err != nil {
+        return "", fmt.Errorf("Failed to get token from secret %s/%s: %v",
+            tokenSecretContext.SecretNamespace,
+            tokenSecretContext.SecretName,
+            err)
+    }
+    return token, nil
+}
diff --git a/api/server/mock/mock_schedops_k8s.go b/api/server/mock/mock_schedops_k8s.go
new file mode 100644
index 000000000..3e0a4b11f
--- /dev/null
+++ b/api/server/mock/mock_schedops_k8s.go
@@ -0,0 +1,1388 @@
+// Code generated by MockGen. DO NOT EDIT.
+// Source: github.com/portworx/sched-ops/k8s/core (interfaces: Ops)
+
+// Package mock is a generated GoMock package.
+package mock + +import ( + gomock "github.com/golang/mock/gomock" + core "github.com/portworx/sched-ops/k8s/core" + v1 "k8s.io/api/core/v1" + v10 "k8s.io/api/storage/v1" + v11 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + version "k8s.io/apimachinery/pkg/version" + rest "k8s.io/client-go/rest" + reflect "reflect" + time "time" +) + +// MockOps is a mock of Ops interface +type MockOps struct { + ctrl *gomock.Controller + recorder *MockOpsMockRecorder +} + +// MockOpsMockRecorder is the mock recorder for MockOps +type MockOpsMockRecorder struct { + mock *MockOps +} + +// NewMockOps creates a new mock instance +func NewMockOps(ctrl *gomock.Controller) *MockOps { + mock := &MockOps{ctrl: ctrl} + mock.recorder = &MockOpsMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockOps) EXPECT() *MockOpsMockRecorder { + return m.recorder +} + +// AddLabelOnNode mocks base method +func (m *MockOps) AddLabelOnNode(arg0, arg1, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddLabelOnNode", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddLabelOnNode indicates an expected call of AddLabelOnNode +func (mr *MockOpsMockRecorder) AddLabelOnNode(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddLabelOnNode", reflect.TypeOf((*MockOps)(nil).AddLabelOnNode), arg0, arg1, arg2) +} + +// CordonNode mocks base method +func (m *MockOps) CordonNode(arg0 string, arg1, arg2 time.Duration) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CordonNode", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// CordonNode indicates an expected call of CordonNode +func (mr *MockOpsMockRecorder) CordonNode(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CordonNode", reflect.TypeOf((*MockOps)(nil).CordonNode), arg0, arg1, arg2) +} + +// CreateConfigMap mocks base method +func (m *MockOps) CreateConfigMap(arg0 *v1.ConfigMap) (*v1.ConfigMap, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateConfigMap", arg0) + ret0, _ := ret[0].(*v1.ConfigMap) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateConfigMap indicates an expected call of CreateConfigMap +func (mr *MockOpsMockRecorder) CreateConfigMap(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateConfigMap", reflect.TypeOf((*MockOps)(nil).CreateConfigMap), arg0) +} + +// CreateEndpoints mocks base method +func (m *MockOps) CreateEndpoints(arg0 *v1.Endpoints) (*v1.Endpoints, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateEndpoints", arg0) + ret0, _ := ret[0].(*v1.Endpoints) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateEndpoints indicates an expected call of CreateEndpoints +func (mr *MockOpsMockRecorder) CreateEndpoints(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateEndpoints", reflect.TypeOf((*MockOps)(nil).CreateEndpoints), arg0) +} + +// CreateEvent mocks base method +func (m *MockOps) CreateEvent(arg0 *v1.Event) (*v1.Event, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateEvent", arg0) + ret0, _ := ret[0].(*v1.Event) + ret1, _ := ret[1].(error) + return ret0, 
ret1 +} + +// CreateEvent indicates an expected call of CreateEvent +func (mr *MockOpsMockRecorder) CreateEvent(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateEvent", reflect.TypeOf((*MockOps)(nil).CreateEvent), arg0) +} + +// CreateNamespace mocks base method +func (m *MockOps) CreateNamespace(arg0 string, arg1 map[string]string) (*v1.Namespace, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateNamespace", arg0, arg1) + ret0, _ := ret[0].(*v1.Namespace) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateNamespace indicates an expected call of CreateNamespace +func (mr *MockOpsMockRecorder) CreateNamespace(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateNamespace", reflect.TypeOf((*MockOps)(nil).CreateNamespace), arg0, arg1) +} + +// CreateNode mocks base method +func (m *MockOps) CreateNode(arg0 *v1.Node) (*v1.Node, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateNode", arg0) + ret0, _ := ret[0].(*v1.Node) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateNode indicates an expected call of CreateNode +func (mr *MockOpsMockRecorder) CreateNode(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateNode", reflect.TypeOf((*MockOps)(nil).CreateNode), arg0) +} + +// CreatePersistentVolume mocks base method +func (m *MockOps) CreatePersistentVolume(arg0 *v1.PersistentVolume) (*v1.PersistentVolume, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreatePersistentVolume", arg0) + ret0, _ := ret[0].(*v1.PersistentVolume) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreatePersistentVolume indicates an expected call of CreatePersistentVolume +func (mr *MockOpsMockRecorder) CreatePersistentVolume(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreatePersistentVolume", reflect.TypeOf((*MockOps)(nil).CreatePersistentVolume), arg0) +} + +// CreatePersistentVolumeClaim mocks base method +func (m *MockOps) CreatePersistentVolumeClaim(arg0 *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreatePersistentVolumeClaim", arg0) + ret0, _ := ret[0].(*v1.PersistentVolumeClaim) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreatePersistentVolumeClaim indicates an expected call of CreatePersistentVolumeClaim +func (mr *MockOpsMockRecorder) CreatePersistentVolumeClaim(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreatePersistentVolumeClaim", reflect.TypeOf((*MockOps)(nil).CreatePersistentVolumeClaim), arg0) +} + +// CreatePod mocks base method +func (m *MockOps) CreatePod(arg0 *v1.Pod) (*v1.Pod, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreatePod", arg0) + ret0, _ := ret[0].(*v1.Pod) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreatePod indicates an expected call of CreatePod +func (mr *MockOpsMockRecorder) CreatePod(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreatePod", reflect.TypeOf((*MockOps)(nil).CreatePod), arg0) +} + +// CreateSecret mocks base method +func (m *MockOps) CreateSecret(arg0 *v1.Secret) (*v1.Secret, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateSecret", arg0) + ret0, _ := ret[0].(*v1.Secret) + ret1, 
_ := ret[1].(error) + return ret0, ret1 +} + +// CreateSecret indicates an expected call of CreateSecret +func (mr *MockOpsMockRecorder) CreateSecret(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSecret", reflect.TypeOf((*MockOps)(nil).CreateSecret), arg0) +} + +// CreateService mocks base method +func (m *MockOps) CreateService(arg0 *v1.Service) (*v1.Service, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateService", arg0) + ret0, _ := ret[0].(*v1.Service) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateService indicates an expected call of CreateService +func (mr *MockOpsMockRecorder) CreateService(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateService", reflect.TypeOf((*MockOps)(nil).CreateService), arg0) +} + +// CreateServiceAccount mocks base method +func (m *MockOps) CreateServiceAccount(arg0 *v1.ServiceAccount) (*v1.ServiceAccount, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateServiceAccount", arg0) + ret0, _ := ret[0].(*v1.ServiceAccount) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateServiceAccount indicates an expected call of CreateServiceAccount +func (mr *MockOpsMockRecorder) CreateServiceAccount(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateServiceAccount", reflect.TypeOf((*MockOps)(nil).CreateServiceAccount), arg0) +} + +// DeleteConfigMap mocks base method +func (m *MockOps) DeleteConfigMap(arg0, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteConfigMap", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteConfigMap indicates an expected call of DeleteConfigMap +func (mr *MockOpsMockRecorder) DeleteConfigMap(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteConfigMap", reflect.TypeOf((*MockOps)(nil).DeleteConfigMap), arg0, arg1) +} + +// DeleteEndpoints mocks base method +func (m *MockOps) DeleteEndpoints(arg0, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteEndpoints", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteEndpoints indicates an expected call of DeleteEndpoints +func (mr *MockOpsMockRecorder) DeleteEndpoints(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteEndpoints", reflect.TypeOf((*MockOps)(nil).DeleteEndpoints), arg0, arg1) +} + +// DeleteNamespace mocks base method +func (m *MockOps) DeleteNamespace(arg0 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteNamespace", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteNamespace indicates an expected call of DeleteNamespace +func (mr *MockOpsMockRecorder) DeleteNamespace(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNamespace", reflect.TypeOf((*MockOps)(nil).DeleteNamespace), arg0) +} + +// DeletePersistentVolume mocks base method +func (m *MockOps) DeletePersistentVolume(arg0 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeletePersistentVolume", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeletePersistentVolume indicates an expected call of DeletePersistentVolume +func (mr *MockOpsMockRecorder) DeletePersistentVolume(arg0 interface{}) *gomock.Call { + 
mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePersistentVolume", reflect.TypeOf((*MockOps)(nil).DeletePersistentVolume), arg0) +} + +// DeletePersistentVolumeClaim mocks base method +func (m *MockOps) DeletePersistentVolumeClaim(arg0, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeletePersistentVolumeClaim", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeletePersistentVolumeClaim indicates an expected call of DeletePersistentVolumeClaim +func (mr *MockOpsMockRecorder) DeletePersistentVolumeClaim(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePersistentVolumeClaim", reflect.TypeOf((*MockOps)(nil).DeletePersistentVolumeClaim), arg0, arg1) +} + +// DeletePod mocks base method +func (m *MockOps) DeletePod(arg0, arg1 string, arg2 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeletePod", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeletePod indicates an expected call of DeletePod +func (mr *MockOpsMockRecorder) DeletePod(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePod", reflect.TypeOf((*MockOps)(nil).DeletePod), arg0, arg1, arg2) +} + +// DeletePods mocks base method +func (m *MockOps) DeletePods(arg0 []v1.Pod, arg1 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeletePods", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeletePods indicates an expected call of DeletePods +func (mr *MockOpsMockRecorder) DeletePods(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePods", reflect.TypeOf((*MockOps)(nil).DeletePods), arg0, arg1) +} + +// DeleteSecret mocks base method +func (m *MockOps) DeleteSecret(arg0, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteSecret", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteSecret indicates an expected call of DeleteSecret +func (mr *MockOpsMockRecorder) DeleteSecret(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSecret", reflect.TypeOf((*MockOps)(nil).DeleteSecret), arg0, arg1) +} + +// DeleteService mocks base method +func (m *MockOps) DeleteService(arg0, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteService", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteService indicates an expected call of DeleteService +func (mr *MockOpsMockRecorder) DeleteService(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteService", reflect.TypeOf((*MockOps)(nil).DeleteService), arg0, arg1) +} + +// DeleteServiceAccount mocks base method +func (m *MockOps) DeleteServiceAccount(arg0, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteServiceAccount", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteServiceAccount indicates an expected call of DeleteServiceAccount +func (mr *MockOpsMockRecorder) DeleteServiceAccount(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteServiceAccount", reflect.TypeOf((*MockOps)(nil).DeleteServiceAccount), arg0, arg1) +} + +// DescribeService mocks base method +func (m *MockOps) DescribeService(arg0, arg1 
string) (*v1.ServiceStatus, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeService", arg0, arg1) + ret0, _ := ret[0].(*v1.ServiceStatus) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeService indicates an expected call of DescribeService +func (mr *MockOpsMockRecorder) DescribeService(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeService", reflect.TypeOf((*MockOps)(nil).DescribeService), arg0, arg1) +} + +// DrainPodsFromNode mocks base method +func (m *MockOps) DrainPodsFromNode(arg0 string, arg1 []v1.Pod, arg2, arg3 time.Duration) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DrainPodsFromNode", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// DrainPodsFromNode indicates an expected call of DrainPodsFromNode +func (mr *MockOpsMockRecorder) DrainPodsFromNode(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DrainPodsFromNode", reflect.TypeOf((*MockOps)(nil).DrainPodsFromNode), arg0, arg1, arg2, arg3) +} + +// FindMyNode mocks base method +func (m *MockOps) FindMyNode() (*v1.Node, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FindMyNode") + ret0, _ := ret[0].(*v1.Node) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FindMyNode indicates an expected call of FindMyNode +func (mr *MockOpsMockRecorder) FindMyNode() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindMyNode", reflect.TypeOf((*MockOps)(nil).FindMyNode)) +} + +// GetConfigMap mocks base method +func (m *MockOps) GetConfigMap(arg0, arg1 string) (*v1.ConfigMap, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetConfigMap", arg0, arg1) + ret0, _ := ret[0].(*v1.ConfigMap) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetConfigMap indicates an expected call of GetConfigMap +func (mr *MockOpsMockRecorder) GetConfigMap(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConfigMap", reflect.TypeOf((*MockOps)(nil).GetConfigMap), arg0, arg1) +} + +// GetEndpoints mocks base method +func (m *MockOps) GetEndpoints(arg0, arg1 string) (*v1.Endpoints, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEndpoints", arg0, arg1) + ret0, _ := ret[0].(*v1.Endpoints) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetEndpoints indicates an expected call of GetEndpoints +func (mr *MockOpsMockRecorder) GetEndpoints(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEndpoints", reflect.TypeOf((*MockOps)(nil).GetEndpoints), arg0, arg1) +} + +// GetLabelsOnNode mocks base method +func (m *MockOps) GetLabelsOnNode(arg0 string) (map[string]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetLabelsOnNode", arg0) + ret0, _ := ret[0].(map[string]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetLabelsOnNode indicates an expected call of GetLabelsOnNode +func (mr *MockOpsMockRecorder) GetLabelsOnNode(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLabelsOnNode", reflect.TypeOf((*MockOps)(nil).GetLabelsOnNode), arg0) +} + +// GetNamespace mocks base method +func (m *MockOps) GetNamespace(arg0 string) (*v1.Namespace, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNamespace", arg0) + 
ret0, _ := ret[0].(*v1.Namespace) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNamespace indicates an expected call of GetNamespace +func (mr *MockOpsMockRecorder) GetNamespace(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNamespace", reflect.TypeOf((*MockOps)(nil).GetNamespace), arg0) +} + +// GetNodeByName mocks base method +func (m *MockOps) GetNodeByName(arg0 string) (*v1.Node, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNodeByName", arg0) + ret0, _ := ret[0].(*v1.Node) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNodeByName indicates an expected call of GetNodeByName +func (mr *MockOpsMockRecorder) GetNodeByName(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeByName", reflect.TypeOf((*MockOps)(nil).GetNodeByName), arg0) +} + +// GetNodes mocks base method +func (m *MockOps) GetNodes() (*v1.NodeList, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNodes") + ret0, _ := ret[0].(*v1.NodeList) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetNodes indicates an expected call of GetNodes +func (mr *MockOpsMockRecorder) GetNodes() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodes", reflect.TypeOf((*MockOps)(nil).GetNodes)) +} + +// GetPVCsUsingStorageClass mocks base method +func (m *MockOps) GetPVCsUsingStorageClass(arg0 string) ([]v1.PersistentVolumeClaim, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPVCsUsingStorageClass", arg0) + ret0, _ := ret[0].([]v1.PersistentVolumeClaim) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPVCsUsingStorageClass indicates an expected call of GetPVCsUsingStorageClass +func (mr *MockOpsMockRecorder) GetPVCsUsingStorageClass(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPVCsUsingStorageClass", reflect.TypeOf((*MockOps)(nil).GetPVCsUsingStorageClass), arg0) +} + +// GetPersistentVolume mocks base method +func (m *MockOps) GetPersistentVolume(arg0 string) (*v1.PersistentVolume, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPersistentVolume", arg0) + ret0, _ := ret[0].(*v1.PersistentVolume) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPersistentVolume indicates an expected call of GetPersistentVolume +func (mr *MockOpsMockRecorder) GetPersistentVolume(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPersistentVolume", reflect.TypeOf((*MockOps)(nil).GetPersistentVolume), arg0) +} + +// GetPersistentVolumeClaim mocks base method +func (m *MockOps) GetPersistentVolumeClaim(arg0, arg1 string) (*v1.PersistentVolumeClaim, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPersistentVolumeClaim", arg0, arg1) + ret0, _ := ret[0].(*v1.PersistentVolumeClaim) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPersistentVolumeClaim indicates an expected call of GetPersistentVolumeClaim +func (mr *MockOpsMockRecorder) GetPersistentVolumeClaim(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPersistentVolumeClaim", reflect.TypeOf((*MockOps)(nil).GetPersistentVolumeClaim), arg0, arg1) +} + +// GetPersistentVolumeClaimParams mocks base method +func (m *MockOps) GetPersistentVolumeClaimParams(arg0 *v1.PersistentVolumeClaim) 
(map[string]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPersistentVolumeClaimParams", arg0) + ret0, _ := ret[0].(map[string]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPersistentVolumeClaimParams indicates an expected call of GetPersistentVolumeClaimParams +func (mr *MockOpsMockRecorder) GetPersistentVolumeClaimParams(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPersistentVolumeClaimParams", reflect.TypeOf((*MockOps)(nil).GetPersistentVolumeClaimParams), arg0) +} + +// GetPersistentVolumeClaimStatus mocks base method +func (m *MockOps) GetPersistentVolumeClaimStatus(arg0 *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaimStatus, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPersistentVolumeClaimStatus", arg0) + ret0, _ := ret[0].(*v1.PersistentVolumeClaimStatus) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPersistentVolumeClaimStatus indicates an expected call of GetPersistentVolumeClaimStatus +func (mr *MockOpsMockRecorder) GetPersistentVolumeClaimStatus(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPersistentVolumeClaimStatus", reflect.TypeOf((*MockOps)(nil).GetPersistentVolumeClaimStatus), arg0) +} + +// GetPersistentVolumeClaims mocks base method +func (m *MockOps) GetPersistentVolumeClaims(arg0 string, arg1 map[string]string) (*v1.PersistentVolumeClaimList, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPersistentVolumeClaims", arg0, arg1) + ret0, _ := ret[0].(*v1.PersistentVolumeClaimList) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPersistentVolumeClaims indicates an expected call of GetPersistentVolumeClaims +func (mr *MockOpsMockRecorder) GetPersistentVolumeClaims(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPersistentVolumeClaims", reflect.TypeOf((*MockOps)(nil).GetPersistentVolumeClaims), arg0, arg1) +} + +// GetPersistentVolumes mocks base method +func (m *MockOps) GetPersistentVolumes() (*v1.PersistentVolumeList, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPersistentVolumes") + ret0, _ := ret[0].(*v1.PersistentVolumeList) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPersistentVolumes indicates an expected call of GetPersistentVolumes +func (mr *MockOpsMockRecorder) GetPersistentVolumes() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPersistentVolumes", reflect.TypeOf((*MockOps)(nil).GetPersistentVolumes)) +} + +// GetPodByName mocks base method +func (m *MockOps) GetPodByName(arg0, arg1 string) (*v1.Pod, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPodByName", arg0, arg1) + ret0, _ := ret[0].(*v1.Pod) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPodByName indicates an expected call of GetPodByName +func (mr *MockOpsMockRecorder) GetPodByName(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPodByName", reflect.TypeOf((*MockOps)(nil).GetPodByName), arg0, arg1) +} + +// GetPodByUID mocks base method +func (m *MockOps) GetPodByUID(arg0 types.UID, arg1 string) (*v1.Pod, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPodByUID", arg0, arg1) + ret0, _ := ret[0].(*v1.Pod) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPodByUID indicates an expected call of 
GetPodByUID +func (mr *MockOpsMockRecorder) GetPodByUID(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPodByUID", reflect.TypeOf((*MockOps)(nil).GetPodByUID), arg0, arg1) +} + +// GetPods mocks base method +func (m *MockOps) GetPods(arg0 string, arg1 map[string]string) (*v1.PodList, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPods", arg0, arg1) + ret0, _ := ret[0].(*v1.PodList) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPods indicates an expected call of GetPods +func (mr *MockOpsMockRecorder) GetPods(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPods", reflect.TypeOf((*MockOps)(nil).GetPods), arg0, arg1) +} + +// GetPodsByNode mocks base method +func (m *MockOps) GetPodsByNode(arg0, arg1 string) (*v1.PodList, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPodsByNode", arg0, arg1) + ret0, _ := ret[0].(*v1.PodList) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPodsByNode indicates an expected call of GetPodsByNode +func (mr *MockOpsMockRecorder) GetPodsByNode(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPodsByNode", reflect.TypeOf((*MockOps)(nil).GetPodsByNode), arg0, arg1) +} + +// GetPodsByNodeAndLabels mocks base method +func (m *MockOps) GetPodsByNodeAndLabels(arg0, arg1 string, arg2 map[string]string) (*v1.PodList, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPodsByNodeAndLabels", arg0, arg1, arg2) + ret0, _ := ret[0].(*v1.PodList) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPodsByNodeAndLabels indicates an expected call of GetPodsByNodeAndLabels +func (mr *MockOpsMockRecorder) GetPodsByNodeAndLabels(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPodsByNodeAndLabels", reflect.TypeOf((*MockOps)(nil).GetPodsByNodeAndLabels), arg0, arg1, arg2) +} + +// GetPodsByOwner mocks base method +func (m *MockOps) GetPodsByOwner(arg0 types.UID, arg1 string) ([]v1.Pod, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPodsByOwner", arg0, arg1) + ret0, _ := ret[0].([]v1.Pod) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPodsByOwner indicates an expected call of GetPodsByOwner +func (mr *MockOpsMockRecorder) GetPodsByOwner(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPodsByOwner", reflect.TypeOf((*MockOps)(nil).GetPodsByOwner), arg0, arg1) +} + +// GetPodsUsingPV mocks base method +func (m *MockOps) GetPodsUsingPV(arg0 string) ([]v1.Pod, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPodsUsingPV", arg0) + ret0, _ := ret[0].([]v1.Pod) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPodsUsingPV indicates an expected call of GetPodsUsingPV +func (mr *MockOpsMockRecorder) GetPodsUsingPV(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPodsUsingPV", reflect.TypeOf((*MockOps)(nil).GetPodsUsingPV), arg0) +} + +// GetPodsUsingPVByNodeName mocks base method +func (m *MockOps) GetPodsUsingPVByNodeName(arg0, arg1 string) ([]v1.Pod, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPodsUsingPVByNodeName", arg0, arg1) + ret0, _ := ret[0].([]v1.Pod) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// 
GetPodsUsingPVByNodeName indicates an expected call of GetPodsUsingPVByNodeName +func (mr *MockOpsMockRecorder) GetPodsUsingPVByNodeName(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPodsUsingPVByNodeName", reflect.TypeOf((*MockOps)(nil).GetPodsUsingPVByNodeName), arg0, arg1) +} + +// GetPodsUsingPVC mocks base method +func (m *MockOps) GetPodsUsingPVC(arg0, arg1 string) ([]v1.Pod, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPodsUsingPVC", arg0, arg1) + ret0, _ := ret[0].([]v1.Pod) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPodsUsingPVC indicates an expected call of GetPodsUsingPVC +func (mr *MockOpsMockRecorder) GetPodsUsingPVC(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPodsUsingPVC", reflect.TypeOf((*MockOps)(nil).GetPodsUsingPVC), arg0, arg1) +} + +// GetPodsUsingPVCByNodeName mocks base method +func (m *MockOps) GetPodsUsingPVCByNodeName(arg0, arg1, arg2 string) ([]v1.Pod, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPodsUsingPVCByNodeName", arg0, arg1, arg2) + ret0, _ := ret[0].([]v1.Pod) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPodsUsingPVCByNodeName indicates an expected call of GetPodsUsingPVCByNodeName +func (mr *MockOpsMockRecorder) GetPodsUsingPVCByNodeName(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPodsUsingPVCByNodeName", reflect.TypeOf((*MockOps)(nil).GetPodsUsingPVCByNodeName), arg0, arg1, arg2) +} + +// GetPodsUsingVolumePlugin mocks base method +func (m *MockOps) GetPodsUsingVolumePlugin(arg0 string) ([]v1.Pod, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPodsUsingVolumePlugin", arg0) + ret0, _ := ret[0].([]v1.Pod) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPodsUsingVolumePlugin indicates an expected call of GetPodsUsingVolumePlugin +func (mr *MockOpsMockRecorder) GetPodsUsingVolumePlugin(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPodsUsingVolumePlugin", reflect.TypeOf((*MockOps)(nil).GetPodsUsingVolumePlugin), arg0) +} + +// GetPodsUsingVolumePluginByNodeName mocks base method +func (m *MockOps) GetPodsUsingVolumePluginByNodeName(arg0, arg1 string) ([]v1.Pod, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPodsUsingVolumePluginByNodeName", arg0, arg1) + ret0, _ := ret[0].([]v1.Pod) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPodsUsingVolumePluginByNodeName indicates an expected call of GetPodsUsingVolumePluginByNodeName +func (mr *MockOpsMockRecorder) GetPodsUsingVolumePluginByNodeName(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPodsUsingVolumePluginByNodeName", reflect.TypeOf((*MockOps)(nil).GetPodsUsingVolumePluginByNodeName), arg0, arg1) +} + +// GetSecret mocks base method +func (m *MockOps) GetSecret(arg0, arg1 string) (*v1.Secret, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSecret", arg0, arg1) + ret0, _ := ret[0].(*v1.Secret) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSecret indicates an expected call of GetSecret +func (mr *MockOpsMockRecorder) GetSecret(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSecret", 
reflect.TypeOf((*MockOps)(nil).GetSecret), arg0, arg1) +} + +// GetService mocks base method +func (m *MockOps) GetService(arg0, arg1 string) (*v1.Service, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetService", arg0, arg1) + ret0, _ := ret[0].(*v1.Service) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetService indicates an expected call of GetService +func (mr *MockOpsMockRecorder) GetService(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetService", reflect.TypeOf((*MockOps)(nil).GetService), arg0, arg1) +} + +// GetServiceAccount mocks base method +func (m *MockOps) GetServiceAccount(arg0, arg1 string) (*v1.ServiceAccount, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetServiceAccount", arg0, arg1) + ret0, _ := ret[0].(*v1.ServiceAccount) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetServiceAccount indicates an expected call of GetServiceAccount +func (mr *MockOpsMockRecorder) GetServiceAccount(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServiceAccount", reflect.TypeOf((*MockOps)(nil).GetServiceAccount), arg0, arg1) +} + +// GetServiceEndpoint mocks base method +func (m *MockOps) GetServiceEndpoint(arg0, arg1 string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetServiceEndpoint", arg0, arg1) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetServiceEndpoint indicates an expected call of GetServiceEndpoint +func (mr *MockOpsMockRecorder) GetServiceEndpoint(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetServiceEndpoint", reflect.TypeOf((*MockOps)(nil).GetServiceEndpoint), arg0, arg1) +} + +// GetStorageClassForPVC mocks base method +func (m *MockOps) GetStorageClassForPVC(arg0 *v1.PersistentVolumeClaim) (*v10.StorageClass, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetStorageClassForPVC", arg0) + ret0, _ := ret[0].(*v10.StorageClass) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetStorageClassForPVC indicates an expected call of GetStorageClassForPVC +func (mr *MockOpsMockRecorder) GetStorageClassForPVC(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStorageClassForPVC", reflect.TypeOf((*MockOps)(nil).GetStorageClassForPVC), arg0) +} + +// GetStorageProvisionerForPVC mocks base method +func (m *MockOps) GetStorageProvisionerForPVC(arg0 *v1.PersistentVolumeClaim) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetStorageProvisionerForPVC", arg0) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetStorageProvisionerForPVC indicates an expected call of GetStorageProvisionerForPVC +func (mr *MockOpsMockRecorder) GetStorageProvisionerForPVC(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStorageProvisionerForPVC", reflect.TypeOf((*MockOps)(nil).GetStorageProvisionerForPVC), arg0) +} + +// GetVersion mocks base method +func (m *MockOps) GetVersion() (*version.Info, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVersion") + ret0, _ := ret[0].(*version.Info) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetVersion indicates an expected call of GetVersion +func (mr *MockOpsMockRecorder) GetVersion() *gomock.Call { + 
mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVersion", reflect.TypeOf((*MockOps)(nil).GetVersion)) +} + +// GetVolumeForPersistentVolumeClaim mocks base method +func (m *MockOps) GetVolumeForPersistentVolumeClaim(arg0 *v1.PersistentVolumeClaim) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVolumeForPersistentVolumeClaim", arg0) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetVolumeForPersistentVolumeClaim indicates an expected call of GetVolumeForPersistentVolumeClaim +func (mr *MockOpsMockRecorder) GetVolumeForPersistentVolumeClaim(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVolumeForPersistentVolumeClaim", reflect.TypeOf((*MockOps)(nil).GetVolumeForPersistentVolumeClaim), arg0) +} + +// IsNodeMaster mocks base method +func (m *MockOps) IsNodeMaster(arg0 v1.Node) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsNodeMaster", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// IsNodeMaster indicates an expected call of IsNodeMaster +func (mr *MockOpsMockRecorder) IsNodeMaster(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsNodeMaster", reflect.TypeOf((*MockOps)(nil).IsNodeMaster), arg0) +} + +// IsNodeReady mocks base method +func (m *MockOps) IsNodeReady(arg0 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsNodeReady", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// IsNodeReady indicates an expected call of IsNodeReady +func (mr *MockOpsMockRecorder) IsNodeReady(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsNodeReady", reflect.TypeOf((*MockOps)(nil).IsNodeReady), arg0) +} + +// IsPodBeingManaged mocks base method +func (m *MockOps) IsPodBeingManaged(arg0 v1.Pod) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsPodBeingManaged", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// IsPodBeingManaged indicates an expected call of IsPodBeingManaged +func (mr *MockOpsMockRecorder) IsPodBeingManaged(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsPodBeingManaged", reflect.TypeOf((*MockOps)(nil).IsPodBeingManaged), arg0) +} + +// IsPodReady mocks base method +func (m *MockOps) IsPodReady(arg0 v1.Pod) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsPodReady", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// IsPodReady indicates an expected call of IsPodReady +func (mr *MockOpsMockRecorder) IsPodReady(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsPodReady", reflect.TypeOf((*MockOps)(nil).IsPodReady), arg0) +} + +// IsPodRunning mocks base method +func (m *MockOps) IsPodRunning(arg0 v1.Pod) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsPodRunning", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// IsPodRunning indicates an expected call of IsPodRunning +func (mr *MockOpsMockRecorder) IsPodRunning(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsPodRunning", reflect.TypeOf((*MockOps)(nil).IsPodRunning), arg0) +} + +// ListEvents mocks base method +func (m *MockOps) ListEvents(arg0 string, arg1 v11.ListOptions) (*v1.EventList, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListEvents", arg0, arg1) + ret0, _ := 
ret[0].(*v1.EventList) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListEvents indicates an expected call of ListEvents +func (mr *MockOpsMockRecorder) ListEvents(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListEvents", reflect.TypeOf((*MockOps)(nil).ListEvents), arg0, arg1) +} + +// ListNamespaces mocks base method +func (m *MockOps) ListNamespaces(arg0 map[string]string) (*v1.NamespaceList, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListNamespaces", arg0) + ret0, _ := ret[0].(*v1.NamespaceList) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListNamespaces indicates an expected call of ListNamespaces +func (mr *MockOpsMockRecorder) ListNamespaces(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNamespaces", reflect.TypeOf((*MockOps)(nil).ListNamespaces), arg0) +} + +// ListServices mocks base method +func (m *MockOps) ListServices(arg0 string, arg1 v11.ListOptions) (*v1.ServiceList, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListServices", arg0, arg1) + ret0, _ := ret[0].(*v1.ServiceList) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListServices indicates an expected call of ListServices +func (mr *MockOpsMockRecorder) ListServices(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListServices", reflect.TypeOf((*MockOps)(nil).ListServices), arg0, arg1) +} + +// PatchEndpoints mocks base method +func (m *MockOps) PatchEndpoints(arg0, arg1 string, arg2 types.PatchType, arg3 []byte) (*v1.Endpoints, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PatchEndpoints", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(*v1.Endpoints) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PatchEndpoints indicates an expected call of PatchEndpoints +func (mr *MockOpsMockRecorder) PatchEndpoints(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PatchEndpoints", reflect.TypeOf((*MockOps)(nil).PatchEndpoints), arg0, arg1, arg2, arg3) +} + +// PatchService mocks base method +func (m *MockOps) PatchService(arg0, arg1 string, arg2 []byte) (*v1.Service, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PatchService", arg0, arg1, arg2) + ret0, _ := ret[0].(*v1.Service) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PatchService indicates an expected call of PatchService +func (mr *MockOpsMockRecorder) PatchService(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PatchService", reflect.TypeOf((*MockOps)(nil).PatchService), arg0, arg1, arg2) +} + +// RecordEvent mocks base method +func (m *MockOps) RecordEvent(arg0 v1.EventSource, arg1 runtime.Object, arg2, arg3, arg4 string) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RecordEvent", arg0, arg1, arg2, arg3, arg4) +} + +// RecordEvent indicates an expected call of RecordEvent +func (mr *MockOpsMockRecorder) RecordEvent(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecordEvent", reflect.TypeOf((*MockOps)(nil).RecordEvent), arg0, arg1, arg2, arg3, arg4) +} + +// RemoveLabelOnNode mocks base method +func (m *MockOps) RemoveLabelOnNode(arg0, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveLabelOnNode", 
arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemoveLabelOnNode indicates an expected call of RemoveLabelOnNode +func (mr *MockOpsMockRecorder) RemoveLabelOnNode(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveLabelOnNode", reflect.TypeOf((*MockOps)(nil).RemoveLabelOnNode), arg0, arg1) +} + +// ResourceExists mocks base method +func (m *MockOps) ResourceExists(arg0 schema.GroupVersionKind) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResourceExists", arg0) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ResourceExists indicates an expected call of ResourceExists +func (mr *MockOpsMockRecorder) ResourceExists(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResourceExists", reflect.TypeOf((*MockOps)(nil).ResourceExists), arg0) +} + +// RunCommandInPod mocks base method +func (m *MockOps) RunCommandInPod(arg0 []string, arg1, arg2, arg3 string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RunCommandInPod", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RunCommandInPod indicates an expected call of RunCommandInPod +func (mr *MockOpsMockRecorder) RunCommandInPod(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunCommandInPod", reflect.TypeOf((*MockOps)(nil).RunCommandInPod), arg0, arg1, arg2, arg3) +} + +// SearchNodeByAddresses mocks base method +func (m *MockOps) SearchNodeByAddresses(arg0 []string) (*v1.Node, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SearchNodeByAddresses", arg0) + ret0, _ := ret[0].(*v1.Node) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SearchNodeByAddresses indicates an expected call of SearchNodeByAddresses +func (mr *MockOpsMockRecorder) SearchNodeByAddresses(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SearchNodeByAddresses", reflect.TypeOf((*MockOps)(nil).SearchNodeByAddresses), arg0) +} + +// SetConfig mocks base method +func (m *MockOps) SetConfig(arg0 *rest.Config) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetConfig", arg0) +} + +// SetConfig indicates an expected call of SetConfig +func (mr *MockOpsMockRecorder) SetConfig(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetConfig", reflect.TypeOf((*MockOps)(nil).SetConfig), arg0) +} + +// UnCordonNode mocks base method +func (m *MockOps) UnCordonNode(arg0 string, arg1, arg2 time.Duration) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UnCordonNode", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// UnCordonNode indicates an expected call of UnCordonNode +func (mr *MockOpsMockRecorder) UnCordonNode(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnCordonNode", reflect.TypeOf((*MockOps)(nil).UnCordonNode), arg0, arg1, arg2) +} + +// UpdateConfigMap mocks base method +func (m *MockOps) UpdateConfigMap(arg0 *v1.ConfigMap) (*v1.ConfigMap, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateConfigMap", arg0) + ret0, _ := ret[0].(*v1.ConfigMap) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateConfigMap indicates an expected call of UpdateConfigMap +func (mr 
*MockOpsMockRecorder) UpdateConfigMap(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateConfigMap", reflect.TypeOf((*MockOps)(nil).UpdateConfigMap), arg0) +} + +// UpdateNode mocks base method +func (m *MockOps) UpdateNode(arg0 *v1.Node) (*v1.Node, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateNode", arg0) + ret0, _ := ret[0].(*v1.Node) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateNode indicates an expected call of UpdateNode +func (mr *MockOpsMockRecorder) UpdateNode(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateNode", reflect.TypeOf((*MockOps)(nil).UpdateNode), arg0) +} + +// UpdatePersistentVolumeClaim mocks base method +func (m *MockOps) UpdatePersistentVolumeClaim(arg0 *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdatePersistentVolumeClaim", arg0) + ret0, _ := ret[0].(*v1.PersistentVolumeClaim) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdatePersistentVolumeClaim indicates an expected call of UpdatePersistentVolumeClaim +func (mr *MockOpsMockRecorder) UpdatePersistentVolumeClaim(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePersistentVolumeClaim", reflect.TypeOf((*MockOps)(nil).UpdatePersistentVolumeClaim), arg0) +} + +// UpdatePod mocks base method +func (m *MockOps) UpdatePod(arg0 *v1.Pod) (*v1.Pod, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdatePod", arg0) + ret0, _ := ret[0].(*v1.Pod) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdatePod indicates an expected call of UpdatePod +func (mr *MockOpsMockRecorder) UpdatePod(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePod", reflect.TypeOf((*MockOps)(nil).UpdatePod), arg0) +} + +// UpdateSecret mocks base method +func (m *MockOps) UpdateSecret(arg0 *v1.Secret) (*v1.Secret, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateSecret", arg0) + ret0, _ := ret[0].(*v1.Secret) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateSecret indicates an expected call of UpdateSecret +func (mr *MockOpsMockRecorder) UpdateSecret(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSecret", reflect.TypeOf((*MockOps)(nil).UpdateSecret), arg0) +} + +// UpdateSecretData mocks base method +func (m *MockOps) UpdateSecretData(arg0, arg1 string, arg2 map[string][]byte) (*v1.Secret, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateSecretData", arg0, arg1, arg2) + ret0, _ := ret[0].(*v1.Secret) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateSecretData indicates an expected call of UpdateSecretData +func (mr *MockOpsMockRecorder) UpdateSecretData(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateSecretData", reflect.TypeOf((*MockOps)(nil).UpdateSecretData), arg0, arg1, arg2) +} + +// UpdateServiceAccount mocks base method +func (m *MockOps) UpdateServiceAccount(arg0 *v1.ServiceAccount) (*v1.ServiceAccount, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateServiceAccount", arg0) + ret0, _ := ret[0].(*v1.ServiceAccount) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateServiceAccount indicates an expected call of 
UpdateServiceAccount +func (mr *MockOpsMockRecorder) UpdateServiceAccount(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateServiceAccount", reflect.TypeOf((*MockOps)(nil).UpdateServiceAccount), arg0) +} + +// ValidateDeletedService mocks base method +func (m *MockOps) ValidateDeletedService(arg0, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ValidateDeletedService", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ValidateDeletedService indicates an expected call of ValidateDeletedService +func (mr *MockOpsMockRecorder) ValidateDeletedService(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateDeletedService", reflect.TypeOf((*MockOps)(nil).ValidateDeletedService), arg0, arg1) +} + +// ValidatePersistentVolumeClaim mocks base method +func (m *MockOps) ValidatePersistentVolumeClaim(arg0 *v1.PersistentVolumeClaim, arg1, arg2 time.Duration) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ValidatePersistentVolumeClaim", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// ValidatePersistentVolumeClaim indicates an expected call of ValidatePersistentVolumeClaim +func (mr *MockOpsMockRecorder) ValidatePersistentVolumeClaim(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidatePersistentVolumeClaim", reflect.TypeOf((*MockOps)(nil).ValidatePersistentVolumeClaim), arg0, arg1, arg2) +} + +// ValidatePersistentVolumeClaimSize mocks base method +func (m *MockOps) ValidatePersistentVolumeClaimSize(arg0 *v1.PersistentVolumeClaim, arg1 int64, arg2, arg3 time.Duration) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ValidatePersistentVolumeClaimSize", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// ValidatePersistentVolumeClaimSize indicates an expected call of ValidatePersistentVolumeClaimSize +func (mr *MockOpsMockRecorder) ValidatePersistentVolumeClaimSize(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidatePersistentVolumeClaimSize", reflect.TypeOf((*MockOps)(nil).ValidatePersistentVolumeClaimSize), arg0, arg1, arg2, arg3) +} + +// ValidatePod mocks base method +func (m *MockOps) ValidatePod(arg0 *v1.Pod, arg1, arg2 time.Duration) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ValidatePod", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// ValidatePod indicates an expected call of ValidatePod +func (mr *MockOpsMockRecorder) ValidatePod(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidatePod", reflect.TypeOf((*MockOps)(nil).ValidatePod), arg0, arg1, arg2) +} + +// WaitForPodDeletion mocks base method +func (m *MockOps) WaitForPodDeletion(arg0 types.UID, arg1 string, arg2 time.Duration) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WaitForPodDeletion", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// WaitForPodDeletion indicates an expected call of WaitForPodDeletion +func (mr *MockOpsMockRecorder) WaitForPodDeletion(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitForPodDeletion", reflect.TypeOf((*MockOps)(nil).WaitForPodDeletion), arg0, arg1, arg2) +} + +// WatchConfigMap mocks base 
method +func (m *MockOps) WatchConfigMap(arg0 *v1.ConfigMap, arg1 core.WatchFunc) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WatchConfigMap", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// WatchConfigMap indicates an expected call of WatchConfigMap +func (mr *MockOpsMockRecorder) WatchConfigMap(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WatchConfigMap", reflect.TypeOf((*MockOps)(nil).WatchConfigMap), arg0, arg1) +} + +// WatchNode mocks base method +func (m *MockOps) WatchNode(arg0 *v1.Node, arg1 core.WatchFunc) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WatchNode", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// WatchNode indicates an expected call of WatchNode +func (mr *MockOpsMockRecorder) WatchNode(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WatchNode", reflect.TypeOf((*MockOps)(nil).WatchNode), arg0, arg1) +} + +// WatchPods mocks base method +func (m *MockOps) WatchPods(arg0 string, arg1 core.WatchFunc, arg2 v11.ListOptions) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WatchPods", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// WatchPods indicates an expected call of WatchPods +func (mr *MockOpsMockRecorder) WatchPods(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WatchPods", reflect.TypeOf((*MockOps)(nil).WatchPods), arg0, arg1, arg2) +} + +// WatchSecret mocks base method +func (m *MockOps) WatchSecret(arg0 *v1.Secret, arg1 core.WatchFunc) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WatchSecret", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// WatchSecret indicates an expected call of WatchSecret +func (mr *MockOpsMockRecorder) WatchSecret(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WatchSecret", reflect.TypeOf((*MockOps)(nil).WatchSecret), arg0, arg1) +} diff --git a/api/server/sdk/volume_ops.go b/api/server/sdk/volume_ops.go index 097d6588e..3c8be3a64 100644 --- a/api/server/sdk/volume_ops.go +++ b/api/server/sdk/volume_ops.go @@ -25,6 +25,7 @@ import ( "github.com/libopenstorage/openstorage/api" "github.com/libopenstorage/openstorage/pkg/auth" + "github.com/libopenstorage/openstorage/pkg/auth/secrets" policy "github.com/libopenstorage/openstorage/pkg/storagepolicy" "github.com/libopenstorage/openstorage/pkg/util" "github.com/libopenstorage/openstorage/volume" @@ -33,6 +34,17 @@ import ( "google.golang.org/grpc/status" ) +var ( + // AdminOwnedLabelKeys is a set of labels that only the storage admin + // can change. 
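+	// For example, an SdkVolumeUpdateRequest whose Labels include + // api.KubernetesPvcNameKey is rejected in Update() below with + // codes.PermissionDenied unless the caller is the storage administrator.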
+ AdminOwnedLabelKeys = []string{ + secrets.SecretNameKey, + secrets.SecretNamespaceKey, + api.KubernetesPvcNameKey, + api.KubernetesPvcNamespaceKey, + } +) + // When create is called for an existing volume, this function is called to make sure // the SDK only returns that the volume is ready when the status is UP func (s *VolumeServer) waitForVolumeReady(ctx context.Context, id string) (*api.Volume, error) { @@ -563,6 +575,15 @@ func (s *VolumeServer) Update( return nil, err } + // Only the administrator can change admin-only labels + if !api.IsAdminByContext(ctx) && req.GetLabels() != nil { + for _, adminKey := range AdminOwnedLabelKeys { + if _, ok := req.GetLabels()[adminKey]; ok { + return nil, status.Errorf(codes.PermissionDenied, "Only the administrator can update label %s", adminKey) + } + } + } + // Check if the caller can update the volume if !resp.GetVolume().IsPermitted(ctx, api.Ownership_Write) { return nil, status.Errorf(codes.PermissionDenied, "Cannot update volume") diff --git a/api/server/sdk/volume_ops_test.go b/api/server/sdk/volume_ops_test.go index adcaad157..9a5f4d1dd 100644 --- a/api/server/sdk/volume_ops_test.go +++ b/api/server/sdk/volume_ops_test.go @@ -621,6 +621,80 @@ func TestSdkVolumeEnumerateWithFilters(t *testing.T) { assert.Equal(t, r.GetVolumeIds()[0], id) } +func TestSdkVolumeUpdateAdminLabels(t *testing.T) { + // This test does not use the gRPC server + mc := gomock.NewController(&utils.SafeGoroutineTester{}) + mv := mockdriver.NewMockVolumeDriver(mc) + mcluster := mockcluster.NewMockCluster(mc) + + // Setup server + s := VolumeServer{ + server: &sdkGrpcServer{ + // This will enable isAuthEnabled to return true + config: ServerConfig{ + Security: &SecurityConfig{ + Authenticators: map[string]auth.Authenticator{ + "hello": nil, + "another": nil, + }, + }, + }, + driverHandlers: map[string]volume.VolumeDriver{ + "mock": mv, + DefaultDriverName: mv, + }, + clusterHandler: mcluster, + }, + } + + id := "myid" + newlabels := map[string]string{ + api.KubernetesPvcNameKey: "hello", + } + req := &api.SdkVolumeUpdateRequest{ + VolumeId: id, + Labels: newlabels, + } + + // Check Locator + mv. + EXPECT(). + Enumerate(&api.VolumeLocator{ + VolumeIds: []string{id}, + }, nil). + Return([]*api.Volume{&api.Volume{Spec: &api.VolumeSpec{}}}, nil). + AnyTimes() + mv. + EXPECT(). + Set(gomock.Any(), gomock.Any(), gomock.Any()). + Return(nil). 
+ AnyTimes() + + // Create contexts: no auth, non-admin, and admin + ctxNoAuth := context.Background() + ctxNotAdmin := auth.ContextSaveUserInfo(context.Background(), &auth.UserInfo{ + Username: "notmyname", + }) + ctxAdmin := auth.ContextSaveUserInfo(context.Background(), &auth.UserInfo{ + Username: "admin", + Claims: auth.Claims{ + Groups: []string{"*"}, + }, + }) + + // No auth enabled + _, err := s.Update(ctxNoAuth, req) + assert.NoError(t, err) + + // Ctx has auth but not admin + _, err = s.Update(ctxNotAdmin, req) + assert.Error(t, err) + + // Ctx has auth and is admin + _, err = s.Update(ctxAdmin, req) + assert.NoError(t, err) +} + func TestSdkVolumeUpdate(t *testing.T) { // Create server and client connection diff --git a/api/server/testutils_test.go b/api/server/testutils_test.go index b3126c53b..a2419a1fa 100644 --- a/api/server/testutils_test.go +++ b/api/server/testutils_test.go @@ -17,6 +17,7 @@ import ( "github.com/kubernetes-csi/csi-test/utils" "github.com/libopenstorage/openstorage/api" mockapi "github.com/libopenstorage/openstorage/api/mock" + servermock "github.com/libopenstorage/openstorage/api/server/mock" "github.com/libopenstorage/openstorage/api/server/sdk" "github.com/libopenstorage/openstorage/cluster" clustermanager "github.com/libopenstorage/openstorage/cluster/manager" @@ -42,6 +43,8 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/metadata" + + schedopsk8s "github.com/portworx/sched-ops/k8s/core" ) const ( @@ -63,14 +66,16 @@ var ( // testServer is a simple struct used to abstract // the creation and setup of the gRPC CSI service and REST server type testServer struct { - conn *grpc.ClientConn - m *mockdriver.MockVolumeDriver - c cluster.Cluster - s *mockapi.MockOpenStoragePoolServer - mc *gomock.Controller - sdk *sdk.Server - port string - gwport string + conn *grpc.ClientConn + m *mockdriver.MockVolumeDriver + c cluster.Cluster + s *mockapi.MockOpenStoragePoolServer + k8sops *servermock.MockOps + originalOps schedopsk8s.Ops + mc *gomock.Controller + sdk *sdk.Server + port string + gwport string } // Struct used for creation and setup of cluster api testing @@ -138,6 +143,10 @@ func newTestServerSdkNoAuth(t *testing.T) *testServer { tester.m = mockdriver.NewMockVolumeDriver(tester.mc) tester.c = mockcluster.NewMockCluster(tester.mc) tester.s = mockapi.NewMockOpenStoragePoolServer(tester.mc) + tester.k8sops = servermock.NewMockOps(tester.mc) + + tester.originalOps = schedopsk8s.Instance() + schedopsk8s.SetInstance(tester.k8sops) kv, err := kvdb.New(mem.Name, "test", []string{}, nil, kvdb.LogFatalErrorCB) assert.NoError(t, err) @@ -194,6 +203,10 @@ func newTestServerSdk(t *testing.T) *testServer { tester.m = mockdriver.NewMockVolumeDriver(tester.mc) tester.c = mockcluster.NewMockCluster(tester.mc) tester.s = mockapi.NewMockOpenStoragePoolServer(tester.mc) + tester.k8sops = servermock.NewMockOps(tester.mc) + + tester.originalOps = schedopsk8s.Instance() + schedopsk8s.SetInstance(tester.k8sops) // Create a role manager kv, err := kvdb.New(mem.Name, "test", []string{}, nil, kvdb.LogFatalErrorCB) @@ -308,6 +321,10 @@ func (s *testServer) MockDriver() *mockdriver.MockVolumeDriver { return s.m } +func (s *testServer) MockK8sOps() *servermock.MockOps { + return s.k8sops +} + func (s *testServer) Conn() *grpc.ClientConn { return s.conn } @@ -416,6 +433,8 @@ func (s *testServer) Stop() { // Remove from registry volumedrivers.Remove(mockDriverName) + + schedopsk8s.SetInstance(s.originalOps) } func createToken(name, role, secret string) (string, error) { diff --git a/api/server/volume.go
b/api/server/volume.go index f1a2968aa..1d2a802e9 100644 --- a/api/server/volume.go +++ b/api/server/volume.go @@ -22,6 +22,7 @@ import ( "github.com/libopenstorage/openstorage/api/server/sdk" clustermanager "github.com/libopenstorage/openstorage/cluster/manager" "github.com/libopenstorage/openstorage/pkg/auth" + "github.com/libopenstorage/openstorage/pkg/auth/secrets" "github.com/libopenstorage/openstorage/pkg/grpcserver" "github.com/libopenstorage/openstorage/pkg/options" "github.com/libopenstorage/openstorage/volume" @@ -194,6 +195,13 @@ func (vd *volAPI) create(w http.ResponseWriter, r *http.Request) { if err := json.NewDecoder(r.Body).Decode(&dcReq); err != nil { fmt.Println("returning error here") vd.sendError(vd.name, method, w, err.Error(), http.StatusBadRequest) + return + } + if dcReq.GetSpec() == nil { + vd.sendError(vd.name, method, w, "Must supply a volume specification", http.StatusBadRequest) + return + } else if dcReq.GetLocator() == nil { + vd.sendError(vd.name, method, w, "Must supply a volume locator", http.StatusBadRequest) return } @@ -204,6 +211,26 @@ func (vd *volAPI) create(w http.ResponseWriter, r *http.Request) { return } + // Check headers for secret reference. These are set by the Kubernetes auth middleware + secretName := r.Header.Get(secrets.SecretNameKey) + secretNamespace := r.Header.Get(secrets.SecretNamespaceKey) + pvcName := r.Header.Get(api.KubernetesPvcNameKey) + pvcNamespace := r.Header.Get(api.KubernetesPvcNamespaceKey) + if len(secretName) != 0 && len(secretNamespace) != 0 { + if dcReq.GetLocator().GetVolumeLabels() == nil { + dcReq.GetLocator().VolumeLabels = make(map[string]string) + } + dcReq.GetLocator().GetVolumeLabels()[secrets.SecretNameKey] = secretName + dcReq.GetLocator().GetVolumeLabels()[secrets.SecretNamespaceKey] = secretNamespace + + // Only add the pvc name and namespace if we had the secrets and if the + // pvc values were passed + if len(pvcName) != 0 && len(pvcNamespace) != 0 { + dcReq.GetLocator().GetVolumeLabels()[api.KubernetesPvcNameKey] = pvcName + dcReq.GetLocator().GetVolumeLabels()[api.KubernetesPvcNamespaceKey] = pvcNamespace + } + } + // Get gRPC connection conn, err := vd.getConn() if err != nil { @@ -452,7 +479,6 @@ func (vd *volAPI) volumeSet(w http.ResponseWriter, r *http.Request) { } } json.NewEncoder(w).Encode(resp) - } func getVolumeUpdateSpec(spec *api.VolumeSpec, vol *api.Volume) *api.VolumeSpecUpdate { @@ -1822,14 +1848,14 @@ func (vd *volAPI) SetupRoutesWithAuth( nInspect := negroni.New() nInspect.Use(negroni.HandlerFunc(authM.inspectWithAuth)) inspectRoute := vd.volumeInspectRoute() - nSet.UseHandlerFunc(inspectRoute.fn) + nInspect.UseHandlerFunc(inspectRoute.fn) router.Methods(inspectRoute.verb).Path(inspectRoute.path).Handler(nInspect) // Setup middleware for enumerate nEnumerate := negroni.New() nEnumerate.Use(negroni.HandlerFunc(authM.enumerateWithAuth)) enumerateRoute := vd.volumeEnumerateRoute() - nSet.UseHandlerFunc(enumerateRoute.fn) + nEnumerate.UseHandlerFunc(enumerateRoute.fn) router.Methods(enumerateRoute.verb).Path(enumerateRoute.path).Handler(nEnumerate) routes := []*Route{vd.versionRoute()} diff --git a/api/server/volume_test.go b/api/server/volume_test.go index 50d7b3f03..405ca4549 100644 --- a/api/server/volume_test.go +++ b/api/server/volume_test.go @@ -17,8 +17,18 @@ import ( "github.com/stretchr/testify/assert" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + + "github.com/sirupsen/logrus" + + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + metav1
"k8s.io/apimachinery/pkg/apis/meta/v1" ) +func init() { + logrus.SetLevel(logrus.PanicLevel) +} + func TestVolumeNoAuth(t *testing.T) { var err error @@ -189,7 +199,8 @@ func TestMiddlewareVolumeCreateFailure(t *testing.T) { size := uint64(1234) secretName := "secret-name" namespace := "ns" - tokenKey := "token-key" + pvcName := "mypvc" + storageClassName := "storageclass1" req := &api.VolumeCreateRequest{ Locator: &api.VolumeLocator{ @@ -218,13 +229,32 @@ func TestMiddlewareVolumeCreateFailure(t *testing.T) { _, err = driverclient.Create(req.GetLocator(), req.GetSource(), req.GetSpec()) assert.Error(t, err, "Expected an error on Create") + pvc := corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: pvcName, + Namespace: namespace, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: &storageClassName, + }, + } + + storageClass := storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: storageClassName, + }, + Parameters: map[string]string{ + secrets.SecretNameKey: secretName, + secrets.SecretNamespaceKey: "${pvc.namespace}", + }, + } + req = &api.VolumeCreateRequest{ Locator: &api.VolumeLocator{ Name: name, VolumeLabels: map[string]string{ - secrets.SecretNameKey: secretName, - secrets.SecretTokenKey: tokenKey, - secrets.SecretNamespaceKey: namespace, + PVCNameLabelKey: pvcName, + PVCNamespaceLabelKey: namespace, }, }, Source: &api.Source{}, @@ -236,6 +266,11 @@ func TestMiddlewareVolumeCreateFailure(t *testing.T) { }, } + testVolDriver.MockK8sOps().EXPECT(). + GetPersistentVolumeClaim(pvcName, namespace).Return(&pvc, nil).AnyTimes() + testVolDriver.MockK8sOps().EXPECT(). + GetStorageClassForPVC(&pvc).Return(&storageClass, nil).AnyTimes() + // Send a request and fail to get a token mockSecret.EXPECT(). GetSecret( @@ -873,38 +908,6 @@ func TestMiddlewareVolumeSetSizeSuccess(t *testing.T) { assert.NoError(t, err) } -func TestMiddlewareVolumeSetFailure(t *testing.T) { - testVolDriver := newTestServerSdk(t) - defer testVolDriver.Stop() - - _, mockSecret, mc := getSecretsMock(t) - defer mc.Finish() - lsecrets.SetInstance(mockSecret) - - // TODO(stgleb): Fix it - unixServer, portServer, err := StartVolumeMgmtAPI(fakeWithSched, testSdkSock, testMgmtBase, testMgmtPort, true, nil) - assert.NoError(t, err, "Unexpected error on StartVolumeMgmtAPI") - defer unixServer.Close() - defer portServer.Close() - - time.Sleep(1 * time.Second) - c, err := volumeclient.NewDriverClient(testMockURL, fakeWithSched, version, fakeWithSched) - assert.NoError(t, err, "Unexpected error on NewDriverClient") - - driverclient := volumeclient.VolumeDriver(c) - id, _, _, _ := testMiddlewareCreateVolume(t, driverclient, mockSecret, testVolDriver) - - req := &api.VolumeSetRequest{ - Spec: &api.VolumeSpec{Shared: true}, - } - - // Not setting mock secrets - - err = driverclient.Set(id, &api.VolumeLocator{Name: "myvol"}, req.GetSpec()) - assert.Error(t, err, "Unexpected error on Set") - -} - func TestVolumeAttachSuccess(t *testing.T) { var err error @@ -2438,18 +2441,38 @@ func TestMiddlewareVolumeDeleteFailureIncorrectToken(t *testing.T) { size := uint64(1234) secretName := "secret-name" namespace := "ns" - tokenKey := "token-key" + pvcName := "mypvc" + storageClassName := "storageclass1" // get token token, err := createToken("test", "system.admin", testSharedSecret) assert.NoError(t, err) + pvc := corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: pvcName, + Namespace: namespace, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: &storageClassName, 
+ }, + } + + storageClass := storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: storageClassName, + }, + Parameters: map[string]string{ + secrets.SecretNameKey: secretName, + secrets.SecretNamespaceKey: "${pvc.namespace}", + }, + } + req := &api.VolumeCreateRequest{ Locator: &api.VolumeLocator{ Name: name, VolumeLabels: map[string]string{ - secrets.SecretNameKey: secretName, - secrets.SecretTokenKey: tokenKey, - secrets.SecretNamespaceKey: namespace, + PVCNameLabelKey: pvcName, + PVCNamespaceLabelKey: namespace, }, }, Source: &api.Source{}, @@ -2461,6 +2484,11 @@ func TestMiddlewareVolumeDeleteFailureIncorrectToken(t *testing.T) { }, } + testVolDriver.MockK8sOps().EXPECT(). + GetPersistentVolumeClaim(pvcName, namespace).Return(&pvc, nil) + testVolDriver.MockK8sOps().EXPECT(). + GetStorageClassForPVC(&pvc).Return(&storageClass, nil) + mockSecret.EXPECT(). String(). Return(lsecrets.TypeK8s). @@ -2525,18 +2553,38 @@ func testMiddlewareCreateVolume( size := uint64(1234) secretName := "secret-name" namespace := "ns" - tokenKey := "token-key" + pvcName := "mypvc" + storageClassName := "storageclass1" // get token token, err := createToken("test", "system.admin", testSharedSecret) assert.NoError(t, err) + pvc := corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: pvcName, + Namespace: namespace, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + StorageClassName: &storageClassName, + }, + } + + storageClass := storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: storageClassName, + }, + Parameters: map[string]string{ + secrets.SecretNameKey: secretName, + secrets.SecretNamespaceKey: "${pvc.namespace}", + }, + } + req := &api.VolumeCreateRequest{ Locator: &api.VolumeLocator{ Name: name, VolumeLabels: map[string]string{ - secrets.SecretNameKey: secretName, - secrets.SecretTokenKey: tokenKey, - secrets.SecretNamespaceKey: namespace, + PVCNameLabelKey: pvcName, + PVCNamespaceLabelKey: namespace, }, }, Source: &api.Source{}, @@ -2547,6 +2595,11 @@ func testMiddlewareCreateVolume( Shared: true, }, } + testVolDriver.MockK8sOps().EXPECT(). + GetPersistentVolumeClaim(pvcName, namespace).Return(&pvc, nil) + testVolDriver.MockK8sOps().EXPECT(). + GetStorageClassForPVC(&pvc).Return(&storageClass, nil) + mockSecret.EXPECT(). String(). Return(lsecrets.TypeK8s). diff --git a/cmd/osd/main.go b/cmd/osd/main.go index 853d42fb2..b111381c0 100644 --- a/cmd/osd/main.go +++ b/cmd/osd/main.go @@ -36,7 +36,6 @@ import ( "github.com/codegangsta/cli" "github.com/docker/docker/pkg/reexec" "github.com/libopenstorage/openstorage/api" - "github.com/libopenstorage/openstorage/api/flexvolume" "github.com/libopenstorage/openstorage/api/server" "github.com/libopenstorage/openstorage/api/server/sdk" osdcli "github.com/libopenstorage/openstorage/cli" @@ -53,6 +52,7 @@ import ( "github.com/libopenstorage/openstorage/schedpolicy" "github.com/libopenstorage/openstorage/volume" volumedrivers "github.com/libopenstorage/openstorage/volume/drivers" + "github.com/libopenstorage/secrets" "github.com/portworx/kvdb" "github.com/portworx/kvdb/consul" etcd "github.com/portworx/kvdb/etcd/v2" @@ -183,6 +183,11 @@ func main() { Usage: "CSI Driver name", Value: "", }, + cli.StringFlag{ + Name: "secrets-type", + Usage: "Secrets manager type. 
For example \"k8s\"", + Value: "", + }, } app.Action = wrapAction(start) app.Commands = []cli.Command{ @@ -334,6 +339,15 @@ func start(c *cli.Context) error { return fmt.Errorf("Failed to initialize KVDB: %v", err) } + // Setup secrets type if any + if secretsType := c.String("secrets-type"); len(secretsType) > 0 { + i, err := secrets.New(secretsType, nil) + if err != nil { + return fmt.Errorf("Failed to set secrets type: %v", err) + } + secrets.SetInstance(i) + } + // Get authenticators authenticators := make(map[string]auth.Authenticator) selfSigned, err := selfSignedAuth(c) @@ -393,6 +407,9 @@ func start(c *cli.Context) error { isDefaultSet := false // Start the volume drivers. for d, v := range cfg.Osd.Drivers { + // Override sched driver with the current one + server.OverrideSchedDriverName = d + logrus.Infof("Starting volume driver: %v", d) if err := volumedrivers.Register(d, v); err != nil { return fmt.Errorf("Unable to start volume driver: %v, %v", d, err) @@ -432,11 +449,15 @@ func start(c *cli.Context) error { return fmt.Errorf("Unable to start plugin api server: %v", err) } + authEnabled := len(authenticators) > 0 + if authEnabled { + logrus.Info("Management API (deprecated) starting with authentication enabled") + } if _, _, err := server.StartVolumeMgmtAPI( d, sdksocket, volume.DriverAPIBase, uint16(mgmtPort), - false, + authEnabled, authenticators, ); err != nil { return fmt.Errorf("Unable to start volume mgmt api server: %v", err) @@ -512,10 +533,6 @@ func start(c *cli.Context) error { return fmt.Errorf("Invalid OSD config file: Default Driver specified but driver not initialized") } - if err := flexvolume.StartFlexVolumeAPI(config.FlexVolumePort, cfg.Osd.ClusterConfig.DefaultDriver); err != nil { - return fmt.Errorf("Unable to start flexvolume API: %v", err) - } - // Start the graph drivers. for d := range cfg.Osd.GraphDrivers { logrus.Infof("Starting graph driver: %v", d) diff --git a/pkg/auth/secrets/secrets.go b/pkg/auth/secrets/secrets.go index 45a040292..db3ddf8fd 100644 --- a/pkg/auth/secrets/secrets.go +++ b/pkg/auth/secrets/secrets.go @@ -12,11 +12,17 @@ const ( // SecretNameKey is a label on the openstorage.Volume object // which corresponds to the name of the secret which holds the // token information. Used for all secret providers + // This key supports the CSI compatible value of ${pvc.annotations['team.example.com/key']} + // as described in https://kubernetes-csi.github.io/docs/secrets-and-credentials-storage-class.html + // to specify to use the annotations on the pvc. SecretNameKey = "openstorage.io/auth-secret-name" // SecretNamespaceKey is a label on the openstorage.Volume object // which corresponds to the namespace of the secret which holds the // token information. Used for all secret providers + // This key supports the CSI compatible value of ${pvc.namespace} + // as described in https://kubernetes-csi.github.io/docs/secrets-and-credentials-storage-class.html + // to specify to use the namespace of the pvc SecretNamespaceKey = "openstorage.io/auth-secret-namespace" // SecretTokenKey corresponds to the key at which the auth token is stored diff --git a/pkg/util/template.go b/pkg/util/template.go new file mode 100644 index 000000000..0eedd9de2 --- /dev/null +++ b/pkg/util/template.go @@ -0,0 +1,39 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + "os" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// ResolveTemplate expands ${param} references in template with the values +// in params. For example, resolving "${pvc.namespace}" with +// params["pvc.namespace"] = "ns" yields "ns". It returns an error listing +// any referenced parameters that are missing from params. +func ResolveTemplate(template string, params map[string]string) (string, error) { + missingParams := sets.NewString() + resolved := os.Expand(template, func(k string) string { + v, ok := params[k] + if !ok { + missingParams.Insert(k) + } + return v + }) + if missingParams.Len() > 0 { + return "", fmt.Errorf("invalid tokens: %q", missingParams.List()) + } + return resolved, nil +} diff --git a/test/README.md b/test/README.md new file mode 100644 index 000000000..c4e45b68c --- /dev/null +++ b/test/README.md @@ -0,0 +1,8 @@ + + +# To update modules + +``` +npm install --save-dev bats bats-assert bats-support +``` + diff --git a/test/lib/osd.bash b/test/lib/osd.bash new file mode 100644 index 000000000..42c03a2c3 --- /dev/null +++ b/test/lib/osd.bash @@ -0,0 +1,115 @@ + +TMPDIR="${BATS_TMPDIR:-/tmp}" +KIND_CLUSTER="${KIND_CLUSTER:-lpabon-kind-csi}" + + +# Only show output of the program on failure +function osd::suppress() { + ( + local output=/tmp/output.$$ + rm --force ${output} 2> /dev/null + ${1+"$@"} > ${output} 2>&1 + result=$? + if [ $result -ne 0 ] ; then + cat ${output} + fi + rm ${output} + exit $result + ) +} + +# TAP message +function osd::echo() { + if [ $DEBUG -eq 1 ] ; then + echo "# ${1}" >&3 + fi +} + +# TAP compliant steps which can be printed out +function osd::by() { + if [ $DEBUG -eq 1 ] ; then + echo "# STEP: ${1}" >&3 + fi +} + +# Get the Kind cluster IP from docker +function osd::clusterip() { + docker inspect ${CLUSTER_CONTROL_PLANE_CONTAINER} | jq -r '.[].NetworkSettings.Networks.bridge.IPAddress' +} + +# Return the SDK REST Gateway address +function osd::getSdkRestGWEndpoint() { + local clusterip=$(osd::clusterip) + local nodeport=$(kubectl -n kube-system get svc portworx-api -o json | jq '.spec.ports[2].nodePort') + echo ${clusterip}:${nodeport} +} + +# Return the SDK gRPC endpoint +function osd::getSdkEndpoint() { + local clusterip=$(osd::clusterip) + local nodeport=$(kubectl -n kube-system get svc portworx-api -o json | jq '.spec.ports[1].nodePort') + echo ${clusterip}:${nodeport} +} + +# Creates a user in Kubernetes only. Use osd::createUserKubeconfig() instead to create a full +# kubeconfig for the new user. +function osd::createUser() { + local username="$1" + local location="$2" + + openssl req -new -newkey rsa:4096 -nodes \ + -keyout ${location}/${username}-k8s.key \ + -out ${location}/${username}-k8s.csr \ + -subj "/CN=${username}/O=openstorage" + + cat <<EOF | kubectl apply -f - +apiVersion: certificates.k8s.io/v1beta1 +kind: CertificateSigningRequest +metadata: + name: ${username}-access +spec: + request: $(cat ${location}/${username}-k8s.csr | base64 | tr -d '\n') + usages: + - client auth +EOF + + kubectl certificate approve ${username}-access + kubectl get csr ${username}-access -o jsonpath='{.status.certificate}' | base64 --decode > ${location}/${username}-kubeconfig.crt +} + +# Creates a new Kubernetes user only able to access their namespace with the +# same name. The kubeconfig for this user must be passed in.
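+# A usage sketch (the user name and target directory are illustrative): +#   osd::createUserKubeconfig "alice" "${TMPDIR}" +#   kubectl --kubeconfig="${TMPDIR}/alice-kubeconfig.conf" get pods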
+function osd::createUserKubeconfig() { + local user="$1" + local location="$2" + local kubeconfig="${location}/${user}-kubeconfig.conf" + + osd::createUser "$user" "$location" + + kind export kubeconfig --kubeconfig=${kubeconfig} --name ${KIND_CLUSTER} + kubectl config set-credentials \ + ${user} \ + --client-certificate=${location}/${user}-kubeconfig.crt \ + --client-key=${location}/${user}-k8s.key \ + --embed-certs \ + --kubeconfig=${kubeconfig} + kubectl create namespace ${user} + kubectl --kubeconfig=${kubeconfig} config set-context ${user} \ + --cluster=kind-${KIND_CLUSTER} \ + --user=${user} \ + --namespace=${user} + kubectl --kubeconfig=${kubeconfig} config use-context ${user} + kubectl create rolebinding ${user}-admin --namespace=${user} --clusterrole=admin --user=${user} +} + +# Delete an object in Kubernetes and wait until fully removed +function osd::kubeDeleteObjectAndWait() { + local secs="$1" + local kubeargs="$2" + local object="$3" + local name="$4" + + kubectl ${kubeargs} delete ${object} ${name} + + timeout $secs sh -c "while kubectl ${kubeargs} get ${object} ${name} > /dev/null 2>&1; do sleep 1; done " +} diff --git a/test/node_modules/.bin/bats b/test/node_modules/.bin/bats new file mode 120000 index 000000000..3ea23c3e6 --- /dev/null +++ b/test/node_modules/.bin/bats @@ -0,0 +1 @@ +../bats/bin/bats \ No newline at end of file diff --git a/test/node_modules/bats-assert/CHANGELOG.md b/test/node_modules/bats-assert/CHANGELOG.md new file mode 100644 index 000000000..7e326f476 --- /dev/null +++ b/test/node_modules/bats-assert/CHANGELOG.md @@ -0,0 +1,39 @@ +# Change Log + +All notable changes to this project will be documented in this file. +This project adheres to [Semantic Versioning](http://semver.org/). + + +## [0.3.0] - 2016-03-22 + +### Removed + +- Move `fail()` to `bats-support` + + +## [0.2.0] - 2016-03-11 + +### Added + +- `refute()` to complement `assert()` +- `npm` support + +### Fixed + +- Not consuming the `--` when stopping option parsing in + `assert_output`, `refute_output`, `assert_line` and `refute_line` + + +## 0.1.0 - 2016-02-16 + +### Added + +- Reporting arbitrary failures with `fail()` +- Generic assertions with `assert()` and `assert_equal()` +- Testing exit status with `assert_success()` and `assert_failure()` +- Testing output with `assert_output()` and `refute_output()` +- Testing individual lines with `assert_line()` and `refute_line()` + + +[0.3.0]: https://github.com/ztombol/bats-assert/compare/v0.2.0...v0.3.0 +[0.2.0]: https://github.com/ztombol/bats-assert/compare/v0.1.0...v0.2.0 diff --git a/test/node_modules/bats-assert/LICENSE b/test/node_modules/bats-assert/LICENSE new file mode 100644 index 000000000..670154e35 --- /dev/null +++ b/test/node_modules/bats-assert/LICENSE @@ -0,0 +1,116 @@ +CC0 1.0 Universal + +Statement of Purpose + +The laws of most jurisdictions throughout the world automatically confer +exclusive Copyright and Related Rights (defined below) upon the creator and +subsequent owner(s) (each and all, an "owner") of an original work of +authorship and/or a database (each, a "Work"). + +Certain owners wish to permanently relinquish those rights to a Work for the +purpose of contributing to a commons of creative, cultural and scientific +works ("Commons") that the public can reliably and without fear of later +claims of infringement build upon, modify, incorporate in other works, reuse +and redistribute as freely as possible in any form whatsoever and for any +purposes, including without limitation commercial purposes. 
These owners may +contribute to the Commons to promote the ideal of a free culture and the +further production of creative, cultural and scientific works, or to gain +reputation or greater distribution for their Work in part through the use and +efforts of others. + +For these and/or other purposes and motivations, and without any expectation +of additional consideration or compensation, the person associating CC0 with a +Work (the "Affirmer"), to the extent that he or she is an owner of Copyright +and Related Rights in the Work, voluntarily elects to apply CC0 to the Work +and publicly distribute the Work under its terms, with knowledge of his or her +Copyright and Related Rights in the Work and the meaning and intended legal +effect of CC0 on those rights. + +1. Copyright and Related Rights. A Work made available under CC0 may be +protected by copyright and related or neighboring rights ("Copyright and +Related Rights"). Copyright and Related Rights include, but are not limited +to, the following: + + i. the right to reproduce, adapt, distribute, perform, display, communicate, + and translate a Work; + + ii. moral rights retained by the original author(s) and/or performer(s); + + iii. publicity and privacy rights pertaining to a person's image or likeness + depicted in a Work; + + iv. rights protecting against unfair competition in regards to a Work, + subject to the limitations in paragraph 4(a), below; + + v. rights protecting the extraction, dissemination, use and reuse of data in + a Work; + + vi. database rights (such as those arising under Directive 96/9/EC of the + European Parliament and of the Council of 11 March 1996 on the legal + protection of databases, and under any national implementation thereof, + including any amended or successor version of such directive); and + + vii. other similar, equivalent or corresponding rights throughout the world + based on applicable law or treaty, and any national implementations thereof. + +2. Waiver. To the greatest extent permitted by, but not in contravention of, +applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and +unconditionally waives, abandons, and surrenders all of Affirmer's Copyright +and Related Rights and associated claims and causes of action, whether now +known or unknown (including existing as well as future claims and causes of +action), in the Work (i) in all territories worldwide, (ii) for the maximum +duration provided by applicable law or treaty (including future time +extensions), (iii) in any current or future medium and for any number of +copies, and (iv) for any purpose whatsoever, including without limitation +commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes +the Waiver for the benefit of each member of the public at large and to the +detriment of Affirmer's heirs and successors, fully intending that such Waiver +shall not be subject to revocation, rescission, cancellation, termination, or +any other legal or equitable action to disrupt the quiet enjoyment of the Work +by the public as contemplated by Affirmer's express Statement of Purpose. + +3. Public License Fallback. Should any part of the Waiver for any reason be +judged legally invalid or ineffective under applicable law, then the Waiver +shall be preserved to the maximum extent permitted taking into account +Affirmer's express Statement of Purpose. 
In addition, to the extent the Waiver +is so judged Affirmer hereby grants to each affected person a royalty-free, +non transferable, non sublicensable, non exclusive, irrevocable and +unconditional license to exercise Affirmer's Copyright and Related Rights in +the Work (i) in all territories worldwide, (ii) for the maximum duration +provided by applicable law or treaty (including future time extensions), (iii) +in any current or future medium and for any number of copies, and (iv) for any +purpose whatsoever, including without limitation commercial, advertising or +promotional purposes (the "License"). The License shall be deemed effective as +of the date CC0 was applied by Affirmer to the Work. Should any part of the +License for any reason be judged legally invalid or ineffective under +applicable law, such partial invalidity or ineffectiveness shall not +invalidate the remainder of the License, and in such case Affirmer hereby +affirms that he or she will not (i) exercise any of his or her remaining +Copyright and Related Rights in the Work or (ii) assert any associated claims +and causes of action with respect to the Work, in either case contrary to +Affirmer's express Statement of Purpose. + +4. Limitations and Disclaimers. + + a. No trademark or patent rights held by Affirmer are waived, abandoned, + surrendered, licensed or otherwise affected by this document. + + b. Affirmer offers the Work as-is and makes no representations or warranties + of any kind concerning the Work, express, implied, statutory or otherwise, + including without limitation warranties of title, merchantability, fitness + for a particular purpose, non infringement, or the absence of latent or + other defects, accuracy, or the present or absence of errors, whether or not + discoverable, all to the greatest extent permissible under applicable law. + + c. Affirmer disclaims responsibility for clearing rights of other persons + that may apply to the Work or any use thereof, including without limitation + any person's Copyright and Related Rights in the Work. Further, Affirmer + disclaims responsibility for obtaining any necessary consents, permissions + or other rights required for any use of the Work. + + d. Affirmer understands and acknowledges that Creative Commons is not a + party to this document and has no duty or obligation with respect to this + CC0 or use of the Work. + +For more information, please see + diff --git a/test/node_modules/bats-assert/README.md b/test/node_modules/bats-assert/README.md new file mode 100644 index 000000000..dfe78a420 --- /dev/null +++ b/test/node_modules/bats-assert/README.md @@ -0,0 +1,712 @@ +# bats-assert + +[![License](https://img.shields.io/npm/l/bats-assert.svg)](https://github.com/jasonkarns/bats-assert-1/blob/master/LICENSE) +[![GitHub release](https://img.shields.io/github/release/jasonkarns/bats-assert-1.svg)](https://github.com/jasonkarns/bats-assert-1/releases) +[![npm release](https://img.shields.io/npm/v/bats-assert.svg)](https://www.npmjs.com/package/bats-assert) +[![Build Status](https://travis-ci.org/jasonkarns/bats-assert-1.svg?branch=master)](https://travis-ci.org/jasonkarns/bats-assert-1) + +`bats-assert` is a helper library providing common assertions for +[Bats][bats]. + +Assertions are functions that perform a test and output relevant +information on failure to help debugging. They return 1 on failure and 0 +otherwise. 
Output, [formatted][bats-support-output] for readability, is +sent to the standard error to make assertions usable outside of `@test` +blocks too. + +Assertions testing exit code and output operate on the results of the +most recent invocation of `run`. + +Dependencies: +- [`bats-support`][bats-support] (formerly `bats-core`) - output + formatting + +See the [shared documentation][bats-docs] to learn how to install and +load this library. + + +## Usage + +### `assert` + +Fail if the given expression evaluates to false. + +***Note:*** *The expression must be a simple command. [Compound +commands][bash-comp-cmd], such as `[[`, can be used only when executed +with `bash -c`.* + +```bash +@test 'assert()' { + touch '/var/log/test.log' + assert [ -e '/var/log/test.log' ] +} +``` + +On failure, the failed expression is displayed. + +``` +-- assertion failed -- +expression : [ -e /var/log/test.log ] +-- +``` + + +### `refute` + +Fail if the given expression evaluates to true. + +***Note:*** *The expression must be a simple command. [Compound +commands][bash-comp-cmd], such as `[[`, can be used only when executed +with `bash -c`.* + +```bash +@test 'refute()' { + rm -f '/var/log/test.log' + refute [ -e '/var/log/test.log' ] +} +``` + +On failure, the successful expression is displayed. + +``` +-- assertion succeeded, but it was expected to fail -- +expression : [ -e /var/log/test.log ] +-- +``` + + +### `assert_equal` + +Fail if the two parameters, actual and expected value respectively, do +not equal. + +```bash +@test 'assert_equal()' { + assert_equal 'have' 'want' +} +``` + +On failure, the expected and actual values are displayed. + +``` +-- values do not equal -- +expected : want +actual : have +-- +``` + +If either value is longer than one line both are displayed in +*multi-line* format. + + +### `assert_success` + +Fail if `$status` is not 0. + +```bash +@test 'assert_success() status only' { + run bash -c "echo 'Error!'; exit 1" + assert_success +} +``` + +On failure, `$status` and `$output` are displayed. + +``` +-- command failed -- +status : 1 +output : Error! +-- +``` + +If `$output` is longer than one line, it is displayed in *multi-line* +format. + + +### `assert_failure` + +Fail if `$status` is 0. + +```bash +@test 'assert_failure() status only' { + run echo 'Success!' + assert_failure +} +``` + +On failure, `$output` is displayed. + +``` +-- command succeeded, but it was expected to fail -- +output : Success! +-- +``` + +If `$output` is longer than one line, it is displayed in *multi-line* +format. + +#### Expected status + +When one parameter is specified, fail if `$status` does not equal the +expected status specified by the parameter. + +```bash +@test 'assert_failure() with expected status' { + run bash -c "echo 'Error!'; exit 1" + assert_failure 2 +} +``` + +On failure, the expected and actual status, and `$output` are displayed. + +``` +-- command failed as expected, but status differs -- +expected : 2 +actual : 1 +output : Error! +-- +``` + +If `$output` is longer than one line, it is displayed in *multi-line* +format. + + +### `assert_output` + +This function helps to verify that a command or function produces the +correct output by checking that the specified expected output matches +the actual output. Matching can be literal (default), partial or regular +expression. This function is the logical complement of `refute_output`. + +#### Literal matching + +By default, literal matching is performed. The assertion fails if +`$output` does not equal the expected output. 
+ +```bash +@test 'assert_output()' { + run echo 'have' + assert_output 'want' +} +``` + +On failure, the expected and actual output are displayed. + +``` +-- output differs -- +expected : want +actual : have +-- +``` + +If either value is longer than one line both are displayed in +*multi-line* format. + +#### Existence + +To assert that any (non-empty) output exists at all, simply omit the matching +argument. + +```bash +@test 'assert_output()' { + run echo 'have' + assert_output +} +``` + +On failure, an error message is displayed. + +``` +-- no output -- +expected non-empty output, but output was empty +-- +``` + +#### Partial matching + +Partial matching can be enabled with the `--partial` option (`-p` for +short). When used, the assertion fails if the expected *substring* is +not found in `$output`. + +```bash +@test 'assert_output() partial matching' { + run echo 'ERROR: no such file or directory' + assert_output --partial 'SUCCESS' +} +``` + +On failure, the substring and the output are displayed. + +``` +-- output does not contain substring -- +substring : SUCCESS +output : ERROR: no such file or directory +-- +``` + +This option and regular expression matching (`--regexp` or `-e`) are +mutually exclusive. An error is displayed when used simultaneously. + +#### Regular expression matching + +Regular expression matching can be enabled with the `--regexp` option +(`-e` for short). When used, the assertion fails if the *extended +regular expression* does not match `$output`. + +*Note: The anchors `^` and `$` bind to the beginning and the end of the +entire output (not individual lines), respectively.* + +```bash +@test 'assert_output() regular expression matching' { + run echo 'Foobar 0.1.0' + assert_output --regexp '^Foobar v[0-9]+\.[0-9]+\.[0-9]$' +} +``` + +On failure, the regular expression and the output are displayed. + +``` +-- regular expression does not match output -- +regexp : ^Foobar v[0-9]+\.[0-9]+\.[0-9]$ +output : Foobar 0.1.0 +-- +``` + +An error is displayed if the specified extended regular expression is +invalid. + +This option and partial matching (`--partial` or `-p`) are mutually +exclusive. An error is displayed when used simultaneously. + +#### Standard Input, HereDocs and HereStrings + +The expected output can be specified via standard input (also +heredoc/herestring) with the `-`/`--stdin` option. + +```bash +@test 'assert_output() with pipe' { + run echo 'hello' + echo 'hello' | assert_output - +} + +@test 'assert_output() with herestring' { + run echo 'hello' + assert_output - <<< hello +} +``` + + +### `refute_output` + +This function helps to verify that a command or function produces the +correct output by checking that the specified unexpected output does not +match the actual output. Matching can be literal (default), partial or +regular expression. This function is the logical complement of +`assert_output`. + +#### Literal matching + +By default, literal matching is performed. The assertion fails if +`$output` equals the unexpected output. + +```bash +@test 'refute_output()' { + run echo 'want' + refute_output 'want' +} +``` + +On failure, the output is displayed. + +``` +-- output equals, but it was expected to differ -- +output : want +-- +``` + +If output is longer than one line it is displayed in *multi-line* +format. + +#### Existence + +To assert that there is no output at all, simply omit the matching argument. + +```bash +@test 'refute_output()' { + run foo --silent + refute_output +} +``` + +On failure, an error message is displayed. 
+
+```
+-- unexpected output --
+expected no output, but output was non-empty
+--
+```
+
+#### Partial matching
+
+Partial matching can be enabled with the `--partial` option (`-p` for
+short). When used, the assertion fails if the unexpected *substring* is
+found in `$output`.
+
+```bash
+@test 'refute_output() partial matching' {
+  run echo 'ERROR: no such file or directory'
+  refute_output --partial 'ERROR'
+}
+```
+
+On failure, the substring and the output are displayed.
+
+```
+-- output should not contain substring --
+substring : ERROR
+output    : ERROR: no such file or directory
+--
+```
+
+This option and regular expression matching (`--regexp` or `-e`) are
+mutually exclusive. An error is displayed when used simultaneously.
+
+#### Regular expression matching
+
+Regular expression matching can be enabled with the `--regexp` option
+(`-e` for short). When used, the assertion fails if the *extended
+regular expression* matches `$output`.
+
+*Note: The anchors `^` and `$` bind to the beginning and the end of the
+entire output (not individual lines), respectively.*
+
+```bash
+@test 'refute_output() regular expression matching' {
+  run echo 'Foobar v0.1.0'
+  refute_output --regexp '^Foobar v[0-9]+\.[0-9]+\.[0-9]$'
+}
+```
+
+On failure, the regular expression and the output are displayed.
+
+```
+-- regular expression should not match output --
+regexp : ^Foobar v[0-9]+\.[0-9]+\.[0-9]$
+output : Foobar v0.1.0
+--
+```
+
+An error is displayed if the specified extended regular expression is
+invalid.
+
+This option and partial matching (`--partial` or `-p`) are mutually
+exclusive. An error is displayed when used simultaneously.
+
+#### Standard Input, HereDocs and HereStrings
+
+The unexpected output can be specified via standard input (also
+heredoc/herestring) with the `-`/`--stdin` option.
+
+```bash
+@test 'refute_output() with pipe' {
+  run echo 'hello'
+  echo 'world' | refute_output -
+}
+
+@test 'refute_output() with herestring' {
+  run echo 'hello'
+  refute_output - <<< world
+}
+```
+
+
+### `assert_line`
+
+Similarly to `assert_output`, this function helps to verify that a
+command or function produces the correct output. It checks that the
+expected line appears in the output (default) or in a specific line of
+it. Matching can be literal (default), partial or regular expression.
+This function is the logical complement of `refute_line`.
+
+***Warning:*** *Due to a [bug in Bats][bats-93], empty lines are
+discarded from `${lines[@]}`, causing line indices to change and
+preventing testing for empty lines.*
+
+[bats-93]: https://github.com/sstephenson/bats/pull/93
+
+#### Looking for a line in the output
+
+By default, the entire output is searched for the expected line. The
+assertion fails if the expected line is not found in `${lines[@]}`.
+
+```bash
+@test 'assert_line() looking for line' {
+  run echo $'have-0\nhave-1\nhave-2'
+  assert_line 'want'
+}
+```
+
+On failure, the expected line and the output are displayed.
+
+***Warning:*** *The output displayed does not contain empty lines. See
+the Warning above for more.*
+
+```
+-- output does not contain line --
+line : want
+output (3 lines):
+  have-0
+  have-1
+  have-2
+--
+```
+
+If output is not longer than one line, it is displayed in *two-column*
+format.
+
+#### Matching a specific line
+
+When the `--index <idx>` option is used (`-n <idx>` for short), the
+expected line is matched only against the line identified by the given
+index. The assertion fails if the expected line does not equal
+`${lines[<idx>]}`.
+
+```bash
+@test 'assert_line() specific line' {
+  run echo $'have-0\nhave-1\nhave-2'
+  assert_line --index 1 'want-1'
+}
+```
+
+On failure, the index and the compared lines are displayed.
+
+```
+-- line differs --
+index    : 1
+expected : want-1
+actual   : have-1
+--
+```
+
+#### Partial matching
+
+Partial matching can be enabled with the `--partial` option (`-p` for
+short). When used, a match fails if the expected *substring* is not
+found in the matched line.
+
+```bash
+@test 'assert_line() partial matching' {
+  run echo $'have 1\nhave 2\nhave 3'
+  assert_line --partial 'want'
+}
+```
+
+On failure, the same details are displayed as for literal matching,
+except that the substring replaces the expected line.
+
+```
+-- no output line contains substring --
+substring : want
+output (3 lines):
+  have 1
+  have 2
+  have 3
+--
+```
+
+This option and regular expression matching (`--regexp` or `-e`) are
+mutually exclusive. An error is displayed when used simultaneously.
+
+#### Regular expression matching
+
+Regular expression matching can be enabled with the `--regexp` option
+(`-e` for short). When used, a match fails if the *extended regular
+expression* does not match the line being tested.
+
+*Note: As expected, the anchors `^` and `$` bind to the beginning and
+the end of the matched line, respectively.*
+
+```bash
+@test 'assert_line() regular expression matching' {
+  run echo $'have-0\nhave-1\nhave-2'
+  assert_line --index 1 --regexp '^want-[0-9]$'
+}
+```
+
+On failure, the same details are displayed as for literal matching,
+except that the regular expression replaces the expected line.
+
+```
+-- regular expression does not match line --
+index  : 1
+regexp : ^want-[0-9]$
+line   : have-1
+--
+```
+
+An error is displayed if the specified extended regular expression is
+invalid.
+
+This option and partial matching (`--partial` or `-p`) are mutually
+exclusive. An error is displayed when used simultaneously.
+
+
+### `refute_line`
+
+Similarly to `refute_output`, this function helps to verify that a
+command or function produces the correct output. It checks that the
+unexpected line does not appear in the output (default) or in a specific
+line of it. Matching can be literal (default), partial or regular
+expression. This function is the logical complement of `assert_line`.
+
+***Warning:*** *Due to a [bug in Bats][bats-93], empty lines are
+discarded from `${lines[@]}`, causing line indices to change and
+preventing testing for empty lines.*
+
+[bats-93]: https://github.com/sstephenson/bats/pull/93
+
+#### Looking for a line in the output
+
+By default, the entire output is searched for the unexpected line. The
+assertion fails if the unexpected line is found in `${lines[@]}`.
+
+```bash
+@test 'refute_line() looking for line' {
+  run echo $'have-0\nwant\nhave-2'
+  refute_line 'want'
+}
+```
+
+On failure, the unexpected line, the index of its first match and the
+output with the matching line highlighted are displayed.
+
+***Warning:*** *The output displayed does not contain empty lines. See
+the Warning above for more.*
+
+```
+-- line should not be in output --
+line  : want
+index : 1
+output (3 lines):
+  have-0
+> want
+  have-2
+--
+```
+
+If output is not longer than one line, it is displayed in *two-column*
+format.
+
+#### Matching a specific line
+
+When the `--index <idx>` option is used (`-n <idx>` for short), the
+unexpected line is matched only against the line identified by the given
+index. The assertion fails if the unexpected line equals
+`${lines[<idx>]}`.
+
+```bash
+@test 'refute_line() specific line' {
+  run echo $'have-0\nwant-1\nhave-2'
+  refute_line --index 1 'want-1'
+}
+```
+
+On failure, the index and the unexpected line are displayed.
+
+```
+-- line should differ --
+index : 1
+line  : want-1
+--
+```
+
+#### Partial matching
+
+Partial matching can be enabled with the `--partial` option (`-p` for
+short). When used, a match fails if the unexpected *substring* is found
+in the matched line.
+
+```bash
+@test 'refute_line() partial matching' {
+  run echo $'have 1\nwant 2\nhave 3'
+  refute_line --partial 'want'
+}
+```
+
+On failure, in addition to the details of literal matching, the
+substring is also displayed. When used with `--index <idx>` the
+substring replaces the unexpected line.
+
+```
+-- no line should contain substring --
+substring : want
+index     : 1
+output (3 lines):
+  have 1
+> want 2
+  have 3
+--
+```
+
+This option and regular expression matching (`--regexp` or `-e`) are
+mutually exclusive. An error is displayed when used simultaneously.
+
+#### Regular expression matching
+
+Regular expression matching can be enabled with the `--regexp` option
+(`-e` for short). When used, a match fails if the *extended regular
+expression* matches the line being tested.
+
+*Note: As expected, the anchors `^` and `$` bind to the beginning and
+the end of the matched line, respectively.*
+
+```bash
+@test 'refute_line() regular expression matching' {
+  run echo $'Foobar v0.1.0\nRelease date: 2015-11-29'
+  refute_line --index 0 --regexp '^Foobar v[0-9]+\.[0-9]+\.[0-9]$'
+}
+```
+
+On failure, in addition to the details of literal matching, the regular
+expression is also displayed. When used with `--index <idx>` the regular
+expression replaces the unexpected line.
+
+```
+-- regular expression should not match line --
+index  : 0
+regexp : ^Foobar v[0-9]+\.[0-9]+\.[0-9]$
+line   : Foobar v0.1.0
+--
+```
+
+An error is displayed if the specified extended regular expression is
+invalid.
+
+This option and partial matching (`--partial` or `-p`) are mutually
+exclusive. An error is displayed when used simultaneously.
+
+
+## Options
+
+For functions that have options, `--` disables option parsing for the
+remaining arguments to allow using arguments identical to one of the
+allowed options.
+
+```bash
+assert_output -- '-p'
+```
+
+Specifying `--` as an argument is similarly simple.
+ +```bash +refute_line -- '--' +``` + + + + +[bats]: https://github.com/sstephenson/bats +[bats-support-output]: https://github.com/ztombol/bats-support#output-formatting +[bats-support]: https://github.com/ztombol/bats-support +[bats-docs]: https://github.com/ztombol/bats-docs +[bash-comp-cmd]: https://www.gnu.org/software/bash/manual/bash.html#Compound-Commands diff --git a/test/node_modules/bats-assert/load.bash b/test/node_modules/bats-assert/load.bash new file mode 100644 index 000000000..ac4a875a9 --- /dev/null +++ b/test/node_modules/bats-assert/load.bash @@ -0,0 +1 @@ +source "$(dirname "${BASH_SOURCE[0]}")/src/assert.bash" diff --git a/test/node_modules/bats-assert/package.json b/test/node_modules/bats-assert/package.json new file mode 100644 index 000000000..b107ce747 --- /dev/null +++ b/test/node_modules/bats-assert/package.json @@ -0,0 +1,128 @@ +{ + "_args": [ + [ + { + "raw": "bats-assert", + "scope": null, + "escapedName": "bats-assert", + "name": "bats-assert", + "rawSpec": "", + "spec": "latest", + "type": "tag" + }, + "/home/lpabon/git/golang/porx/src/github.com/libopenstorage/openstorage/test" + ] + ], + "_from": "bats-assert@latest", + "_hasShrinkwrap": false, + "_id": "bats-assert@2.0.0", + "_inCache": true, + "_location": "/bats-assert", + "_nodeVersion": "11.1.0", + "_npmOperationalInternal": { + "host": "s3://npm-registry-packages", + "tmp": "tmp/bats-assert_2.0.0_1543265794872_0.017482633148563353" + }, + "_npmUser": { + "name": "jasonkarns", + "email": "jason.karns@gmail.com" + }, + "_npmVersion": "6.4.1", + "_phantomChildren": {}, + "_requested": { + "raw": "bats-assert", + "scope": null, + "escapedName": "bats-assert", + "name": "bats-assert", + "rawSpec": "", + "spec": "latest", + "type": "tag" + }, + "_requiredBy": [ + "#DEV:/", + "#USER" + ], + "_resolved": "https://registry.npmjs.org/bats-assert/-/bats-assert-2.0.0.tgz", + "_shasum": "ba1b4eeee2c7848f1a25948b623790dd41a2b94b", + "_shrinkwrap": null, + "_spec": "bats-assert", + "_where": "/home/lpabon/git/golang/porx/src/github.com/libopenstorage/openstorage/test", + "bugs": { + "url": "https://github.com/jasonkarns/bats-assert-1/issues" + }, + "contributors": [ + { + "name": "Zoltán Tömböl", + "url": "https://github.com/ztombol" + }, + { + "name": "Sam Stephenson", + "email": "sstephenson@gmail.com", + "url": "http://sstephenson.us/" + }, + { + "name": "Jason Karns", + "email": "jason.karns@gmail.com", + "url": "http://jason.karns.name" + }, + { + "name": "Mislav Marohnić", + "email": "mislav.marohnic@gmail.com", + "url": "http://mislav.net/" + }, + { + "name": "Tim Pope", + "url": "https://github.com/tpope" + } + ], + "dependencies": {}, + "description": "Common assertions for Bats", + "devDependencies": {}, + "directories": { + "lib": "src", + "test": "test" + }, + "dist": { + "integrity": "sha512-qO3kNilWxW8iCONu9NDUfvsCiC6JzL6DPOc/DGq9z3bZ9/A7wURJ+FnFMxGbofOmWbCoy7pVhofn0o47A95qkQ==", + "shasum": "ba1b4eeee2c7848f1a25948b623790dd41a2b94b", + "tarball": "https://registry.npmjs.org/bats-assert/-/bats-assert-2.0.0.tgz", + "fileCount": 6, + "unpackedSize": 49320, + "npm-signature": "-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.4\r\nComment: 
https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJb/F4DCRA9TVsSAnZWagAAeTMP/09Y5jG5lpdZTnpFrJT+\nbrHJWAkCGYT9DYjcpVY4Q9G1YCtN8ixCPrEv9Lkc2Nl1AepJTYbr5PPsDOlg\njRjQmeW7Ai4UeAdIRkQLYConiPQe1FHWmHTJxmB6lcJFjs/+BgfIBobdLBYk\ncGR7WXpD0k0/6jZFRn3JJ60EQhOE+1Fdc+PZMPp6AiaMtfo5N5jVCB/FvmQV\nFiwHQHlhkP8LwaRF00bBdtPNqKsG/ZLCj9eB7YPmuQc+GsaLk1uQldR27l+d\nmwy7Gqpcnzf+xpJoIe/Bw9O0Zr+VNU+cU6xf6+9epWrHVjBdzLUGjEOViOCl\nPQWgIgbSxy6ZoMNC4qUD60tfpAxOJHLPBVq6zIIokue2ghHBA3BwRKXzHohy\nkBKMvMmiQEHKmxGuPM5WcO04qLk3zfVDCesL2o0sW+/f9PrXisR/VrB6OvIt\nRllEZRk+QHkjJ+LTr0IYX9y/evZACMeHHTnnAMqlnffcIH+Uuzb2bSfCtLXE\ntQ75yZ23mAUPhbXd/ZMdGYQFCjhyIEJva6RepbaQQF7s/VL0hs0LSO3w8L52\n+13892vkunP0KazgQCLlLlLJ5lvuQ8soKhVYoTHQ1iKvd7KuB3IHJmcDRlCz\nKeDZqWwcYdQdgpLBp5aGzar58fsAfFh5sXIB5zHllISE8ApoQNxxfgL6y4Uc\n78A1\r\n=zs5d\r\n-----END PGP SIGNATURE-----\r\n"
+  },
+  "gitHead": "d750c5a1b44bf6fc96726aea76f4621db5fd602f",
+  "homepage": "https://github.com/jasonkarns/bats-assert-1",
+  "keywords": [
+    "bats",
+    "bash",
+    "shell",
+    "test",
+    "unit",
+    "assert",
+    "assertion",
+    "helper"
+  ],
+  "license": "CC0-1.0",
+  "maintainers": [
+    {
+      "name": "jasonkarns",
+      "email": "jason@karns.name"
+    }
+  ],
+  "name": "bats-assert",
+  "optionalDependencies": {},
+  "peerDependencies": {
+    "bats-support": "git+https://github.com/ztombol/bats-support.git#v0.2.0"
+  },
+  "readme": "ERROR: No README data found!",
+  "repository": {
+    "type": "git",
+    "url": "git+https://github.com/jasonkarns/bats-assert-1.git"
+  },
+  "scripts": {
+    "postversion": "npm publish",
+    "prepublishOnly": "npm run publish:github",
+    "publish:github": "git push --follow-tags"
+  },
+  "version": "2.0.0"
+}
diff --git a/test/node_modules/bats-assert/src/assert.bash b/test/node_modules/bats-assert/src/assert.bash
new file mode 100644
index 000000000..efe069a13
--- /dev/null
+++ b/test/node_modules/bats-assert/src/assert.bash
@@ -0,0 +1,755 @@
+#
+# bats-assert - Common assertions for Bats
+#
+# Written in 2016 by Zoltan Tombol
+#
+# To the extent possible under law, the author(s) have dedicated all
+# copyright and related and neighboring rights to this software to the
+# public domain worldwide. This software is distributed without any
+# warranty.
+#
+# You should have received a copy of the CC0 Public Domain Dedication
+# along with this software. If not, see
+# <http://creativecommons.org/publicdomain/zero/1.0/>.
+#
+
+#
+# assert.bash
+# -----------
+#
+# Assertions are functions that perform a test and output relevant
+# information on failure to help debugging. They return 1 on failure
+# and 0 otherwise.
+#
+# All output is formatted for readability using the functions of
+# `output.bash' and sent to the standard error.
+#
+
+# Fail and display the expression if it evaluates to false.
+#
+# NOTE: The expression must be a simple command. Compound commands, such
+# as `[[', can be used only when executed with `bash -c'.
+#
+# Globals:
+#   none
+# Arguments:
+#   $1 - expression
+# Returns:
+#   0 - expression evaluates to TRUE
+#   1 - otherwise
+# Outputs:
+#   STDERR - details, on failure
+assert() {
+  if ! "$@"; then
+    batslib_print_kv_single 10 'expression' "$*" \
+      | batslib_decorate 'assertion failed' \
+      | fail
+  fi
+}
+
+# Fail and display the expression if it evaluates to true.
+#
+# NOTE: The expression must be a simple command. Compound commands, such
+# as `[[', can be used only when executed with `bash -c'.
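+#
+# Example (illustrative): a compound test has to be wrapped, e.g.
+#   refute bash -c "[[ -e '/var/log/test.log' ]]"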
+# +# Globals: +# none +# Arguments: +# $1 - expression +# Returns: +# 0 - expression evaluates to FALSE +# 1 - otherwise +# Outputs: +# STDERR - details, on failure +refute() { + if "$@"; then + batslib_print_kv_single 10 'expression' "$*" \ + | batslib_decorate 'assertion succeeded, but it was expected to fail' \ + | fail + fi +} + +# Fail and display details if the expected and actual values do not +# equal. Details include both values. +# +# Globals: +# none +# Arguments: +# $1 - actual value +# $2 - expected value +# Returns: +# 0 - values equal +# 1 - otherwise +# Outputs: +# STDERR - details, on failure +assert_equal() { + if [[ $1 != "$2" ]]; then + batslib_print_kv_single_or_multi 8 \ + 'expected' "$2" \ + 'actual' "$1" \ + | batslib_decorate 'values do not equal' \ + | fail + fi +} + +# Fail and display details if `$status' is not 0. Details include +# `$status' and `$output'. +# +# Globals: +# status +# output +# Arguments: +# none +# Returns: +# 0 - `$status' is 0 +# 1 - otherwise +# Outputs: +# STDERR - details, on failure +assert_success() { + if (( status != 0 )); then + { local -ir width=6 + batslib_print_kv_single "$width" 'status' "$status" + batslib_print_kv_single_or_multi "$width" 'output' "$output" + } | batslib_decorate 'command failed' \ + | fail + fi +} + +# Fail and display details if `$status' is 0. Details include `$output'. +# +# Optionally, when the expected status is specified, fail when it does +# not equal `$status'. In this case, details include the expected and +# actual status, and `$output'. +# +# Globals: +# status +# output +# Arguments: +# $1 - [opt] expected status +# Returns: +# 0 - `$status' is not 0, or +# `$status' equals the expected status +# 1 - otherwise +# Outputs: +# STDERR - details, on failure +assert_failure() { + (( $# > 0 )) && local -r expected="$1" + if (( status == 0 )); then + batslib_print_kv_single_or_multi 6 'output' "$output" \ + | batslib_decorate 'command succeeded, but it was expected to fail' \ + | fail + elif (( $# > 0 )) && (( status != expected )); then + { local -ir width=8 + batslib_print_kv_single "$width" \ + 'expected' "$expected" \ + 'actual' "$status" + batslib_print_kv_single_or_multi "$width" \ + 'output' "$output" + } | batslib_decorate 'command failed as expected, but status differs' \ + | fail + fi +} + +# Fail and display details if `$output' does not match the expected +# output. The expected output can be specified either by the first +# parameter or on the standard input. +# +# By default, literal matching is performed. The assertion fails if the +# expected output does not equal `$output'. Details include both values. +# +# Option `--partial' enables partial matching. The assertion fails if +# the expected substring cannot be found in `$output'. +# +# Option `--regexp' enables regular expression matching. The assertion +# fails if the extended regular expression does not match `$output'. An +# invalid regular expression causes an error to be displayed. +# +# It is an error to use partial and regular expression matching +# simultaneously. 
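+#
+# Example (illustrative): `assert_output --partial 'created'' passes when
+# `$output' contains the substring `created'.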
+#
+# Globals:
+#   output
+# Options:
+#   -p, --partial - partial matching
+#   -e, --regexp - extended regular expression matching
+#   -, --stdin - read expected output from the standard input
+# Arguments:
+#   $1 - expected output
+# Returns:
+#   0 - expected matches the actual output
+#   1 - otherwise
+# Inputs:
+#   STDIN - [=$1] expected output
+# Outputs:
+#   STDERR - details, on failure
+#            error message, on error
+assert_output() {
+  local -i is_mode_partial=0
+  local -i is_mode_regexp=0
+  local -i is_mode_nonempty=0
+  local -i use_stdin=0
+
+  # Handle options.
+  if (( $# == 0 )); then
+    is_mode_nonempty=1
+  fi
+
+  while (( $# > 0 )); do
+    case "$1" in
+      -p|--partial) is_mode_partial=1; shift ;;
+      -e|--regexp) is_mode_regexp=1; shift ;;
+      -|--stdin) use_stdin=1; shift ;;
+      --) shift; break ;;
+      *) break ;;
+    esac
+  done
+
+  if (( is_mode_partial )) && (( is_mode_regexp )); then
+    echo "\`--partial' and \`--regexp' are mutually exclusive" \
+      | batslib_decorate 'ERROR: assert_output' \
+      | fail
+    return $?
+  fi
+
+  # Arguments.
+  local expected
+  if (( use_stdin )); then
+    expected="$(cat -)"
+  else
+    expected="$1"
+  fi
+
+  # Matching.
+  if (( is_mode_nonempty )); then
+    if [ -z "$output" ]; then
+      echo 'expected non-empty output, but output was empty' \
+        | batslib_decorate 'no output' \
+        | fail
+    fi
+  elif (( is_mode_regexp )); then
+    if [[ '' =~ $expected ]] || (( $? == 2 )); then
+      echo "Invalid extended regular expression: \`$expected'" \
+        | batslib_decorate 'ERROR: assert_output' \
+        | fail
+    elif ! [[ $output =~ $expected ]]; then
+      batslib_print_kv_single_or_multi 6 \
+          'regexp' "$expected" \
+          'output' "$output" \
+        | batslib_decorate 'regular expression does not match output' \
+        | fail
+    fi
+  elif (( is_mode_partial )); then
+    if [[ $output != *"$expected"* ]]; then
+      batslib_print_kv_single_or_multi 9 \
+          'substring' "$expected" \
+          'output' "$output" \
+        | batslib_decorate 'output does not contain substring' \
+        | fail
+    fi
+  else
+    if [[ $output != "$expected" ]]; then
+      batslib_print_kv_single_or_multi 8 \
+          'expected' "$expected" \
+          'actual' "$output" \
+        | batslib_decorate 'output differs' \
+        | fail
+    fi
+  fi
+}
+
+# Fail and display details if `$output' matches the unexpected output.
+# The unexpected output can be specified either by the first parameter
+# or on the standard input.
+#
+# By default, literal matching is performed. The assertion fails if the
+# unexpected output equals `$output'. Details include `$output'.
+#
+# Option `--partial' enables partial matching. The assertion fails if
+# the unexpected substring is found in `$output'. The unexpected
+# substring is added to details.
+#
+# Option `--regexp' enables regular expression matching. The assertion
+# fails if the extended regular expression matches `$output'. The
+# regular expression is added to details. An invalid regular expression
+# causes an error to be displayed.
+#
+# It is an error to use partial and regular expression matching
+# simultaneously.
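+#
+# Example (illustrative): `refute_output --regexp '^ERROR'' passes only
+# when `$output' does not begin with `ERROR'.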
+#
+# Globals:
+#   output
+# Options:
+#   -p, --partial - partial matching
+#   -e, --regexp - extended regular expression matching
+#   -, --stdin - read unexpected output from the standard input
+# Arguments:
+#   $1 - unexpected output
+# Returns:
+#   0 - unexpected output does not match the actual output
+#   1 - otherwise
+# Inputs:
+#   STDIN - [=$1] unexpected output
+# Outputs:
+#   STDERR - details, on failure
+#            error message, on error
+refute_output() {
+  local -i is_mode_partial=0
+  local -i is_mode_regexp=0
+  local -i is_mode_empty=0
+  local -i use_stdin=0
+
+  # Handle options.
+  if (( $# == 0 )); then
+    is_mode_empty=1
+  fi
+
+  while (( $# > 0 )); do
+    case "$1" in
+      -p|--partial) is_mode_partial=1; shift ;;
+      -e|--regexp) is_mode_regexp=1; shift ;;
+      -|--stdin) use_stdin=1; shift ;;
+      --) shift; break ;;
+      *) break ;;
+    esac
+  done
+
+  if (( is_mode_partial )) && (( is_mode_regexp )); then
+    echo "\`--partial' and \`--regexp' are mutually exclusive" \
+      | batslib_decorate 'ERROR: refute_output' \
+      | fail
+    return $?
+  fi
+
+  # Arguments.
+  local unexpected
+  if (( use_stdin )); then
+    unexpected="$(cat -)"
+  else
+    unexpected="$1"
+  fi
+
+  if (( is_mode_regexp == 1 )) && [[ '' =~ $unexpected ]] || (( $? == 2 )); then
+    echo "Invalid extended regular expression: \`$unexpected'" \
+      | batslib_decorate 'ERROR: refute_output' \
+      | fail
+    return $?
+  fi
+
+  # Matching.
+  if (( is_mode_empty )); then
+    if [ -n "$output" ]; then
+      batslib_print_kv_single_or_multi 6 \
+          'output' "$output" \
+        | batslib_decorate 'output non-empty, but expected no output' \
+        | fail
+    fi
+  elif (( is_mode_regexp )); then
+    if [[ $output =~ $unexpected ]] || (( $? == 0 )); then
+      batslib_print_kv_single_or_multi 6 \
+          'regexp' "$unexpected" \
+          'output' "$output" \
+        | batslib_decorate 'regular expression should not match output' \
+        | fail
+    fi
+  elif (( is_mode_partial )); then
+    if [[ $output == *"$unexpected"* ]]; then
+      batslib_print_kv_single_or_multi 9 \
+          'substring' "$unexpected" \
+          'output' "$output" \
+        | batslib_decorate 'output should not contain substring' \
+        | fail
+    fi
+  else
+    if [[ $output == "$unexpected" ]]; then
+      batslib_print_kv_single_or_multi 6 \
+          'output' "$output" \
+        | batslib_decorate 'output equals, but it was expected to differ' \
+        | fail
+    fi
+  fi
+}
+
+# Fail and display details if the expected line is not found in the
+# output (default) or in a specific line of it.
+#
+# By default, the entire output is searched for the expected line. The
+# expected line is matched against every element of `${lines[@]}'. If no
+# match is found, the assertion fails. Details include the expected line
+# and `${lines[@]}'.
+#
+# When `--index <idx>' is specified, only the <idx>-th line is matched.
+# If the expected line does not match `${lines[<idx>]}', the assertion
+# fails. Details include <idx> and the compared lines.
+#
+# By default, literal matching is performed. A literal match fails if
+# the expected string does not equal the matched string.
+#
+# Option `--partial' enables partial matching. A partial match fails if
+# the expected substring is not found in the target string.
+#
+# Option `--regexp' enables regular expression matching. A regular
+# expression match fails if the extended regular expression does not
+# match the target string. An invalid regular expression causes an error
+# to be displayed.
+#
+# It is an error to use partial and regular expression matching
+# simultaneously.
+#
+# Mandatory arguments to long options are mandatory for short options
+# too.
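+#
+# Example (illustrative): `assert_line --index 0 --partial 'usage'' passes
+# when the first line of the output contains the substring `usage'.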
+#
+# Globals:
+#   output
+#   lines
+# Options:
+#   -n, --index <idx> - match the <idx>-th line
+#   -p, --partial - partial matching
+#   -e, --regexp - extended regular expression matching
+# Arguments:
+#   $1 - expected line
+# Returns:
+#   0 - match found
+#   1 - otherwise
+# Outputs:
+#   STDERR - details, on failure
+#            error message, on error
+# FIXME(ztombol): Display `${lines[@]}' instead of `$output'!
+assert_line() {
+  local -i is_match_line=0
+  local -i is_mode_partial=0
+  local -i is_mode_regexp=0
+
+  # Handle options.
+  while (( $# > 0 )); do
+    case "$1" in
+      -n|--index)
+        if (( $# < 2 )) || ! [[ $2 =~ ^([0-9]|[1-9][0-9]+)$ ]]; then
+          echo "\`--index' requires an integer argument: \`$2'" \
+            | batslib_decorate 'ERROR: assert_line' \
+            | fail
+          return $?
+        fi
+        is_match_line=1
+        local -ri idx="$2"
+        shift 2
+        ;;
+      -p|--partial) is_mode_partial=1; shift ;;
+      -e|--regexp) is_mode_regexp=1; shift ;;
+      --) shift; break ;;
+      *) break ;;
+    esac
+  done
+
+  if (( is_mode_partial )) && (( is_mode_regexp )); then
+    echo "\`--partial' and \`--regexp' are mutually exclusive" \
+      | batslib_decorate 'ERROR: assert_line' \
+      | fail
+    return $?
+  fi
+
+  # Arguments.
+  local -r expected="$1"
+
+  if (( is_mode_regexp == 1 )) && [[ '' =~ $expected ]] || (( $? == 2 )); then
+    echo "Invalid extended regular expression: \`$expected'" \
+      | batslib_decorate 'ERROR: assert_line' \
+      | fail
+    return $?
+  fi
+
+  # Matching.
+  if (( is_match_line )); then
+    # Specific line.
+    if (( is_mode_regexp )); then
+      if ! [[ ${lines[$idx]} =~ $expected ]]; then
+        batslib_print_kv_single 6 \
+            'index' "$idx" \
+            'regexp' "$expected" \
+            'line' "${lines[$idx]}" \
+          | batslib_decorate 'regular expression does not match line' \
+          | fail
+      fi
+    elif (( is_mode_partial )); then
+      if [[ ${lines[$idx]} != *"$expected"* ]]; then
+        batslib_print_kv_single 9 \
+            'index' "$idx" \
+            'substring' "$expected" \
+            'line' "${lines[$idx]}" \
+          | batslib_decorate 'line does not contain substring' \
+          | fail
+      fi
+    else
+      if [[ ${lines[$idx]} != "$expected" ]]; then
+        batslib_print_kv_single 8 \
+            'index' "$idx" \
+            'expected' "$expected" \
+            'actual' "${lines[$idx]}" \
+          | batslib_decorate 'line differs' \
+          | fail
+      fi
+    fi
+  else
+    # Contained in output.
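+    # Search every element of `${lines[@]}' in turn; return 0 on the first
+    # match, otherwise fall through to the failure report below.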
+    if (( is_mode_regexp )); then
+      local -i idx
+      for (( idx = 0; idx < ${#lines[@]}; ++idx )); do
+        [[ ${lines[$idx]} =~ $expected ]] && return 0
+      done
+      { local -ar single=(
+          'regexp' "$expected"
+        )
+        local -ar may_be_multi=(
+          'output' "$output"
+        )
+        local -ir width="$( batslib_get_max_single_line_key_width \
+                              "${single[@]}" "${may_be_multi[@]}" )"
+        batslib_print_kv_single "$width" "${single[@]}"
+        batslib_print_kv_single_or_multi "$width" "${may_be_multi[@]}"
+      } | batslib_decorate 'no output line matches regular expression' \
+        | fail
+    elif (( is_mode_partial )); then
+      local -i idx
+      for (( idx = 0; idx < ${#lines[@]}; ++idx )); do
+        [[ ${lines[$idx]} == *"$expected"* ]] && return 0
+      done
+      { local -ar single=(
+          'substring' "$expected"
+        )
+        local -ar may_be_multi=(
+          'output' "$output"
+        )
+        local -ir width="$( batslib_get_max_single_line_key_width \
+                              "${single[@]}" "${may_be_multi[@]}" )"
+        batslib_print_kv_single "$width" "${single[@]}"
+        batslib_print_kv_single_or_multi "$width" "${may_be_multi[@]}"
+      } | batslib_decorate 'no output line contains substring' \
+        | fail
+    else
+      local -i idx
+      for (( idx = 0; idx < ${#lines[@]}; ++idx )); do
+        [[ ${lines[$idx]} == "$expected" ]] && return 0
+      done
+      { local -ar single=(
+          'line' "$expected"
+        )
+        local -ar may_be_multi=(
+          'output' "$output"
+        )
+        local -ir width="$( batslib_get_max_single_line_key_width \
+                              "${single[@]}" "${may_be_multi[@]}" )"
+        batslib_print_kv_single "$width" "${single[@]}"
+        batslib_print_kv_single_or_multi "$width" "${may_be_multi[@]}"
+      } | batslib_decorate 'output does not contain line' \
+        | fail
+    fi
+  fi
+}
+
+# Fail and display details if the unexpected line is found in the output
+# (default) or in a specific line of it.
+#
+# By default, the entire output is searched for the unexpected line. The
+# unexpected line is matched against every element of `${lines[@]}'. If
+# a match is found, the assertion fails. Details include the unexpected
+# line, the index of the first match and `${lines[@]}' with the matching
+# line highlighted if `${lines[@]}' is longer than one line.
+#
+# When `--index <idx>' is specified, only the <idx>-th line is matched.
+# If the unexpected line matches `${lines[<idx>]}', the assertion fails.
+# Details include <idx> and the unexpected line.
+#
+# By default, literal matching is performed. A literal match fails if
+# the unexpected string does not equal the matched string.
+#
+# Option `--partial' enables partial matching. A partial match fails if
+# the unexpected substring is found in the target string. When used with
+# `--index <idx>', the unexpected substring is also displayed on
+# failure.
+#
+# Option `--regexp' enables regular expression matching. A regular
+# expression match fails if the extended regular expression matches the
+# target string. When used with `--index <idx>', the regular expression
+# is also displayed on failure. An invalid regular expression causes an
+# error to be displayed.
+#
+# It is an error to use partial and regular expression matching
+# simultaneously.
+#
+# Mandatory arguments to long options are mandatory for short options
+# too.
+#
+# Globals:
+#   output
+#   lines
+# Options:
+#   -n, --index <idx> - match the <idx>-th line
+#   -p, --partial - partial matching
+#   -e, --regexp - extended regular expression matching
+# Arguments:
+#   $1 - unexpected line
+# Returns:
+#   0 - match not found
+#   1 - otherwise
+# Outputs:
+#   STDERR - details, on failure
+#            error message, on error
+# FIXME(ztombol): Display `${lines[@]}' instead of `$output'!
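+#
+# Example (illustrative): `refute_line --partial 'Error'' fails as soon as
+# any line of the output contains the substring `Error'.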
+refute_line() { + local -i is_match_line=0 + local -i is_mode_partial=0 + local -i is_mode_regexp=0 + + # Handle options. + while (( $# > 0 )); do + case "$1" in + -n|--index) + if (( $# < 2 )) || ! [[ $2 =~ ^([0-9]|[1-9][0-9]+)$ ]]; then + echo "\`--index' requires an integer argument: \`$2'" \ + | batslib_decorate 'ERROR: refute_line' \ + | fail + return $? + fi + is_match_line=1 + local -ri idx="$2" + shift 2 + ;; + -p|--partial) is_mode_partial=1; shift ;; + -e|--regexp) is_mode_regexp=1; shift ;; + --) shift; break ;; + *) break ;; + esac + done + + if (( is_mode_partial )) && (( is_mode_regexp )); then + echo "\`--partial' and \`--regexp' are mutually exclusive" \ + | batslib_decorate 'ERROR: refute_line' \ + | fail + return $? + fi + + # Arguments. + local -r unexpected="$1" + + if (( is_mode_regexp == 1 )) && [[ '' =~ $unexpected ]] || (( $? == 2 )); then + echo "Invalid extended regular expression: \`$unexpected'" \ + | batslib_decorate 'ERROR: refute_line' \ + | fail + return $? + fi + + # Matching. + if (( is_match_line )); then + # Specific line. + if (( is_mode_regexp )); then + if [[ ${lines[$idx]} =~ $unexpected ]] || (( $? == 0 )); then + batslib_print_kv_single 6 \ + 'index' "$idx" \ + 'regexp' "$unexpected" \ + 'line' "${lines[$idx]}" \ + | batslib_decorate 'regular expression should not match line' \ + | fail + fi + elif (( is_mode_partial )); then + if [[ ${lines[$idx]} == *"$unexpected"* ]]; then + batslib_print_kv_single 9 \ + 'index' "$idx" \ + 'substring' "$unexpected" \ + 'line' "${lines[$idx]}" \ + | batslib_decorate 'line should not contain substring' \ + | fail + fi + else + if [[ ${lines[$idx]} == "$unexpected" ]]; then + batslib_print_kv_single 5 \ + 'index' "$idx" \ + 'line' "${lines[$idx]}" \ + | batslib_decorate 'line should differ' \ + | fail + fi + fi + else + # Line contained in output. + if (( is_mode_regexp )); then + local -i idx + for (( idx = 0; idx < ${#lines[@]}; ++idx )); do + if [[ ${lines[$idx]} =~ $unexpected ]]; then + { local -ar single=( + 'regexp' "$unexpected" + 'index' "$idx" + ) + local -a may_be_multi=( + 'output' "$output" + ) + local -ir width="$( batslib_get_max_single_line_key_width \ + "${single[@]}" "${may_be_multi[@]}" )" + batslib_print_kv_single "$width" "${single[@]}" + if batslib_is_single_line "${may_be_multi[1]}"; then + batslib_print_kv_single "$width" "${may_be_multi[@]}" + else + may_be_multi[1]="$( printf '%s' "${may_be_multi[1]}" \ + | batslib_prefix \ + | batslib_mark '>' "$idx" )" + batslib_print_kv_multi "${may_be_multi[@]}" + fi + } | batslib_decorate 'no line should match the regular expression' \ + | fail + return $? + fi + done + elif (( is_mode_partial )); then + local -i idx + for (( idx = 0; idx < ${#lines[@]}; ++idx )); do + if [[ ${lines[$idx]} == *"$unexpected"* ]]; then + { local -ar single=( + 'substring' "$unexpected" + 'index' "$idx" + ) + local -a may_be_multi=( + 'output' "$output" + ) + local -ir width="$( batslib_get_max_single_line_key_width \ + "${single[@]}" "${may_be_multi[@]}" )" + batslib_print_kv_single "$width" "${single[@]}" + if batslib_is_single_line "${may_be_multi[1]}"; then + batslib_print_kv_single "$width" "${may_be_multi[@]}" + else + may_be_multi[1]="$( printf '%s' "${may_be_multi[1]}" \ + | batslib_prefix \ + | batslib_mark '>' "$idx" )" + batslib_print_kv_multi "${may_be_multi[@]}" + fi + } | batslib_decorate 'no line should contain substring' \ + | fail + return $? 
+ fi + done + else + local -i idx + for (( idx = 0; idx < ${#lines[@]}; ++idx )); do + if [[ ${lines[$idx]} == "$unexpected" ]]; then + { local -ar single=( + 'line' "$unexpected" + 'index' "$idx" + ) + local -a may_be_multi=( + 'output' "$output" + ) + local -ir width="$( batslib_get_max_single_line_key_width \ + "${single[@]}" "${may_be_multi[@]}" )" + batslib_print_kv_single "$width" "${single[@]}" + if batslib_is_single_line "${may_be_multi[1]}"; then + batslib_print_kv_single "$width" "${may_be_multi[@]}" + else + may_be_multi[1]="$( printf '%s' "${may_be_multi[1]}" \ + | batslib_prefix \ + | batslib_mark '>' "$idx" )" + batslib_print_kv_multi "${may_be_multi[@]}" + fi + } | batslib_decorate 'line should not be in output' \ + | fail + return $? + fi + done + fi + fi +} diff --git a/test/node_modules/bats-support/CHANGELOG.md b/test/node_modules/bats-support/CHANGELOG.md new file mode 100644 index 000000000..324d247a7 --- /dev/null +++ b/test/node_modules/bats-support/CHANGELOG.md @@ -0,0 +1,46 @@ +# Change Log + +All notable changes to this project will be documented in this file. +This project adheres to [Semantic Versioning](http://semver.org/). + + +## [0.3.0] - 2016-11-29 + +### Added + +- Restricting invocation to specific locations with + `batslib_is_caller()` + + +## [0.2.0] - 2016-03-22 + +### Added + +- `npm` support +- Reporting arbitrary failures with `fail()` (moved from `bats-assert`) + +### Changed + +- Library renamed to `bats-support` + + +## 0.1.0 - 2016-02-16 + +### Added + +- Two-column key-value formatting with `batslib_print_kv_single()` +- Multi-line key-value formatting with `batslib_print_kv_multi()` +- Mixed formatting with `batslib_print_kv_single_or_multi()` +- Header and footer decoration with `batslib_decorate()` +- Prefixing lines with `batslib_prefix()` +- Marking lines with `batslib_mark()` +- Common output function `batslib_err()` +- Line counting with `batslib_count_lines()` +- Checking whether a text is one line long with + `batslib_is_single_line()` +- Determining key width for two-column and mixed formatting with + `batslib_get_max_single_line_key_width()` + + +[0.3.0]: https://github.com/ztombol/bats-support/compare/v0.2.0...v0.3.0 +[0.2.0]: https://github.com/ztombol/bats-support/compare/v0.1.0...v0.2.0 diff --git a/test/node_modules/bats-support/LICENSE b/test/node_modules/bats-support/LICENSE new file mode 100644 index 000000000..670154e35 --- /dev/null +++ b/test/node_modules/bats-support/LICENSE @@ -0,0 +1,116 @@ +CC0 1.0 Universal + +Statement of Purpose + +The laws of most jurisdictions throughout the world automatically confer +exclusive Copyright and Related Rights (defined below) upon the creator and +subsequent owner(s) (each and all, an "owner") of an original work of +authorship and/or a database (each, a "Work"). + +Certain owners wish to permanently relinquish those rights to a Work for the +purpose of contributing to a commons of creative, cultural and scientific +works ("Commons") that the public can reliably and without fear of later +claims of infringement build upon, modify, incorporate in other works, reuse +and redistribute as freely as possible in any form whatsoever and for any +purposes, including without limitation commercial purposes. These owners may +contribute to the Commons to promote the ideal of a free culture and the +further production of creative, cultural and scientific works, or to gain +reputation or greater distribution for their Work in part through the use and +efforts of others. 
+ +For these and/or other purposes and motivations, and without any expectation +of additional consideration or compensation, the person associating CC0 with a +Work (the "Affirmer"), to the extent that he or she is an owner of Copyright +and Related Rights in the Work, voluntarily elects to apply CC0 to the Work +and publicly distribute the Work under its terms, with knowledge of his or her +Copyright and Related Rights in the Work and the meaning and intended legal +effect of CC0 on those rights. + +1. Copyright and Related Rights. A Work made available under CC0 may be +protected by copyright and related or neighboring rights ("Copyright and +Related Rights"). Copyright and Related Rights include, but are not limited +to, the following: + + i. the right to reproduce, adapt, distribute, perform, display, communicate, + and translate a Work; + + ii. moral rights retained by the original author(s) and/or performer(s); + + iii. publicity and privacy rights pertaining to a person's image or likeness + depicted in a Work; + + iv. rights protecting against unfair competition in regards to a Work, + subject to the limitations in paragraph 4(a), below; + + v. rights protecting the extraction, dissemination, use and reuse of data in + a Work; + + vi. database rights (such as those arising under Directive 96/9/EC of the + European Parliament and of the Council of 11 March 1996 on the legal + protection of databases, and under any national implementation thereof, + including any amended or successor version of such directive); and + + vii. other similar, equivalent or corresponding rights throughout the world + based on applicable law or treaty, and any national implementations thereof. + +2. Waiver. To the greatest extent permitted by, but not in contravention of, +applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and +unconditionally waives, abandons, and surrenders all of Affirmer's Copyright +and Related Rights and associated claims and causes of action, whether now +known or unknown (including existing as well as future claims and causes of +action), in the Work (i) in all territories worldwide, (ii) for the maximum +duration provided by applicable law or treaty (including future time +extensions), (iii) in any current or future medium and for any number of +copies, and (iv) for any purpose whatsoever, including without limitation +commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes +the Waiver for the benefit of each member of the public at large and to the +detriment of Affirmer's heirs and successors, fully intending that such Waiver +shall not be subject to revocation, rescission, cancellation, termination, or +any other legal or equitable action to disrupt the quiet enjoyment of the Work +by the public as contemplated by Affirmer's express Statement of Purpose. + +3. Public License Fallback. Should any part of the Waiver for any reason be +judged legally invalid or ineffective under applicable law, then the Waiver +shall be preserved to the maximum extent permitted taking into account +Affirmer's express Statement of Purpose. 
In addition, to the extent the Waiver
+is so judged Affirmer hereby grants to each affected person a royalty-free,
+non transferable, non sublicensable, non exclusive, irrevocable and
+unconditional license to exercise Affirmer's Copyright and Related Rights in
+the Work (i) in all territories worldwide, (ii) for the maximum duration
+provided by applicable law or treaty (including future time extensions), (iii)
+in any current or future medium and for any number of copies, and (iv) for any
+purpose whatsoever, including without limitation commercial, advertising or
+promotional purposes (the "License"). The License shall be deemed effective as
+of the date CC0 was applied by Affirmer to the Work. Should any part of the
+License for any reason be judged legally invalid or ineffective under
+applicable law, such partial invalidity or ineffectiveness shall not
+invalidate the remainder of the License, and in such case Affirmer hereby
+affirms that he or she will not (i) exercise any of his or her remaining
+Copyright and Related Rights in the Work or (ii) assert any associated claims
+and causes of action with respect to the Work, in either case contrary to
+Affirmer's express Statement of Purpose.
+
+4. Limitations and Disclaimers.
+
+ a. No trademark or patent rights held by Affirmer are waived, abandoned,
+ surrendered, licensed or otherwise affected by this document.
+
+ b. Affirmer offers the Work as-is and makes no representations or warranties
+ of any kind concerning the Work, express, implied, statutory or otherwise,
+ including without limitation warranties of title, merchantability, fitness
+ for a particular purpose, non infringement, or the absence of latent or
+ other defects, accuracy, or the presence or absence of errors, whether or not
+ discoverable, all to the greatest extent permissible under applicable law.
+
+ c. Affirmer disclaims responsibility for clearing rights of other persons
+ that may apply to the Work or any use thereof, including without limitation
+ any person's Copyright and Related Rights in the Work. Further, Affirmer
+ disclaims responsibility for obtaining any necessary consents, permissions
+ or other rights required for any use of the Work.
+
+ d. Affirmer understands and acknowledges that Creative Commons is not a
+ party to this document and has no duty or obligation with respect to this
+ CC0 or use of the Work.
+
+For more information, please see
+<http://creativecommons.org/publicdomain/zero/1.0/>
diff --git a/test/node_modules/bats-support/README.md b/test/node_modules/bats-support/README.md
new file mode 100644
index 000000000..71c02ba9c
--- /dev/null
+++ b/test/node_modules/bats-support/README.md
@@ -0,0 +1,189 @@
+*__Important:__ `bats-core` has been renamed to `bats-support`. GitHub
+automatically redirects all references, e.g. submodules and clones will
+continue to work, but you are encouraged to [update][github-rename]
+them.
+Version numbering continues where `bats-core` left off.*
+
+[github-rename]: https://help.github.com/articles/renaming-a-repository/
+
+- - - - -
+
+# bats-support
+
+[![GitHub license](https://img.shields.io/badge/license-CC0-blue.svg)](https://raw.githubusercontent.com/ztombol/bats-support/master/LICENSE)
+[![GitHub release](https://img.shields.io/github/release/ztombol/bats-support.svg)](https://github.com/ztombol/bats-support/releases/latest)
+[![Build Status](https://travis-ci.org/ztombol/bats-support.svg?branch=master)](https://travis-ci.org/ztombol/bats-support)
+
+`bats-support` is a supporting library providing common functions to
+test helper libraries written for [Bats][bats].
+
+Features:
+- [error reporting](#error-reporting)
+- [output formatting](#output-formatting)
+- [language tools](#language-and-execution)
+
+See the [shared documentation][bats-docs] to learn how to install and
+load this library.
+
+If you want to use this library in your own helpers or just want to
+learn about its internals, see the developer documentation in the
+[source files](src).
+
+
+## Error reporting
+
+### `fail`
+
+Display an error message and fail. This function provides a convenient
+way to report failure in arbitrary situations. You can use it to
+implement your own helpers when the ones available do not meet your
+needs. Other functions use it internally as well.
+
+```bash
+@test 'fail()' {
+  fail 'this test always fails'
+}
+```
+
+The message can also be specified on the standard input.
+
+```bash
+@test 'fail() with pipe' {
+  echo 'this test always fails' | fail
+}
+```
+
+This function always fails and simply outputs the given message.
+
+```
+this test always fails
+```
+
+
+## Output formatting
+
+Many test helpers need to produce human-readable output. This library
+provides a simple way to format simple messages and key-value pairs, and
+display them on the standard error.
+
+
+### Simple message
+
+Simple messages without structure, e.g. one-line error messages, are
+simply wrapped in a header and a footer to help them stand out.
+
+```
+-- ERROR: assert_output --
+`--partial' and `--regexp' are mutually exclusive
+--
+```
+
+
+### Key-Value pairs
+
+Some helpers, e.g. [assertions][bats-assert], structure output as
+key-value pairs. This library provides two ways to format them.
+
+When the value is one line long, a pair can be displayed in a columnar
+fashion called ***two-column*** format.
+
+```
+-- output differs --
+expected : want
+actual   : have
+--
+```
+
+When the value is longer than one line, the key and value must be
+displayed on separate lines. First, the key is displayed along with the
+number of lines in the value. Then the value, indented by two spaces
+for added readability, starts on the next line. This is called
+***multi-line*** format.
+
+```
+-- command failed --
+status : 1
+output (2 lines):
+  Error! Something went terribly wrong!
+  Our engineers are panicking... \`>`;/
+--
+```
+
+Sometimes, for clarity, it is a good idea to display related values also
+in this format, even if they are just one line long.
+
+```
+-- output differs --
+expected (1 lines):
+  want
+actual (3 lines):
+  have 1
+  have 2
+  have 3
+--
+```
+
+## Language and Execution
+
+### Restricting invocation to specific locations
+
+Sometimes a helper may work properly only when called from a certain
+location, because it depends on variables being set or on some other
+side effect.
+
+A good example is cleaning up temporary files only if the test has
+succeeded.
+The outcome of a test is only available in `teardown`. Thus, to avoid
+programming mistakes, it makes sense to restrict such a clean-up helper
+to that function.
+
+`batslib_is_caller` checks the call stack and returns `0` if the caller
+was invoked from a given function, and `1` otherwise. This function
+becomes really useful with the `--indirect` option, which allows calls
+through intermediate functions, e.g. the calling function may be called
+from a function that was called from the given function.
+
+Staying with the example above, the following code snippet implements a
+helper that is restricted to `teardown` or any function called
+indirectly from it.
+
+```shell
+clean_up() {
+  # Check caller.
+  if ! batslib_is_caller --indirect 'teardown'; then
+    echo "Must be called from \`teardown'" \
+      | batslib_decorate 'ERROR: clean_up' \
+      | fail
+    return $?
+  fi
+
+  # Body goes here...
+}
+```
+
+In some cases a helper may be called from multiple locations. For
+example, a logging function may use the test name, description or
+number, information only available in `setup`, `@test` or `teardown`, to
+distinguish entries. The following snippet implements this restriction.
+
+```shell
+log_test() {
+  # Check caller.
+  if ! ( batslib_is_caller --indirect 'setup' \
+      || batslib_is_caller --indirect "$BATS_TEST_NAME" \
+      || batslib_is_caller --indirect 'teardown' )
+  then
+    echo "Must be called from \`setup', \`@test' or \`teardown'" \
+      | batslib_decorate 'ERROR: log_test' \
+      | fail
+    return $?
+  fi
+
+  # Body goes here...
+}
+```
+
+
+
+[bats]: https://github.com/sstephenson/bats
+[bats-docs]: https://github.com/ztombol/bats-docs
+[bats-assert]: https://github.com/ztombol/bats-assert
diff --git a/test/node_modules/bats-support/load.bash b/test/node_modules/bats-support/load.bash
new file mode 100644
index 000000000..0727aebae
--- /dev/null
+++ b/test/node_modules/bats-support/load.bash
@@ -0,0 +1,3 @@
+source "$(dirname "${BASH_SOURCE[0]}")/src/output.bash"
+source "$(dirname "${BASH_SOURCE[0]}")/src/error.bash"
+source "$(dirname "${BASH_SOURCE[0]}")/src/lang.bash"
diff --git a/test/node_modules/bats-support/package.json b/test/node_modules/bats-support/package.json
new file mode 100644
index 000000000..6424e46f4
--- /dev/null
+++ b/test/node_modules/bats-support/package.json
@@ -0,0 +1,104 @@
+{
+  "_args": [
+    [
+      {
+        "raw": "bats-support",
+        "scope": null,
+        "escapedName": "bats-support",
+        "name": "bats-support",
+        "rawSpec": "",
+        "spec": "latest",
+        "type": "tag"
+      },
+      "/home/lpabon/git/golang/porx/src/github.com/libopenstorage/openstorage/test"
+    ]
+  ],
+  "_from": "bats-support@latest",
+  "_hasShrinkwrap": false,
+  "_id": "bats-support@0.3.0",
+  "_inCache": true,
+  "_location": "/bats-support",
+  "_nodeVersion": "11.7.0",
+  "_npmOperationalInternal": {
+    "host": "s3://npm-registry-packages",
+    "tmp": "tmp/bats-support_0.3.0_1548869878445_0.5883888996145032"
+  },
+  "_npmUser": {
+    "name": "jasonkarns",
+    "email": "jason.karns@gmail.com"
+  },
+  "_npmVersion": "6.5.0",
+  "_phantomChildren": {},
+  "_requested": {
+    "raw": "bats-support",
+    "scope": null,
+    "escapedName": "bats-support",
+    "name": "bats-support",
+    "rawSpec": "",
+    "spec": "latest",
+    "type": "tag"
+  },
+  "_requiredBy": [
+    "#DEV:/",
+    "#USER"
+  ],
+  "_resolved": "https://registry.npmjs.org/bats-support/-/bats-support-0.3.0.tgz",
+  "_shasum": "a1f6b8878d2a51837911fdffa0750036f60701ef",
+  "_shrinkwrap": null,
+  "_spec": "bats-support",
+  "_where": "/home/lpabon/git/golang/porx/src/github.com/libopenstorage/openstorage/test",
"/home/lpabon/git/golang/porx/src/github.com/libopenstorage/openstorage/test", + "author": { + "name": "Zoltán Tömböl", + "url": "https://github.com/ztombol" + }, + "bugs": { + "url": "https://github.com/jasonkarns/bats-support/issues" + }, + "contributors": [ + { + "name": "Jason Karns", + "email": "jason.karns@gmail.com", + "url": "http://jason.karns.name" + } + ], + "dependencies": {}, + "description": "Supporting library for Bats test helpers", + "devDependencies": { + "bats": "^1" + }, + "directories": { + "lib": "src", + "test": "test" + }, + "dist": { + "integrity": "sha512-z+2WzXbI4OZgLnynydqH8GpI3+DcOtepO66PlK47SfEzTkiuV9hxn9eIQX+uLVFbt2Oqoc7Ky3TJ/N83lqD+cg==", + "shasum": "a1f6b8878d2a51837911fdffa0750036f60701ef", + "tarball": "https://registry.npmjs.org/bats-support/-/bats-support-0.3.0.tgz", + "fileCount": 8, + "unpackedSize": 23699, + "npm-signature": "-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.4\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJcUeD2CRA9TVsSAnZWagAAF9UP/iJatCCuX3FyQ1+P52Ky\nIeLaB30IpXphtQahti2BWJSVKtlnLx3+vWzBtdGMEXVJDPYKPD/Uao7Wdb0e\n8hWayAmGIlFELokh9Idco1huX0fRLpQa1hrigHJuQ9Slydn2E9+vvNy9HVR7\nM1lyMxOUPEctNTvmxWWJg8JYt6LewEbTJrEbpqjEdg0EaM1hQ7ORfhWoWnQZ\nnqB8kqmtNSoo7e9ntMqo6jQqOaF2QK2URD74ryVmpkeuB5L9XC+l2yafld0B\niQCHIRcU2Y/qR0DtV/9TzKDZZ+DkW1w1gMp1L6wggReMonUK6id9n91s5Mmk\nFvgz5hKfYt7JjuXWnHk4SmFyKrldkth59nGH9Xq3GGnPq7HpZip9mBiAnToa\nH7TqS/lwzxBQ5BS5FPKWcAYXcze4PPwvu7bfanl1gLDOWnvsSqcVuJ7xxjDL\nLG0nKmspO3tGzx7XMo5plyVbR1t3lxKL4bgoH8YISfUgv9/7+h3/E9dUY8G9\nnyKyWbbCu29YeZgpyZTClrPIv+7mrcDBfquWqJL/S1mc1cnlFFjVef2A7BbE\nsLHA2TT6YahZqafpJXX1Yb6F/HlCE/LsqvwJ8wXO3OTXTDZq8SwRad/r4h2U\nYbr5SH8puEmpshv+eBAkMvkQEghDx/xwUqJTTDqFv4zbv0UM/EnIlA4pkz32\nd39+\r\n=BZJ1\r\n-----END PGP SIGNATURE-----\r\n" + }, + "gitHead": "d140a65044b2d6810381935ae7f0c94c7023c8c3", + "homepage": "https://github.com/jasonkarns/bats-support", + "license": "CC0-1.0", + "maintainers": [ + { + "name": "jasonkarns", + "email": "jason.karns@gmail.com" + } + ], + "name": "bats-support", + "optionalDependencies": {}, + "peerDependencies": { + "bats": "0.4 || ^1" + }, + "readme": "ERROR: No README data found!", + "repository": { + "type": "git", + "url": "git+https://github.com/jasonkarns/bats-support.git" + }, + "scripts": { + "test": "bats ${CI+-t} test" + }, + "version": "0.3.0" +} diff --git a/test/node_modules/bats-support/src/error.bash b/test/node_modules/bats-support/src/error.bash new file mode 100644 index 000000000..e5d97912e --- /dev/null +++ b/test/node_modules/bats-support/src/error.bash @@ -0,0 +1,41 @@ +# +# bats-support - Supporting library for Bats test helpers +# +# Written in 2016 by Zoltan Tombol +# +# To the extent possible under law, the author(s) have dedicated all +# copyright and related and neighboring rights to this software to the +# public domain worldwide. This software is distributed without any +# warranty. +# +# You should have received a copy of the CC0 Public Domain Dedication +# along with this software. If not, see +# . +# + +# +# error.bash +# ---------- +# +# Functions implementing error reporting. Used by public helper +# functions or test suits directly. +# + +# Fail and display a message. When no parameters are specified, the +# message is read from the standard input. Other functions use this to +# report failure. 
+#
+# Globals:
+#   none
+# Arguments:
+#   $@ - [=STDIN] message
+# Returns:
+#   1 - always
+# Inputs:
+#   STDIN - [=$@] message
+# Outputs:
+#   STDERR - message
+fail() {
+  (( $# == 0 )) && batslib_err || batslib_err "$@"
+  return 1
+}
diff --git a/test/node_modules/bats-support/src/lang.bash b/test/node_modules/bats-support/src/lang.bash
new file mode 100644
index 000000000..c57e299c6
--- /dev/null
+++ b/test/node_modules/bats-support/src/lang.bash
@@ -0,0 +1,73 @@
+#
+# bats-util - Various auxiliary functions for Bats
+#
+# Written in 2016 by Zoltan Tombol
+#
+# To the extent possible under law, the author(s) have dedicated all
+# copyright and related and neighboring rights to this software to the
+# public domain worldwide. This software is distributed without any
+# warranty.
+#
+# You should have received a copy of the CC0 Public Domain Dedication
+# along with this software. If not, see
+# <http://creativecommons.org/publicdomain/zero/1.0/>.
+#
+
+#
+# lang.bash
+# ---------
+#
+# Bash language and execution related functions. Used by public helper
+# functions.
+#
+
+# Check whether the calling function was called from a given function.
+#
+# By default, direct invocation is checked. The function succeeds if the
+# calling function was called directly from the given function. In other
+# words, if the given function is the next element on the call stack.
+#
+# When `--indirect' is specified, indirect invocation is checked. The
+# function succeeds if the calling function was called from the given
+# function with any number of intermediate calls. In other words, if the
+# given function can be found somewhere on the call stack.
+#
+# Direct invocation is a form of indirect invocation with zero
+# intermediate calls.
+#
+# Globals:
+#   FUNCNAME
+# Options:
+#   -i, --indirect - check indirect invocation
+# Arguments:
+#   $1 - calling function's name
+# Returns:
+#   0 - current function was called from the given function
+#   1 - otherwise
+batslib_is_caller() {
+  local -i is_mode_direct=1
+
+  # Handle options.
+  while (( $# > 0 )); do
+    case "$1" in
+      -i|--indirect) is_mode_direct=0; shift ;;
+      --) shift; break ;;
+      *) break ;;
+    esac
+  done
+
+  # Arguments.
+  local -r func="$1"
+
+  # Check call stack.
+  if (( is_mode_direct )); then
+    [[ $func == "${FUNCNAME[2]}" ]] && return 0
+  else
+    local -i depth
+    for (( depth=2; depth<${#FUNCNAME[@]}; ++depth )); do
+      [[ $func == "${FUNCNAME[$depth]}" ]] && return 0
+    done
+  fi
+
+  return 1
+}
diff --git a/test/node_modules/bats-support/src/output.bash b/test/node_modules/bats-support/src/output.bash
new file mode 100644
index 000000000..c6cf6a6b8
--- /dev/null
+++ b/test/node_modules/bats-support/src/output.bash
@@ -0,0 +1,279 @@
+#
+# bats-support - Supporting library for Bats test helpers
+#
+# Written in 2016 by Zoltan Tombol
+#
+# To the extent possible under law, the author(s) have dedicated all
+# copyright and related and neighboring rights to this software to the
+# public domain worldwide. This software is distributed without any
+# warranty.
+#
+# You should have received a copy of the CC0 Public Domain Dedication
+# along with this software. If not, see
+# <http://creativecommons.org/publicdomain/zero/1.0/>.
+#
+
+#
+# output.bash
+# -----------
+#
+# Private functions implementing output formatting. Used by public
+# helper functions.
+#
+
+# Print a message to the standard error. When no parameters are
+# specified, the message is read from the standard input.
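+#
+# Example (illustrative): batslib_err 'message', or, reading the standard
+# input: batslib_err < file.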
+# +# Globals: +# none +# Arguments: +# $@ - [=STDIN] message +# Returns: +# none +# Inputs: +# STDIN - [=$@] message +# Outputs: +# STDERR - message +batslib_err() { + { if (( $# > 0 )); then + echo "$@" + else + cat - + fi + } >&2 +} + +# Count the number of lines in the given string. +# +# TODO(ztombol): Fix tests and remove this note after #93 is resolved! +# NOTE: Due to a bug in Bats, `batslib_count_lines "$output"' does not +# give the same result as `${#lines[@]}' when the output contains +# empty lines. +# See PR #93 (https://github.com/sstephenson/bats/pull/93). +# +# Globals: +# none +# Arguments: +# $1 - string +# Returns: +# none +# Outputs: +# STDOUT - number of lines +batslib_count_lines() { + local -i n_lines=0 + local line + while IFS='' read -r line || [[ -n $line ]]; do + (( ++n_lines )) + done < <(printf '%s' "$1") + echo "$n_lines" +} + +# Determine whether all strings are single-line. +# +# Globals: +# none +# Arguments: +# $@ - strings +# Returns: +# 0 - all strings are single-line +# 1 - otherwise +batslib_is_single_line() { + for string in "$@"; do + (( $(batslib_count_lines "$string") > 1 )) && return 1 + done + return 0 +} + +# Determine the length of the longest key that has a single-line value. +# +# This function is useful in determining the correct width of the key +# column in two-column format when some keys may have multi-line values +# and thus should be excluded. +# +# Globals: +# none +# Arguments: +# $odd - key +# $even - value of the previous key +# Returns: +# none +# Outputs: +# STDOUT - length of longest key +batslib_get_max_single_line_key_width() { + local -i max_len=-1 + while (( $# != 0 )); do + local -i key_len="${#1}" + batslib_is_single_line "$2" && (( key_len > max_len )) && max_len="$key_len" + shift 2 + done + echo "$max_len" +} + +# Print key-value pairs in two-column format. +# +# Keys are displayed in the first column, and their corresponding values +# in the second. To evenly line up values, the key column is fixed-width +# and its width is specified with the first parameter (possibly computed +# using `batslib_get_max_single_line_key_width'). +# +# Globals: +# none +# Arguments: +# $1 - width of key column +# $even - key +# $odd - value of the previous key +# Returns: +# none +# Outputs: +# STDOUT - formatted key-value pairs +batslib_print_kv_single() { + local -ir col_width="$1"; shift + while (( $# != 0 )); do + printf '%-*s : %s\n' "$col_width" "$1" "$2" + shift 2 + done +} + +# Print key-value pairs in multi-line format. +# +# The key is displayed first with the number of lines of its +# corresponding value in parenthesis. Next, starting on the next line, +# the value is displayed. For better readability, it is recommended to +# indent values using `batslib_prefix'. +# +# Globals: +# none +# Arguments: +# $odd - key +# $even - value of the previous key +# Returns: +# none +# Outputs: +# STDOUT - formatted key-value pairs +batslib_print_kv_multi() { + while (( $# != 0 )); do + printf '%s (%d lines):\n' "$1" "$( batslib_count_lines "$2" )" + printf '%s\n' "$2" + shift 2 + done +} + +# Print all key-value pairs in either two-column or multi-line format +# depending on whether all values are single-line. +# +# If all values are single-line, print all pairs in two-column format +# with the specified key column width (identical to using +# `batslib_print_kv_single'). 
+# +# Otherwise, print all pairs in multi-line format after indenting values +# with two spaces for readability (identical to using `batslib_prefix' +# and `batslib_print_kv_multi') +# +# Globals: +# none +# Arguments: +# $1 - width of key column (for two-column format) +# $even - key +# $odd - value of the previous key +# Returns: +# none +# Outputs: +# STDOUT - formatted key-value pairs +batslib_print_kv_single_or_multi() { + local -ir width="$1"; shift + local -a pairs=( "$@" ) + + local -a values=() + local -i i + for (( i=1; i < ${#pairs[@]}; i+=2 )); do + values+=( "${pairs[$i]}" ) + done + + if batslib_is_single_line "${values[@]}"; then + batslib_print_kv_single "$width" "${pairs[@]}" + else + local -i i + for (( i=1; i < ${#pairs[@]}; i+=2 )); do + pairs[$i]="$( batslib_prefix < <(printf '%s' "${pairs[$i]}") )" + done + batslib_print_kv_multi "${pairs[@]}" + fi +} + +# Prefix each line read from the standard input with the given string. +# +# Globals: +# none +# Arguments: +# $1 - [= ] prefix string +# Returns: +# none +# Inputs: +# STDIN - lines +# Outputs: +# STDOUT - prefixed lines +batslib_prefix() { + local -r prefix="${1:- }" + local line + while IFS='' read -r line || [[ -n $line ]]; do + printf '%s%s\n' "$prefix" "$line" + done +} + +# Mark select lines of the text read from the standard input by +# overwriting their beginning with the given string. +# +# Usually the input is indented by a few spaces using `batslib_prefix' +# first. +# +# Globals: +# none +# Arguments: +# $1 - marking string +# $@ - indices (zero-based) of lines to mark +# Returns: +# none +# Inputs: +# STDIN - lines +# Outputs: +# STDOUT - lines after marking +batslib_mark() { + local -r symbol="$1"; shift + # Sort line numbers. + set -- $( sort -nu <<< "$( printf '%d\n' "$@" )" ) + + local line + local -i idx=0 + while IFS='' read -r line || [[ -n $line ]]; do + if (( ${1:--1} == idx )); then + printf '%s\n' "${symbol}${line:${#symbol}}" + shift + else + printf '%s\n' "$line" + fi + (( ++idx )) + done +} + +# Enclose the input text in header and footer lines. +# +# The header contains the given string as title. The output is preceded +# and followed by an additional newline to make it stand out more. +# +# Globals: +# none +# Arguments: +# $1 - title +# Returns: +# none +# Inputs: +# STDIN - text +# Outputs: +# STDOUT - decorated text +batslib_decorate() { + echo + echo "-- $1 --" + cat - + echo '--' + echo +} diff --git a/test/node_modules/bats/LICENSE.md b/test/node_modules/bats/LICENSE.md new file mode 100644 index 000000000..0c7429978 --- /dev/null +++ b/test/node_modules/bats/LICENSE.md @@ -0,0 +1,53 @@ +Copyright (c) 2017 bats-core contributors + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +--- + +* [bats-core] is a continuation of [bats]. Copyright for portions of the + bats-core project are held by Sam Stephenson, 2014 as part of the project + [bats], licensed under MIT: + +Copyright (c) 2014 Sam Stephenson + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +For details, please see the [version control history][commits]. + +[bats-core]: https://github.com/bats-core/bats-core +[bats]:https://github.com/sstephenson/bats +[commits]:https://github.com/bats-core/bats-core/commits/master diff --git a/test/node_modules/bats/README.md b/test/node_modules/bats/README.md new file mode 100644 index 000000000..a52ca13b3 --- /dev/null +++ b/test/node_modules/bats/README.md @@ -0,0 +1,604 @@ +# Bats-core: Bash Automated Testing System (2018) + +[![Latest release](https://img.shields.io/github/release/bats-core/bats-core.svg)](https://github.com/bats-core/bats-core/releases/latest) +[![npm package](https://img.shields.io/npm/v/bats.svg)](https://www.npmjs.com/package/bats) +[![License](https://img.shields.io/github/license/bats-core/bats-core.svg)](https://github.com/bats-core/bats-core/blob/master/LICENSE.md) +[![Continuous integration status for Linux and macOS](https://img.shields.io/travis/bats-core/bats-core/master.svg?label=travis%20build)](https://travis-ci.org/bats-core/bats-core) +[![Continuous integration status for Windows](https://img.shields.io/appveyor/ci/bats-core/bats-core/master.svg?label=appveyor%20build)](https://ci.appveyor.com/project/bats-core/bats-core) + +[![Join the chat in bats-core/bats-core on gitter](https://badges.gitter.im/bats-core/bats-core.svg)][gitter] + +Bats is a [TAP][]-compliant testing framework for Bash. It provides a simple +way to verify that the UNIX programs you write behave as expected. + +[TAP]: https://testanything.org + +A Bats test file is a Bash script with special syntax for defining test cases. +Under the hood, each test case is just a function with a description. + +```bash +#!/usr/bin/env bats + +@test "addition using bc" { + result="$(echo 2+2 | bc)" + [ "$result" -eq 4 ] +} + +@test "addition using dc" { + result="$(echo 2 2+p | dc)" + [ "$result" -eq 4 ] +} +``` + +Bats is most useful when testing software written in Bash, but you can use it to +test any UNIX program. 
+ +Test cases consist of standard shell commands. Bats makes use of Bash's +`errexit` (`set -e`) option when running test cases. If every command in the +test case exits with a `0` status code (success), the test passes. In this way, +each line is an assertion of truth. + +**Tuesday, September 19, 2017:** This is a mirrored fork of [Bats][bats-orig] at +commit [0360811][]. It was created via `git clone --bare` and `git push +--mirror`. See the [Background](#background) section below for more information. + +[bats-orig]: https://github.com/sstephenson/bats +[0360811]: https://github.com/sstephenson/bats/commit/03608115df2071fff4eaaff1605768c275e5f81f + +## Table of contents + +- [Installation](#installation) + - [Supported Bash versions](#supported-bash-versions) + - [Homebrew](#homebrew) + - [npm](#npm) + - [Installing Bats from source](#installing-bats-from-source) + - [Running Bats in Docker](#running-bats-in-docker) + - [Building a Docker image](#building-a-docker-image) +- [Usage](#usage) +- [Writing tests](#writing-tests) + - [`run`: Test other commands](#run-test-other-commands) + - [`load`: Share common code](#load-share-common-code) + - [`skip`: Easily skip tests](#skip-easily-skip-tests) + - [`setup` and `teardown`: Pre- and post-test hooks](#setup-and-teardown-pre--and-post-test-hooks) + - [Code outside of test cases](#code-outside-of-test-cases) + - [File descriptor 3 (read this if Bats hangs)](#file-descriptor-3-read-this-if-bats-hangs) + - [Printing to the terminal](#printing-to-the-terminal) + - [Special variables](#special-variables) +- [Support](#support) +- [Version history](#version-history) +- [Background](#background) + - [Why was this fork created?](#why-was-this-fork-created) + - [What's the plan and why?](#whats-the-plan-and-why) + - [Contact us](#contact-us) +- [Copyright](#copyright) + +## Installation + +### Supported Bash versions + +The following is a list of Bash versions that are currently supported by Bats. +This list is composed of platforms that Bats has been tested on and is known to +work on without issues. + +- Bash versions: + - Everything from `3.2.57(1)` and higher (macOS's highest version) + +- Operating systems: + - Arch Linux + - Alpine Linux + - Ubuntu Linux + - FreeBSD `10.x` and `11.x` + - macOS + - Windows 10 + +- Latest version for the following Windows platforms: + - Git for Windows Bash (MSYS2 based) + - Windows Subsystem for Linux + - MSYS2 + - Cygwin + +### Homebrew + +On macOS, you can install [Homebrew](https://brew.sh/) if you haven't already, +then run: + +```bash +$ brew install bats-core +``` + +### npm + +You can install the [Bats npm package](https://www.npmjs.com/package/bats) via: + +``` +# To install globally: +$ npm install -g bats + +# To install into your project and save it as one of the "devDependencies" in +# your package.json: +$ npm install --save-dev bats +``` + +### Installing Bats from source + +Check out a copy of the Bats repository. Then, either add the Bats `bin` +directory to your `$PATH`, or run the provided `install.sh` command with the +location to the prefix in which you want to install Bats. For example, to +install Bats into `/usr/local`, + + $ git clone https://github.com/bats-core/bats-core.git + $ cd bats-core + $ ./install.sh /usr/local + +Note that you may need to run `install.sh` with `sudo` if you do not have +permission to write to the installation prefix. 
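+
+Whichever installation route you choose, a quick sanity check is to ask the
+interpreter for its version and have it count the test cases it can discover.
+A minimal sketch, assuming a hypothetical `test/` directory of `.bats` files:
+
+```bash
+# Confirm the bats executable is on PATH and report its version.
+bats --version
+
+# Count the test cases under test/ (recursively) without running them.
+bats --count --recursive test/
+```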
+
+### Running Bats in Docker
+
+There is an official image on the Docker Hub:
+
+    $ docker run -it bats/bats:latest --version
+
+#### Building a Docker image
+
+Check out a copy of the Bats repository, then build a container image:
+
+    $ git clone https://github.com/bats-core/bats-core.git
+    $ cd bats-core
+    $ docker build --tag bats/bats:latest .
+
+This creates a local Docker image called `bats/bats:latest` based on [Alpine
+Linux](https://github.com/gliderlabs/docker-alpine/blob/master/docs/usage.md)
+(to push to private registries, tag it with another organisation, e.g.
+`my-org/bats:latest`).
+
+To run Bats' internal test suite (which is in the container image at
+`/opt/bats/test`):
+
+    $ docker run -it bats/bats:latest /opt/bats/test
+
+To run a test suite from your local machine, mount in a volume and direct Bats
+to its path inside the container:
+
+    $ docker run -it -v "$(pwd):/code" bats/bats:latest /code/test
+
+This is a minimal Docker image. If more tools are required, it can be used as a
+base image in a Dockerfile using `FROM <image>` syntax. In the future there may
+be images based on Debian, and/or with more tools installed (`curl` and `openssl`,
+for example). If you require a specific configuration, please search and +1 an
+issue or [raise a new issue](https://github.com/bats-core/bats-core/issues).
+
+Further usage examples are in [the wiki](https://github.com/bats-core/bats-core/wiki/Docker-Usage-Examples).
+
+## Usage
+
+Bats comes with two manual pages. After installation you can view them with `man
+1 bats` (usage manual) and `man 7 bats` (writing test files manual). You can
+also view the command line options that Bats supports by calling Bats with the
+`-h` or `--help` option. These are the options that Bats currently supports:
+
+```
+Bats x.y.z
+Usage: bats [-c] [-r] [-p | -t] <test> [<test> ...]
+
+  <test> is the path to a Bats test file, or the path to a directory
+  containing Bats test files.
+
+  -c, --count      Count the number of test cases without running any tests
+  -h, --help       Display this help message
+  -p, --pretty     Show results in pretty format (default for terminals)
+  -r, --recursive  Include tests in subdirectories
+  -t, --tap        Show results in TAP format
+  -v, --version    Display the version number
+```
+
+To run your tests, invoke the `bats` interpreter with one or more paths to test
+files ending with the `.bats` extension, or paths to directories containing test
+files. (`bats` will only discover `.bats` files at the top level of each
+directory; it will not recurse.)
+
+Test cases from each file are run sequentially and in isolation. If all the test
+cases pass, `bats` exits with a `0` status code. If there are any failures,
+`bats` exits with a `1` status code.
+
+When you run Bats from a terminal, you'll see output as each test is performed,
+with a check-mark next to the test's name if it passes or an "X" if it fails.
+
+    $ bats addition.bats
+    ✓ addition using bc
+    ✓ addition using dc
+
+    2 tests, 0 failures
+
+If Bats is not connected to a terminal—in other words, if you run it from a
+continuous integration system, or redirect its output to a file—the results are
+displayed in human-readable, machine-parsable [TAP format][TAP].
+
+You can force TAP output from a terminal by invoking Bats with the `--tap`
+option.
+
+    $ bats --tap addition.bats
+    1..2
+    ok 1 addition using bc
+    ok 2 addition using dc
+
+## Writing tests
+
+Each Bats test file is evaluated _n+1_ times, where _n_ is the number of test
+cases in the file. The first run counts the number of test cases, then iterates
+over the test cases and executes each one in its own process.
+
+For more details about how Bats evaluates test files, see [Bats Evaluation
+Process][bats-eval] on the wiki.
+
+[bats-eval]: https://github.com/bats-core/bats-core/wiki/Bats-Evaluation-Process
+
+### `run`: Test other commands
+
+Many Bats tests need to run a command and then make assertions about its exit
+status and output. Bats includes a `run` helper that invokes its arguments as a
+command, saves the exit status and output into special global variables, and
+then returns with a `0` status code so you can continue to make assertions in
+your test case.
+
+For example, let's say you're testing that the `foo` command, when passed a
+nonexistent filename, exits with a `1` status code and prints an error message.
+
+```bash
+@test "invoking foo with a nonexistent file prints an error" {
+  run foo nonexistent_filename
+  [ "$status" -eq 1 ]
+  [ "$output" = "foo: no such file 'nonexistent_filename'" ]
+}
+```
+
+The `$status` variable contains the status code of the command, and the
+`$output` variable contains the combined contents of the command's standard
+output and standard error streams.
+
+A third special variable, the `$lines` array, is available for easily accessing
+individual lines of output. For example, if you want to test that invoking `foo`
+without any arguments prints usage information on the first line:
+
+```bash
+@test "invoking foo without arguments prints usage" {
+  run foo
+  [ "$status" -eq 1 ]
+  [ "${lines[0]}" = "usage: foo <filename>" ]
+}
+```
+
+### `load`: Share common code
+
+You may want to share common code across multiple test files. Bats includes a
+convenient `load` command for sourcing a Bash source file relative to the
+location of the current test file. For example, if you have a Bats test in
+`test/foo.bats`, the command
+
+```bash
+load test_helper
+```
+
+will source the script `test/test_helper.bash` in your test file. This can be
+useful for sharing functions to set up your environment or load fixtures.
+
+### `skip`: Easily skip tests
+
+Tests can be skipped by using the `skip` command at the point in a test you wish
+to skip.
+
+```bash
+@test "A test I don't want to execute for now" {
+  skip
+  run foo
+  [ "$status" -eq 0 ]
+}
+```
+
+Optionally, you may include a reason for skipping:
+
+```bash
+@test "A test I don't want to execute for now" {
+  skip "This command will return zero soon, but not now"
+  run foo
+  [ "$status" -eq 0 ]
+}
+```
+
+Or you can skip conditionally:
+
+```bash
+@test "A test which should run" {
+  if [ foo != bar ]; then
+    skip "foo isn't bar"
+  fi
+
+  run foo
+  [ "$status" -eq 0 ]
+}
+```
+
+### `setup` and `teardown`: Pre- and post-test hooks
+
+You can define special `setup` and `teardown` functions, which run before and
+after each test case, respectively. Use these to load fixtures, set up your
+environment, and clean up when you're done.
+
+### Code outside of test cases
+
+You can include code in your test file outside of `@test` functions. For
+example, this may be useful if you want to check for dependencies and fail
+immediately if they're not present. However, any output that you print in code
+outside of `@test`, `setup` or `teardown` functions must be redirected to
+`stderr` (`>&2`). Otherwise, the output may cause Bats to fail by polluting the
+TAP stream on `stdout`.
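+
+To make that rule concrete, here is a minimal sketch of a dependency check
+placed outside of any `@test` function; `jq` is just an illustrative
+dependency, not something Bats itself requires:
+
+```bash
+#!/usr/bin/env bats
+
+# This runs on every evaluation of the file, so output goes to stderr
+# (>&2) to avoid polluting the TAP stream on stdout.
+if ! command -v jq >/dev/null; then
+  echo 'these tests require jq to be installed' >&2
+  exit 1
+fi
+
+@test "jq evaluates a trivial expression" {
+  run jq -n '1 + 1'
+  [ "$status" -eq 0 ]
+  [ "$output" = "2" ]
+}
+```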
+
+### File descriptor 3 (read this if Bats hangs)
+
+Bats makes a separation between output from the code under test and output that
+forms the TAP stream (which is produced by Bats internals). This is done in
+order to produce TAP-compliant output. In the [Printing to the
+terminal](#printing-to-the-terminal) section, there are details on how to use
+file descriptor 3 to print custom text properly.
+
+A side effect of using file descriptor 3 is that, under some circumstances, it
+can cause Bats to block and execution to seem dead without reason. This can
+happen if a child process is spawned in the background from a test. In this
+case, the child process will inherit file descriptor 3. Bats, as the parent
+process, will wait for the file descriptor to be closed by the child process
+before continuing execution. If the child process takes a lot of time to
+complete (eg if the child process is a `sleep 100` command or a background
+service that will run indefinitely), Bats will be similarly blocked for the same
+amount of time.
+
+**To prevent this from happening, close FD 3 explicitly when running any command
+that may launch long-running child processes**, e.g. `command_name 3>&- &`.
+
+### Printing to the terminal
+
+Bats produces output compliant with [version 12 of the TAP protocol][TAP]. The
+produced TAP stream is by default piped to a pretty formatter for human
+consumption, but if Bats is called with the `-t` flag, then the TAP stream is
+directly printed to the console.
+
+This has implications if you try to print custom text to the terminal. As
+mentioned in [File descriptor 3](#file-descriptor-3), bats provides a special
+file descriptor, `&3`, that you should use to print your custom text. Here are
+some detailed guidelines to refer to:
+
+- Printing **from within a test function**:
+  - To have text printed from within a test function you need to redirect the
+    output to file descriptor 3, eg `echo 'text' >&3`. This output will become
+    part of the TAP stream. You are encouraged to prepend text printed this way
+    with a hash (eg `echo '# text' >&3`) in order to produce 100% TAP compliant
+    output. Otherwise, depending on the 3rd-party tools you use to analyze the
+    TAP stream, you can encounter unexpected behavior or errors.
+
+  - The pretty formatter that Bats uses by default to process the TAP stream
+    will filter out and not print text output to file descriptor 3.
+
+  - Text that is output directly to stdout or stderr (file descriptor 1 or 2),
+    ie `echo 'text'`, is considered part of the test function output and is
+    printed only on test failures for diagnostic purposes, regardless of the
+    formatter used (TAP or pretty).
+
+- Printing **from within the `setup` or `teardown` functions**: The same holds
+  true as for printing from within test functions.
+
+- Printing **outside test or `setup`/`teardown` functions**:
+  - Regardless of where text is redirected to (stdout, stderr or file descriptor
+    3) text is immediately visible in the terminal.
+
+  - Text printed in such a way will disable pretty formatting. Also, it will
+    make output non-compliant with the TAP spec. The reason for this is that
+    each test file is evaluated n+1 times (as mentioned
+    [earlier](#writing-tests)). The first run will cause such output to be
+    produced before the [_plan line_][tap-plan] is printed, contrary to the spec
+    that requires the _plan line_ to be either the first or the last line of the
+    output.
+
+  - Due to internal pipes/redirects, output to stderr is always printed first.
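+
+Combining the guidelines above, a short hedged sketch: a TAP-friendly
+diagnostic on file descriptor 3, and FD 3 closed for a background child so
+Bats does not hang waiting on it (`slow_daemon` is a hypothetical command):
+
+```bash
+@test "a background child does not block Bats" {
+  # A '#'-prefixed diagnostic on FD 3 keeps the TAP stream compliant.
+  echo '# starting slow_daemon' >&3
+
+  # Close FD 3 for the child so Bats does not wait on it (see above).
+  slow_daemon 3>&- &
+
+  # Clean up so the child does not outlive the test.
+  kill "$!"
+}
+```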
+ +[tap-plan]: https://testanything.org/tap-specification.html#the-plan + +### Special variables + +There are several global variables you can use to introspect on Bats tests: + +* `$BATS_TEST_FILENAME` is the fully expanded path to the Bats test file. +* `$BATS_TEST_DIRNAME` is the directory in which the Bats test file is located. +* `$BATS_TEST_NAMES` is an array of function names for each test case. +* `$BATS_TEST_NAME` is the name of the function containing the current test + case. +* `$BATS_TEST_DESCRIPTION` is the description of the current test case. +* `$BATS_TEST_NUMBER` is the (1-based) index of the current test case in the + test file. +* `$BATS_TMPDIR` is the location to a directory that may be used to store + temporary files. + +## Support + +The Bats source code repository is [hosted on +GitHub](https://github.com/bats-core/bats-core). There you can file bugs on the +issue tracker or submit tested pull requests for review. + +For real-world examples from open-source projects using Bats, see [Projects +Using Bats](https://github.com/bats-core/bats-core/wiki/Projects-Using-Bats) on +the wiki. + +To learn how to set up your editor for Bats syntax highlighting, see [Syntax +Highlighting](https://github.com/bats-core/bats-core/wiki/Syntax-Highlighting) +on the wiki. + +## Version history + +Bats is [SemVer compliant](https://semver.org/). + +*1.1.0* (July 8, 2018) + +This is the first release with new features relative to the original Bats 0.4.0. + +Added: +* The `-r, --recursive` flag to scan directory arguments recursively for + `*.bats` files (#109) +* The `contrib/rpm/bats.spec` file to build RPMs (#111) + +Changed: +* Travis exercises latest versions of Bash from 3.2 through 4.4 (#116, #117) +* Error output highlights invalid command line options (#45, #46, #118) +* Replaced `echo` with `printf` (#120) + +Fixed: +* Fixed `BATS_ERROR_STATUS` getting lost when `bats_error_trap` fired multiple + times under Bash 4.2.x (#110) +* Updated `bin/bats` symlink resolution, handling the case on CentOS where + `/bin` is a symlink to `/usr/bin` (#113, #115) + +*1.0.2* (June 18, 2018) + +* Fixed sstephenson/bats#240, whereby `skip` messages containing parentheses + were truncated (#48) +* Doc improvements: + * Docker usage (#94) + * Better README badges (#101) + * Better installation instructions (#102, #104) +* Packaging/installation improvements: + * package.json update (#100) + * Moved `libexec/` files to `libexec/bats-core/`, improved `install.sh` (#105) + +*1.0.1* (June 9, 2018) + +* Fixed a `BATS_CWD` bug introduced in #91 whereby it was set to the parent of + `PWD`, when it should've been set to `PWD` itself (#98). This caused file + names in stack traces to contain the basename of `PWD` as a prefix, when the + names should've been purely relative to `PWD`. +* Ensure the last line of test output prints when it doesn't end with a newline + (#99). This was a quasi-bug introduced by replacing `sed` with `while` in #88. + +*1.0.0* (June 8, 2018) + +`1.0.0` generally preserves compatibility with `0.4.0`, but with some Bash +compatibility improvements and a massive performance boost. In other words: + +- all existing tests should remain compatible +- tests that might've failed or exhibited unexpected behavior on earlier + versions of Bash should now also pass or behave as expected + +Changes: + +* Added support for Docker. +* Added support for test scripts that have the [unofficial strict + mode](http://redsymbol.net/articles/unofficial-bash-strict-mode/) enabled. 
+* Improved stability on Windows and macOS platforms. +* Massive performance improvements, especially on Windows (#8) +* Workarounds for inconsistent behavior between Bash versions (#82) +* Workaround for preserving stack info after calling an exported function under + Bash < 4.4 (#87) +* Fixed TAP compliance for skipped tests +* Added support for tabs in test names. +* `bin/bats` and `install.sh` now work reliably on Windows (#91) + +*0.4.0* (August 13, 2014) + +* Improved the display of failing test cases. Bats now shows the source code of + failing test lines, along with full stack traces including function names, + filenames, and line numbers. +* Improved the display of the pretty-printed test summary line to include the + number of skipped tests, if any. +* Improved the speed of the preprocessor, dramatically shortening test and suite + startup times. +* Added support for absolute pathnames to the `load` helper. +* Added support for single-line `@test` definitions. +* Added bats(1) and bats(7) manual pages. +* Modified the `bats` command to default to TAP output when the `$CI` variable + is set, to better support environments such as Travis CI. + +*0.3.1* (October 28, 2013) + +* Fixed an incompatibility with the pretty formatter in certain environments + such as tmux. +* Fixed a bug where the pretty formatter would crash if the first line of a test + file's output was invalid TAP. + +*0.3.0* (October 21, 2013) + +* Improved formatting for tests run from a terminal. Failing tests are now + colored in red, and the total number of failing tests is displayed at the end + of the test run. When Bats is not connected to a terminal (e.g. in CI runs), + or when invoked with the `--tap` flag, output is displayed in standard TAP + format. +* Added the ability to skip tests using the `skip` command. +* Added a message to failing test case output indicating the file and line + number of the statement that caused the test to fail. +* Added "ad-hoc" test suite support. You can now invoke `bats` with multiple + filename or directory arguments to run all the specified tests in aggregate. +* Added support for test files with Windows line endings. +* Fixed regular expression warnings from certain versions of Bash. +* Fixed a bug running tests containing lines that begin with `-e`. + +*0.2.0* (November 16, 2012) + +* Added test suite support. The `bats` command accepts a directory name + containing multiple test files to be run in aggregate. +* Added the ability to count the number of test cases in a file or suite by + passing the `-c` flag to `bats`. +* Preprocessed sources are cached between test case runs in the same file for + better performance. + +*0.1.0* (December 30, 2011) + +* Initial public release. + +--- + +## Background + +### Why was this fork created? + +The original Bats repository needed new maintainers, and has not been actively +maintained since 2013. While there were volunteers for maintainers, attempts to +organize issues, and outstanding PRs, the lack of write-access to the repo +hindered progress severely. + +### What's the plan and why? + +The rough plan, originally [outlined +here](https://github.com/sstephenson/bats/issues/150#issuecomment-323845404) is +to create a new, mirrored mainline (this repo!). An excerpt: + +> **1. Roadmap 1.0:** +> There are already existing high-quality PRs, and often-requested features and +> issues, especially here at +> [#196](https://github.com/sstephenson/bats/issues/196). Leverage these and +> **consolidate into a single roadmap**. +> +> **2. 
Create or choose a fork or *mirror* of this repo to use as the new +> mainline:** +> Repoint existing PRs (whichever ones are possible) to the new mainline, get +> that repo to a stable 1.0. IMO we should create an organization and grant 2-3 +> people admin and write access. + +Doing it this way accomplishes a number of things: + +1. Removes the dependency on the original maintainer +1. Enables collaboration and contribution flow again +1. Allows the possibility of merging back to original, or merging from original + if or when the need arises +1. Prevents lock-out by giving administrative access to more than one person, + increases transferability + +### Contact us + +- We are `#bats` on freenode + +## Copyright + +© 2018 bats-core organization + +© 2014 Sam Stephenson + +Bats is released under an MIT-style license; see `LICENSE.md` for details. + +[gitter]: https://gitter.im/bats-core/bats-core?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge diff --git a/test/node_modules/bats/bin/bats b/test/node_modules/bats/bin/bats new file mode 100755 index 000000000..a852306c9 --- /dev/null +++ b/test/node_modules/bats/bin/bats @@ -0,0 +1,50 @@ +#!/usr/bin/env bash + +set -e + +export BATS_READLINK='true' +if command -v 'greadlink' >/dev/null; then + BATS_READLINK='greadlink' +elif command -v 'readlink' >/dev/null; then + BATS_READLINK='readlink' +fi + +bats_resolve_link() { + if ! "$BATS_READLINK" "$1"; then + return 0 + fi +} + +bats_resolve_absolute_root_dir() { + local cwd="$PWD" + local path="$1" + local result="$2" + local target_dir + local target_name + local original_shell_options="$-" + + # Resolve the parent directory, e.g. /bin => /usr/bin on CentOS (#113). + set -P + + while true; do + target_dir="${path%/*}" + target_name="${path##*/}" + + if [[ "$target_dir" != "$path" ]]; then + cd "$target_dir" + fi + + if [[ -L "$target_name" ]]; then + path="$(bats_resolve_link "$target_name")" + else + printf -v "$result" -- '%s' "${PWD%/*}" + set +P "-$original_shell_options" + cd "$cwd" + return + fi + done +} + +export BATS_ROOT +bats_resolve_absolute_root_dir "$0" 'BATS_ROOT' +exec "$BATS_ROOT/libexec/bats-core/bats" "$@" diff --git a/test/node_modules/bats/libexec/bats-core/bats b/test/node_modules/bats/libexec/bats-core/bats new file mode 100755 index 000000000..256af43e5 --- /dev/null +++ b/test/node_modules/bats/libexec/bats-core/bats @@ -0,0 +1,158 @@ +#!/usr/bin/env bash +set -e + +version() { + printf 'Bats 1.1.0\n' +} + +usage() { + version + printf 'Usage: bats [-c] [-r] [-p | -t] [ ...]\n' +} + +abort() { + printf 'Error: %s\n' "$1" >&2 + usage >&2 + exit 1 +} + +help() { + local line + usage + while read -r line; do + printf '%s\n' "$line" + done < is the path to a Bats test file, or the path to a directory + containing Bats test files. 
+ + -c, --count Count the number of test cases without running any tests + -h, --help Display this help message + -p, --pretty Show results in pretty format (default for terminals) + -r, --recursive Include tests in subdirectories + -t, --tap Show results in TAP format + -v, --version Display the version number + + For more information, see https://github.com/bats-core/bats-core + +END_OF_HELP_TEXT +} + +expand_path() { + local path="${1%/}" + local dirname="${path%/*}" + local result="$2" + + if [[ "$dirname" == "$path" ]]; then + dirname="$PWD" + else + cd "$dirname" + dirname="$PWD" + cd "$OLDPWD" + fi + printf -v "$result" '%s/%s' "$dirname" "${path##*/}" +} + +export BATS_CWD="$PWD" +export BATS_TEST_PATTERN="^[[:blank:]]*@test[[:blank:]]+(.*[^[:blank:]])[[:blank:]]+\{(.*)\$" +export PATH="$BATS_ROOT/libexec/bats-core:$PATH" + +options=() +arguments=() +for arg in "$@"; do + if [[ "${arg:0:1}" = "-" ]]; then + if [[ "${arg:1:1}" = "-" ]]; then + options[${#options[*]}]="${arg:2}" + else + index=1 + while option="${arg:$index:1}"; do + if [[ -z "$option" ]]; then + break + fi + options[${#options[*]}]="$option" + let index+=1 + done + fi + else + arguments[${#arguments[*]}]="$arg" + fi +done + +unset count_flag pretty recursive +count_flag='' +pretty='' +recursive='' +if [[ -z "${CI:-}" && -t 0 && -t 1 ]]; then + pretty=1 +fi + +if [[ "${#options[@]}" -ne 0 ]]; then + for option in "${options[@]}"; do + case "$option" in + "h" | "help" ) + help + exit 0 + ;; + "v" | "version" ) + version + exit 0 + ;; + "c" | "count" ) + count_flag="-c" + ;; + "r" | "recursive" ) + recursive=1 + ;; + "t" | "tap" ) + pretty="" + ;; + "p" | "pretty" ) + pretty=1 + ;; + * ) + abort "Bad command line option '-$option'" + ;; + esac + done +fi + +if [[ "${#arguments[@]}" -eq 0 ]]; then + abort 'Must specify at least one ' +fi + +filenames=() +for filename in "${arguments[@]}"; do + expand_path "$filename" 'filename' + + if [[ -d "$filename" ]]; then + shopt -s nullglob + if [[ "$recursive" -eq 1 ]]; then + while IFS= read -r -d $'\0' file; do + filenames["${#filenames[@]}"]="$file" + done < <(find "$filename" -type f -name "*.bats" -print0 | sort -z) + else + for suite_filename in "$filename"/*.bats; do + filenames["${#filenames[@]}"]="$suite_filename" + done + fi + shopt -u nullglob + else + filenames["${#filenames[@]}"]="$filename" + fi +done + +if [[ "${#filenames[@]}" -eq 1 ]]; then + command="bats-exec-test" +else + command="bats-exec-suite" +fi + +set -o pipefail execfail +if [[ -z "$pretty" ]]; then + exec "$command" $count_flag "${filenames[@]}" +else + extended_syntax_flag="-x" + formatter="bats-format-tap-stream" + exec "$command" $count_flag $extended_syntax_flag "${filenames[@]}" | + "$formatter" +fi diff --git a/test/node_modules/bats/libexec/bats-core/bats-exec-suite b/test/node_modules/bats/libexec/bats-core/bats-exec-suite new file mode 100755 index 000000000..4f288273e --- /dev/null +++ b/test/node_modules/bats/libexec/bats-core/bats-exec-suite @@ -0,0 +1,63 @@ +#!/usr/bin/env bash +set -e + +count_only_flag="" +if [[ "$1" = "-c" ]]; then + count_only_flag=1 + shift +fi + +extended_syntax_flag="" +if [[ "$1" = "-x" ]]; then + extended_syntax_flag="-x" + shift +fi + +trap "kill 0; exit 1" int + +count=0 +for filename in "$@"; do + while IFS= read -r line; do + if [[ "$line" =~ $BATS_TEST_PATTERN ]]; then + let count+=1 + fi + done <"$filename" +done + +if [[ -n "$count_only_flag" ]]; then + printf '%d\n' "$count" + exit +fi + +printf '1..%d\n' "$count" +status=0 +offset=0 +for filename in 
"$@"; do + index=0 + { + IFS= read -r # 1..n + while IFS= read -r line; do + case "$line" in + "begin "* ) + let index+=1 + printf '%s\n' "${line/ $index / $(($offset + $index)) }" + ;; + "ok "* | "not ok "* ) + if [[ -z "$extended_syntax_flag" ]]; then + let index+=1 + fi + printf '%s\n' "${line/ $index / $(($offset + $index)) }" + if [[ "${line:0:6}" == "not ok" ]]; then + status=1 + fi + ;; + * ) + printf '%s\n' "$line" + ;; + esac + done + } < <( bats-exec-test $extended_syntax_flag "$filename" ) + offset=$(($offset + $index)) +done + +exit "$status" diff --git a/test/node_modules/bats/libexec/bats-core/bats-exec-test b/test/node_modules/bats/libexec/bats-core/bats-exec-test new file mode 100755 index 000000000..f91d521af --- /dev/null +++ b/test/node_modules/bats/libexec/bats-core/bats-exec-test @@ -0,0 +1,421 @@ +#!/usr/bin/env bash +set -eET + +BATS_COUNT_ONLY="" +if [[ "$1" = "-c" ]]; then + BATS_COUNT_ONLY=1 + shift +fi + +BATS_EXTENDED_SYNTAX="" +if [[ "$1" = "-x" ]]; then + BATS_EXTENDED_SYNTAX="$1" + shift +fi + +BATS_TEST_FILENAME="$1" +if [[ -z "$BATS_TEST_FILENAME" ]]; then + printf 'usage: bats-exec-test \n' >&2 + exit 1 +elif [[ ! -f "$BATS_TEST_FILENAME" ]]; then + printf 'bats: %s does not exist\n' "$BATS_TEST_FILENAME" >&2 + exit 1 +else + shift +fi + +BATS_TEST_DIRNAME="${BATS_TEST_FILENAME%/*}" +BATS_TEST_NAMES=() + +load() { + local name="$1" + local filename + + if [[ "${name:0:1}" = "/" ]]; then + filename="${name}" + else + filename="$BATS_TEST_DIRNAME/${name}.bash" + fi + + if [[ ! -f "$filename" ]]; then + printf 'bats: %s does not exist\n' "$filename" >&2 + exit 1 + fi + + source "${filename}" +} + +run() { + local origFlags="$-" + set +eET + local origIFS="$IFS" + output="$("$@" 2>&1)" + status="$?" + IFS=$'\n' lines=($output) + IFS="$origIFS" + set "-$origFlags" +} + +setup() { + return 0 +} + +teardown() { + return 0 +} + +BATS_TEST_SKIPPED='' +skip() { + BATS_TEST_SKIPPED="${1:-1}" + BATS_TEST_COMPLETED=1 + exit 0 +} + +bats_test_begin() { + BATS_TEST_DESCRIPTION="$1" + if [[ -n "$BATS_EXTENDED_SYNTAX" ]]; then + printf 'begin %d %s\n' "$BATS_TEST_NUMBER" "$BATS_TEST_DESCRIPTION" >&3 + fi + setup +} + +bats_test_function() { + local test_name="$1" + BATS_TEST_NAMES+=("$test_name") +} + +BATS_CURRENT_STACK_TRACE=() +BATS_PREVIOUS_STACK_TRACE=() +BATS_ERROR_STACK_TRACE=() + +bats_capture_stack_trace() { + if [[ "${#BATS_CURRENT_STACK_TRACE[@]}" -ne 0 ]]; then + BATS_PREVIOUS_STACK_TRACE=("${BATS_CURRENT_STACK_TRACE[@]}") + fi + BATS_CURRENT_STACK_TRACE=() + + local test_pattern=" $BATS_TEST_NAME $BATS_TEST_SOURCE" + local setup_pattern=" setup $BATS_TEST_SOURCE" + local teardown_pattern=" teardown $BATS_TEST_SOURCE" + + local source_file + local frame + local i + + for ((i=2; i != ${#FUNCNAME[@]}; ++i)); do + # Use BATS_TEST_SOURCE if necessary to work around Bash < 4.4 bug whereby + # calling an exported function erases the test file's BASH_SOURCE entry. 
+ source_file="${BASH_SOURCE[$i]:-$BATS_TEST_SOURCE}" + frame="${BASH_LINENO[$((i-1))]} ${FUNCNAME[$i]} $source_file" + BATS_CURRENT_STACK_TRACE["${#BATS_CURRENT_STACK_TRACE[@]}"]="$frame" + if [[ "$frame" = *"$test_pattern" || \ + "$frame" = *"$setup_pattern" || \ + "$frame" = *"$teardown_pattern" ]]; then + break + fi + done + + bats_frame_filename "${BATS_CURRENT_STACK_TRACE[0]}" 'BATS_SOURCE' + bats_frame_lineno "${BATS_CURRENT_STACK_TRACE[0]}" 'BATS_LINENO' +} + +bats_print_stack_trace() { + local frame + local index=1 + local count="${#@}" + local filename + local lineno + + for frame in "$@"; do + bats_frame_filename "$frame" 'filename' + bats_trim_filename "$filename" 'filename' + bats_frame_lineno "$frame" 'lineno' + + if [[ $index -eq 1 ]]; then + printf '# (' + else + printf '# ' + fi + + local fn + bats_frame_function "$frame" 'fn' + if [[ "$fn" != "$BATS_TEST_NAME" ]]; then + printf "from function \`%s' " "$fn" + fi + + if [[ $index -eq $count ]]; then + printf 'in test file %s, line %d)\n' "$filename" "$lineno" + else + printf 'in file %s, line %d,\n' "$filename" "$lineno" + fi + + let index+=1 + done +} + +bats_print_failed_command() { + local frame="$1" + local status="$2" + local filename + local lineno + local failed_line + local failed_command + + bats_frame_filename "$frame" 'filename' + bats_frame_lineno "$frame" 'lineno' + bats_extract_line "$filename" "$lineno" 'failed_line' + bats_strip_string "$failed_line" 'failed_command' + printf '%s' "# \`${failed_command}' " + + if [[ $status -eq 1 ]]; then + printf 'failed\n' + else + printf 'failed with status %d\n' "$status" + fi +} + +bats_frame_lineno() { + printf -v "$2" '%s' "${1%% *}" +} + +bats_frame_function() { + local __bff_function="${1#* }" + printf -v "$2" '%s' "${__bff_function%% *}" +} + +bats_frame_filename() { + local __bff_filename="${1#* }" + __bff_filename="${__bff_filename#* }" + + if [[ "$__bff_filename" = "$BATS_TEST_SOURCE" ]]; then + __bff_filename="$BATS_TEST_FILENAME" + fi + printf -v "$2" '%s' "$__bff_filename" +} + +bats_extract_line() { + local __bats_extract_line_line + local __bats_extract_line_index=0 + + while IFS= read -r __bats_extract_line_line; do + if [[ "$((++__bats_extract_line_index))" -eq "$2" ]]; then + printf -v "$3" '%s' "${__bats_extract_line_line%$'\r'}" + break + fi + done <"$1" +} + +bats_strip_string() { + [[ "$1" =~ ^[[:space:]]*(.*)[[:space:]]*$ ]] + printf -v "$2" '%s' "${BASH_REMATCH[1]}" +} + +bats_trim_filename() { + printf -v "$2" '%s' "${1#$BATS_CWD/}" +} + +bats_debug_trap() { + if [[ "$BASH_SOURCE" != "$1" ]]; then + bats_capture_stack_trace + fi +} + +# For some versions of Bash, the `ERR` trap may not always fire for every +# command failure, but the `EXIT` trap will. Also, some command failures may not +# set `$?` properly. See #72 and #81 for details. +# +# For this reason, we call `bats_error_trap` at the very beginning of +# `bats_teardown_trap` (the `DEBUG` trap for the call will move +# `BATS_CURRENT_STACK_TRACE` to `BATS_PREVIOUS_STACK_TRACE`) and check the value +# of `$BATS_TEST_COMPLETED` before taking other actions. We also adjust the exit +# status value if needed. +# +# See `bats_exit_trap` for an additional EXIT error handling case when `$?` +# isn't set properly during `teardown()` errors. +bats_error_trap() { + local status="$?" 
+ if [[ -z "$BATS_TEST_COMPLETED" ]]; then + BATS_ERROR_STATUS="${BATS_ERROR_STATUS:-$status}" + if [[ "$BATS_ERROR_STATUS" -eq 0 ]]; then + BATS_ERROR_STATUS=1 + fi + BATS_ERROR_STACK_TRACE=( "${BATS_PREVIOUS_STACK_TRACE[@]}" ) + trap - debug + fi +} + +bats_teardown_trap() { + bats_error_trap + local status=0 + teardown >>"$BATS_OUT" 2>&1 || status="$?" + + if [[ $status -eq 0 ]]; then + BATS_TEARDOWN_COMPLETED=1 + elif [[ -n "$BATS_TEST_COMPLETED" ]]; then + BATS_ERROR_STATUS="$status" + BATS_ERROR_STACK_TRACE=( "${BATS_CURRENT_STACK_TRACE[@]}" ) + fi + + bats_exit_trap +} + +bats_exit_trap() { + local line + local status + local skipped='' + trap - err exit + + if [[ -n "$BATS_TEST_SKIPPED" ]]; then + skipped=" # skip" + if [[ "$BATS_TEST_SKIPPED" != '1' ]]; then + skipped+=" $BATS_TEST_SKIPPED" + fi + fi + + if [[ -z "$BATS_TEST_COMPLETED" || -z "$BATS_TEARDOWN_COMPLETED" ]]; then + if [[ "${#BATS_ERROR_STACK_TRACE[@]}" -eq 0 ]]; then + # For some versions of bash, `$?` may not be set properly for some error + # conditions before triggering the EXIT trap directly (see #72 and #81). + # Thanks to the `BATS_TEARDOWN_COMPLETED` signal, this will pinpoint such + # errors if they happen during `teardown()` when `bats_perform_test` calls + # `bats_teardown_trap` directly after the test itself passes. + # + # If instead the test fails, and the `teardown()` error happens while + # `bats_teardown_trap` runs as the EXIT trap, the test will fail with no + # output, since there's no way to reach the `bats_exit_trap` call. + BATS_ERROR_STACK_TRACE=( "${BATS_PREVIOUS_STACK_TRACE[@]}" ) + BATS_ERROR_STATUS=1 + fi + printf 'not ok %d %s\n' "$BATS_TEST_NUMBER" "$BATS_TEST_DESCRIPTION" >&3 + bats_print_stack_trace "${BATS_ERROR_STACK_TRACE[@]}" >&3 + bats_print_failed_command \ + "${BATS_ERROR_STACK_TRACE[${#BATS_ERROR_STACK_TRACE[@]}-1]}" \ + "$BATS_ERROR_STATUS" >&3 + + while IFS= read -r line; do + printf '# %s\n' "$line" + done <"$BATS_OUT" >&3 + if [[ -n "$line" ]]; then + printf '# %s\n' "$line" + fi + status=1 + else + printf 'ok %d %s%s\n' "$BATS_TEST_NUMBER" "$BATS_TEST_DESCRIPTION" \ + "$skipped" >&3 + status=0 + fi + + rm -f "$BATS_OUT" + exit "$status" +} + +bats_perform_tests() { + printf '1..%d\n' "$#" + test_number=1 + status=0 + for test_name in "$@"; do + if ! "$0" $BATS_EXTENDED_SYNTAX "$BATS_TEST_FILENAME" "$test_name" \ + "$test_number"; then + status=1 + fi + let test_number+=1 + done + exit "$status" +} + +bats_perform_test() { + BATS_TEST_NAME="$1" + if declare -F "$BATS_TEST_NAME" >/dev/null; then + BATS_TEST_NUMBER="$2" + if [[ -z "$BATS_TEST_NUMBER" ]]; then + printf '1..1\n' + BATS_TEST_NUMBER=1 + fi + + BATS_TEST_COMPLETED="" + BATS_TEARDOWN_COMPLETED="" + BATS_ERROR_STATUS="" + trap "bats_debug_trap \"\$BASH_SOURCE\"" debug + trap "bats_error_trap" err + trap "bats_teardown_trap" exit + "$BATS_TEST_NAME" >>"$BATS_OUT" 2>&1 + BATS_TEST_COMPLETED=1 + trap "bats_exit_trap" exit + bats_teardown_trap + + else + printf "bats: unknown test name \`%s'\n" "$BATS_TEST_NAME" >&2 + exit 1 + fi +} + +if [[ -z "$TMPDIR" ]]; then + BATS_TMPDIR="/tmp" +else + BATS_TMPDIR="${TMPDIR%/}" +fi + +BATS_TMPNAME="$BATS_TMPDIR/bats.$$" +BATS_PARENT_TMPNAME="$BATS_TMPDIR/bats.$PPID" +BATS_OUT="${BATS_TMPNAME}.out" + +bats_preprocess_source() { + BATS_TEST_SOURCE="${BATS_TMPNAME}.src" + . 
bats-preprocess <<< "$(< "$BATS_TEST_FILENAME")"$'\n' > "$BATS_TEST_SOURCE" + trap "bats_cleanup_preprocessed_source" err exit + trap "bats_cleanup_preprocessed_source; exit 1" int + + bats_detect_duplicate_test_case_names +} + +bats_cleanup_preprocessed_source() { + rm -f "$BATS_TEST_SOURCE" +} + +bats_detect_duplicate_test_case_names() { + local test_names=() + local test_dupes=() + local line + + while read -r line; do + if [[ ! "$line" =~ ^bats_test_function\ ]]; then + continue + fi + line="${line%$'\r'}" + line="${line#* }" + + if [[ " ${test_names[*]} " == *" $line "* ]]; then + test_dupes+=("$line") + continue + fi + test_names+=("$line") + done <"$BATS_TEST_SOURCE" + + if [[ "${#test_dupes[@]}" -ne 0 ]]; then + printf 'bats warning: duplicate test name(s) in %s: %s\n' \ + "$BATS_TEST_FILENAME" "${test_dupes[*]}" >&2 + fi +} + +bats_evaluate_preprocessed_source() { + if [[ -z "$BATS_TEST_SOURCE" ]]; then + BATS_TEST_SOURCE="${BATS_PARENT_TMPNAME}.src" + fi + source "$BATS_TEST_SOURCE" +} + +exec 3<&1 + +if [[ "$#" -eq 0 ]]; then + bats_preprocess_source + bats_evaluate_preprocessed_source + + if [[ -n "$BATS_COUNT_ONLY" ]]; then + printf '%d\n' "${#BATS_TEST_NAMES[@]}" + else + bats_perform_tests "${BATS_TEST_NAMES[@]}" + fi +else + bats_evaluate_preprocessed_source + bats_perform_test "$@" +fi diff --git a/test/node_modules/bats/libexec/bats-core/bats-format-tap-stream b/test/node_modules/bats/libexec/bats-core/bats-format-tap-stream new file mode 100755 index 000000000..c57defa20 --- /dev/null +++ b/test/node_modules/bats/libexec/bats-core/bats-format-tap-stream @@ -0,0 +1,177 @@ +#!/usr/bin/env bash +set -e + +# Just stream the TAP output (sans extended syntax) if tput is missing +if ! command -v tput >/dev/null; then + exec grep -v "^begin " +fi + +header_pattern='[0-9]+\.\.[0-9]+' +IFS= read -r header + +if [[ "$header" =~ $header_pattern ]]; then + count="${header:3}" + index=0 + failures=0 + skipped=0 + name="" + count_column_width=$(( ${#count} * 2 + 2 )) +else + # If the first line isn't a TAP plan, print it and pass the rest through + printf "%s\n" "$header" + exec cat +fi + +update_screen_width() { + screen_width="$(tput cols)" + count_column_left=$(( $screen_width - $count_column_width )) +} + +trap update_screen_width WINCH +update_screen_width + +begin() { + go_to_column 0 + printf_with_truncation $(( $count_column_left - 1 )) " %s" "$name" + clear_to_end_of_line + go_to_column $count_column_left + printf "%${#count}s/${count}" "$index" + go_to_column 1 +} + +pass() { + go_to_column 0 + printf " ✓ %s" "$name" + advance +} + +skip() { + local reason="$1" + if [[ -n "$reason" ]]; then + reason=": $reason" + fi + go_to_column 0 + printf " - %s (skipped%s)" "$name" "$reason" + advance +} + +fail() { + go_to_column 0 + set_color 1 bold + printf " ✗ %s" "$name" + advance +} + +log() { + set_color 1 + printf " %s\n" "$1" + clear_color +} + +summary() { + printf "\n%d test" "$count" + if [[ "$count" -ne 1 ]]; then + printf 's' + fi + + printf ", %d failure" "$failures" + if [[ "$failures" -ne 1 ]]; then + printf 's' + fi + + if [[ "$skipped" -gt 0 ]]; then + printf ", %d skipped" "$skipped" + fi + + printf "\n" +} + +printf_with_truncation() { + local width="$1" + shift + local string + + printf -v 'string' -- "$@" + + if [[ "${#string}" -gt "$width" ]]; then + printf "%s..." 
"${string:0:$(( $width - 4 ))}" + else + printf "%s" "$string" + fi +} + +go_to_column() { + local column="$1" + printf "\x1B[%dG" $(( $column + 1 )) +} + +clear_to_end_of_line() { + printf "\x1B[K" +} + +advance() { + clear_to_end_of_line + printf '\n' + clear_color +} + +set_color() { + local color="$1" + local weight=22 + + if [[ "$2" == 'bold' ]]; then + weight=1 + fi + printf "\x1B[%d;%dm" $(( 30 + $color )) "$weight" +} + +clear_color() { + printf "\x1B[0m" +} + +_buffer="" + +buffer() { + _buffer="${_buffer}$("$@")" +} + +flush() { + printf "%s" "$_buffer" + _buffer="" +} + +finish() { + flush + printf "\n" +} + +trap finish EXIT + +while IFS= read -r line; do + case "$line" in + "begin "* ) + let index+=1 + name="${line#* $index }" + buffer begin + flush + ;; + "ok "* ) + skip_expr="ok $index (.*) # skip ?(([[:print:]]*))?" + if [[ "$line" =~ $skip_expr ]]; then + let skipped+=1 + buffer skip "${BASH_REMATCH[2]}" + else + buffer pass + fi + ;; + "not ok "* ) + let failures+=1 + buffer fail + ;; + "# "* ) + buffer log "${line:2}" + ;; + esac +done + +buffer summary diff --git a/test/node_modules/bats/libexec/bats-core/bats-preprocess b/test/node_modules/bats/libexec/bats-core/bats-preprocess new file mode 100755 index 000000000..fee418259 --- /dev/null +++ b/test/node_modules/bats/libexec/bats-core/bats-preprocess @@ -0,0 +1,55 @@ +#!/usr/bin/env bash +set -e + +encode_name() { + local name="$1" + local result="test_" + local hex_code + + if [[ ! "$name" =~ [^[:alnum:]\ _-] ]]; then + name="${name//_/-5f}" + name="${name//-/-2d}" + name="${name// /_}" + result+="$name" + else + local length="${#name}" + local char i + + for ((i=0; i [ ...] + + is the path to a Bats test file, or the path to a directory +containing Bats test files. + + +DESCRIPTION +----------- + +Bats is a TAP-compliant testing framework for Bash. It provides a simple +way to verify that the UNIX programs you write behave as expected. + +A Bats test file is a Bash script with special syntax for defining +test cases. Under the hood, each test case is just a function with a +description. + +Test cases consist of standard shell commands. Bats makes use of +Bash's `errexit` (`set -e`) option when running test cases. If every +command in the test case exits with a `0` status code (success), the +test passes. In this way, each line is an assertion of truth. + +See `bats`(7) for more information on writing Bats tests. + + +RUNNING TESTS +------------- + +To run your tests, invoke the `bats` interpreter with a path to a test +file. The file's test cases are run sequentially and in isolation. If +all the test cases pass, `bats` exits with a `0` status code. If there +are any failures, `bats` exits with a `1` status code. + +You can invoke the `bats` interpreter with multiple test file arguments, +or with a path to a directory containing multiple `.bats` files. Bats +will run each test file individually and aggregate the results. If any +test case fails, `bats` exits with a `1` status code. 
+ + +OPTIONS +------- + + * `-c`, `--count`: + Count the number of test cases without running any tests + * `-h`, `--help`: + Display help message + * `-p`, `--pretty`: + Show results in pretty format (default for terminals) + * `-r`, `--recursive`: + Include tests in subdirectories + * `-t`, `--tap`: + Show results in TAP format + * `-v`, `--version`: + Display the version number + + +OUTPUT +------ + +When you run Bats from a terminal, you'll see output as each test is +performed, with a check-mark next to the test's name if it passes or +an "X" if it fails. + + $ bats addition.bats + ✓ addition using bc + ✓ addition using dc + + 2 tests, 0 failures + +If Bats is not connected to a terminal--in other words, if you run it +from a continuous integration system or redirect its output to a +file--the results are displayed in human-readable, machine-parsable +TAP format. You can force TAP output from a terminal by invoking Bats +with the `--tap` option. + + $ bats --tap addition.bats + 1..2 + ok 1 addition using bc + ok 2 addition using dc + + +EXIT STATUS +----------- + +The `bats` interpreter exits with a value of `0` if all test cases pass, +or `1` if one or more test cases fail. + + +SEE ALSO +-------- + +Bats wiki: _https://github.com/bats\-core/bats\-core/wiki/_ + +`bash`(1), `bats`(7) + + +COPYRIGHT +--------- + +(c) 2017 Bianca Tamayo (bats-core organization) +(c) 2014 Sam Stephenson + +Bats is released under the terms of an MIT-style license. + + + diff --git a/test/node_modules/bats/man/bats.7 b/test/node_modules/bats/man/bats.7 new file mode 100644 index 000000000..d0836e525 --- /dev/null +++ b/test/node_modules/bats/man/bats.7 @@ -0,0 +1,178 @@ +.\" generated with Ronn/v0.7.3 +.\" http://github.com/rtomayko/ronn/tree/0.7.3 +. +.TH "BATS" "7" "November 2013" "" "" +. +.SH "NAME" +\fBbats\fR \- Bats test file format +. +.SH "DESCRIPTION" +A Bats test file is a Bash script with special syntax for defining test cases\. Under the hood, each test case is just a function with a description\. +. +.IP "" 4 +. +.nf + +#!/usr/bin/env bats + +@test "addition using bc" { + result="$(echo 2+2 | bc)" + [ "$result" \-eq 4 ] +} + +@test "addition using dc" { + result="$(echo 2 2+p | dc)" + [ "$result" \-eq 4 ] +} +. +.fi +. +.IP "" 0 +. +.P +Each Bats test file is evaluated n+1 times, where \fIn\fR is the number of test cases in the file\. The first run counts the number of test cases, then iterates over the test cases and executes each one in its own process\. +. +.SH "THE RUN HELPER" +Many Bats tests need to run a command and then make assertions about its exit status and output\. Bats includes a \fBrun\fR helper that invokes its arguments as a command, saves the exit status and output into special global variables, and then returns with a \fB0\fR status code so you can continue to make assertions in your test case\. +. +.P +For example, let\'s say you\'re testing that the \fBfoo\fR command, when passed a nonexistent filename, exits with a \fB1\fR status code and prints an error message\. +. +.IP "" 4 +. +.nf + +@test "invoking foo with a nonexistent file prints an error" { + run foo nonexistent_filename + [ "$status" \-eq 1 ] + [ "$output" = "foo: no such file \'nonexistent_filename\'" ] +} +. +.fi +. +.IP "" 0 +. +.P +The \fB$status\fR variable contains the status code of the command, and the \fB$output\fR variable contains the combined contents of the command\'s standard output and standard error streams\. +. 
+.P +A third special variable, the \fB$lines\fR array, is available for easily accessing individual lines of output\. For example, if you want to test that invoking \fBfoo\fR without any arguments prints usage information on the first line: +. +.IP "" 4 +. +.nf + +@test "invoking foo without arguments prints usage" { + run foo + [ "$status" \-eq 1 ] + [ "${lines[0]}" = "usage: foo " ] +} +. +.fi +. +.IP "" 0 +. +.SH "THE LOAD COMMAND" +You may want to share common code across multiple test files\. Bats includes a convenient \fBload\fR command for sourcing a Bash source file relative to the location of the current test file\. For example, if you have a Bats test in \fBtest/foo\.bats\fR, the command +. +.IP "" 4 +. +.nf + +load test_helper +. +.fi +. +.IP "" 0 +. +.P +will source the script \fBtest/test_helper\.bash\fR in your test file\. This can be useful for sharing functions to set up your environment or load fixtures\. +. +.SH "THE SKIP COMMAND" +Tests can be skipped by using the \fBskip\fR command at the point in a test you wish to skip\. +. +.IP "" 4 +. +.nf + +@test "A test I don\'t want to execute for now" { + skip + run foo + [ "$status" \-eq 0 ] +} +. +.fi +. +.IP "" 0 +. +.P +Optionally, you may include a reason for skipping: +. +.IP "" 4 +. +.nf + +@test "A test I don\'t want to execute for now" { + skip "This command will return zero soon, but not now" + run foo + [ "$status" \-eq 0 ] +} +. +.fi +. +.IP "" 0 +. +.P +Or you can skip conditionally: +. +.IP "" 4 +. +.nf + +@test "A test which should run" { + if [ foo != bar ]; then + skip "foo isn\'t bar" + fi + + run foo + [ "$status" \-eq 0 ] +} +. +.fi +. +.IP "" 0 +. +.SH "SETUP AND TEARDOWN FUNCTIONS" +You can define special \fBsetup\fR and \fBteardown\fR functions which run before and after each test case, respectively\. Use these to load fixtures, set up your environment, and clean up when you\'re done\. +. +.SH "CODE OUTSIDE OF TEST CASES" +You can include code in your test file outside of \fB@test\fR functions\. For example, this may be useful if you want to check for dependencies and fail immediately if they\'re not present\. However, any output that you print in code outside of \fB@test\fR, \fBsetup\fR or \fBteardown\fR functions must be redirected to \fBstderr\fR (\fB>&2\fR)\. Otherwise, the output may cause Bats to fail by polluting the TAP stream on \fBstdout\fR\. +. +.SH "SPECIAL VARIABLES" +There are several global variables you can use to introspect on Bats tests: +. +.IP "\(bu" 4 +\fB$BATS_TEST_FILENAME\fR is the fully expanded path to the Bats test file\. +. +.IP "\(bu" 4 +\fB$BATS_TEST_DIRNAME\fR is the directory in which the Bats test file is located\. +. +.IP "\(bu" 4 +\fB$BATS_TEST_NAMES\fR is an array of function names for each test case\. +. +.IP "\(bu" 4 +\fB$BATS_TEST_NAME\fR is the name of the function containing the current test case\. +. +.IP "\(bu" 4 +\fB$BATS_TEST_DESCRIPTION\fR is the description of the current test case\. +. +.IP "\(bu" 4 +\fB$BATS_TEST_NUMBER\fR is the (1\-based) index of the current test case in the test file\. +. +.IP "\(bu" 4 +\fB$BATS_TMPDIR\fR is the location to a directory that may be used to store temporary files\. +. +.IP "" 0 +. 
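+.P +For example, a test can combine these variables to write a per\-test scratch file (a minimal sketch; the file content is arbitrary): +. +.IP "" 4 +. +.nf + +@test "writes a scratch file" { + scratch="$BATS_TMPDIR/$BATS_TEST_NAME" + echo "data" > "$scratch" + [ \-s "$scratch" ] +} +. +.fi +. +.IP "" 0 +.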
+.SH "SEE ALSO" +\fBbash\fR(1), \fBbats\fR(1) diff --git a/test/node_modules/bats/man/bats.7.ronn b/test/node_modules/bats/man/bats.7.ronn new file mode 100644 index 000000000..7f6dd1848 --- /dev/null +++ b/test/node_modules/bats/man/bats.7.ronn @@ -0,0 +1,156 @@ +bats(7) -- Bats test file format +================================ + + +DESCRIPTION +----------- + +A Bats test file is a Bash script with special syntax for defining +test cases. Under the hood, each test case is just a function with a +description. + + #!/usr/bin/env bats + + @test "addition using bc" { + result="$(echo 2+2 | bc)" + [ "$result" -eq 4 ] + } + + @test "addition using dc" { + result="$(echo 2 2+p | dc)" + [ "$result" -eq 4 ] + } + + +Each Bats test file is evaluated n+1 times, where _n_ is the number of +test cases in the file. The first run counts the number of test cases, +then iterates over the test cases and executes each one in its own +process. + + +THE RUN HELPER +-------------- + +Many Bats tests need to run a command and then make assertions about +its exit status and output. Bats includes a `run` helper that invokes +its arguments as a command, saves the exit status and output into +special global variables, and then returns with a `0` status code so +you can continue to make assertions in your test case. + +For example, let's say you're testing that the `foo` command, when +passed a nonexistent filename, exits with a `1` status code and prints +an error message. + + @test "invoking foo with a nonexistent file prints an error" { + run foo nonexistent_filename + [ "$status" -eq 1 ] + [ "$output" = "foo: no such file 'nonexistent_filename'" ] + } + +The `$status` variable contains the status code of the command, and +the `$output` variable contains the combined contents of the command's +standard output and standard error streams. + +A third special variable, the `$lines` array, is available for easily +accessing individual lines of output. For example, if you want to test +that invoking `foo` without any arguments prints usage information on +the first line: + + @test "invoking foo without arguments prints usage" { + run foo + [ "$status" -eq 1 ] + [ "${lines[0]}" = "usage: foo " ] + } + + +THE LOAD COMMAND +---------------- + +You may want to share common code across multiple test files. Bats +includes a convenient `load` command for sourcing a Bash source file +relative to the location of the current test file. For example, if you +have a Bats test in `test/foo.bats`, the command + + load test_helper + +will source the script `test/test_helper.bash` in your test file. This +can be useful for sharing functions to set up your environment or load +fixtures. + + +THE SKIP COMMAND +---------------- + +Tests can be skipped by using the `skip` command at the point in a +test you wish to skip. + + @test "A test I don't want to execute for now" { + skip + run foo + [ "$status" -eq 0 ] + } + +Optionally, you may include a reason for skipping: + + @test "A test I don't want to execute for now" { + skip "This command will return zero soon, but not now" + run foo + [ "$status" -eq 0 ] + } + +Or you can skip conditionally: + + @test "A test which should run" { + if [ foo != bar ]; then + skip "foo isn't bar" + fi + + run foo + [ "$status" -eq 0 ] + } + + +SETUP AND TEARDOWN FUNCTIONS +---------------------------- + +You can define special `setup` and `teardown` functions which run +before and after each test case, respectively. Use these to load +fixtures, set up your environment, and clean up when you're done. 
+ + +CODE OUTSIDE OF TEST CASES +-------------------------- + +You can include code in your test file outside of `@test` functions. +For example, this may be useful if you want to check for dependencies +and fail immediately if they're not present. However, any output that +you print in code outside of `@test`, `setup` or `teardown` functions +must be redirected to `stderr` (`>&2`). Otherwise, the output may +cause Bats to fail by polluting the TAP stream on `stdout`. + + +SPECIAL VARIABLES +----------------- + +There are several global variables you can use to introspect on Bats +tests: + +* `$BATS_TEST_FILENAME` is the fully expanded path to the Bats test +file. +* `$BATS_TEST_DIRNAME` is the directory in which the Bats test file is +located. +* `$BATS_TEST_NAMES` is an array of function names for each test case. +* `$BATS_TEST_NAME` is the name of the function containing the current +test case. +* `$BATS_TEST_DESCRIPTION` is the description of the current test +case. +* `$BATS_TEST_NUMBER` is the (1-based) index of the current test case +in the test file. +* `$BATS_TMPDIR` is the location to a directory that may be used to +store temporary files. + + +SEE ALSO +-------- + +`bash`(1), `bats`(1) diff --git a/test/node_modules/bats/package.json b/test/node_modules/bats/package.json new file mode 100644 index 000000000..ea9b31ff1 --- /dev/null +++ b/test/node_modules/bats/package.json @@ -0,0 +1,134 @@ +{ + "_args": [ + [ + { + "raw": "bats", + "scope": null, + "escapedName": "bats", + "name": "bats", + "rawSpec": "", + "spec": "latest", + "type": "tag" + }, + "/home/lpabon/git/golang/porx/src/github.com/libopenstorage/openstorage/test" + ] + ], + "_from": "bats@latest", + "_id": "bats@1.1.0", + "_inCache": true, + "_location": "/bats", + "_nodeVersion": "9.5.0", + "_npmOperationalInternal": { + "host": "s3://npm-registry-packages", + "tmp": "tmp/bats_1.1.0_1531099593379_0.5758385744869141" + }, + "_npmUser": { + "name": "mbland", + "email": "mbland@acm.org" + }, + "_npmVersion": "5.6.0", + "_phantomChildren": {}, + "_requested": { + "raw": "bats", + "scope": null, + "escapedName": "bats", + "name": "bats", + "rawSpec": "", + "spec": "latest", + "type": "tag" + }, + "_requiredBy": [ + "#DEV:/", + "#USER" + ], + "_resolved": "https://registry.npmjs.org/bats/-/bats-1.1.0.tgz", + "_shasum": "6fc44f283ed4e7af2b6ffac93ec5026a1acbdc66", + "_shrinkwrap": null, + "_spec": "bats", + "_where": "/home/lpabon/git/golang/porx/src/github.com/libopenstorage/openstorage/test", + "author": { + "name": "Sam Stephenson", + "email": "sstephenson@gmail.com", + "url": "http://sstephenson.us/" + }, + "bin": { + "bats": "bin/bats" + }, + "bugs": { + "url": "https://github.com/bats-core/bats-core/issues" + }, + "contributors": [ + { + "name": "Andrew Martin", + "url": "https://control-plane.io/" + }, + { + "name": "Bianca Tamayo", + "email": "hi@biancatamayo.me", + "url": "https://biancatamayo.me/" + }, + { + "name": "Jason Karns", + "email": "jason.karns@gmail.com", + "url": "http://jasonkarns.com/" + }, + { + "name": "Mike Bland", + "email": "mbland@acm.org", + "url": "https://mike-bland.com/" + } + ], + "dependencies": {}, + "description": "Bash Automated Testing System", + "devDependencies": {}, + "directories": { + "bin": "bin", + "doc": "docs", + "man": "man", + "test": "test" + }, + "dist": { + "integrity": "sha512-1pA29OhDByrUtAXX+nmqZxgRgx2y8PvuZzbLJVjd2dpEDVDvz0MjcBMdmIPNq5lC+tG53G+RbeRsbIlv3vw7tg==", + "shasum": "6fc44f283ed4e7af2b6ffac93ec5026a1acbdc66", + "tarball": 
"https://registry.npmjs.org/bats/-/bats-1.1.0.tgz", + "fileCount": 15, + "unpackedSize": 62108, + "npm-signature": "-----BEGIN PGP SIGNATURE-----\r\nVersion: OpenPGP.js v3.0.4\r\nComment: https://openpgpjs.org\r\n\r\nwsFcBAEBCAAQBQJbQrnJCRA9TVsSAnZWagAAhaUP/Au5hWCRLs2GwW+5OiKl\nTleBn6NwO9XUfrQqew9U4t92VpRZ3Mj7W6zu7hslX96OmJmAYAm3k9X7DT8/\neeDD4sTk3mvrVXZna6CBquKWdiO3sABZk6N3S7h0zsPwH+JmMw6OgX3yW0CL\nXHpcVSppXHgWdCfcA10IYEwBLPHVPaVveufLEvjhpIxCI5eFXVbGhPvX9fyu\nKbhbaG12GpeshNJw/VCMUYcVSiGIW7WoKNSl9hH2LxwJihx8w55JxvRQl2CX\ntUXleeZ9Zp+zmx/BuGZmqc8eOU1D0IDn59NUeFaINxSWz3iKEBjDGpr0R/7M\n6Ro1EvPEOmQ0aPmrzImwVIUY3g8Fl78MHazpmwuLudNY7/uNlXWpKcSLdCdB\nYi3sUHx6K6aCmwfiY3eslyT7abpDSIw/j6CAso4OR+RSMQUXke+i8kfyIBSj\nXM+1eTTBnu1mbjLEYK3MLNqIN3p6kRES7vVNoLmHRgXHe0IvkhA5w0Sfue2c\n7gEWeUIKDsvQNWZ4bi4hz3+JMurpkD+ihyR1/tvg3/nh0VEyzWf8fNTu/Lqk\n4oLqeabUdO6C1fXeNdg79tCN93RcGtqn/emTCcDaDkMGi4TifWMji8seOaIm\nQkouQg7J76p4ibTgJH7VWlX4As2EpJIuL2OXbBqI+Fu7AA1A7u7oWTDaHbt7\n+f3s\r\n=zPD+\r\n-----END PGP SIGNATURE-----\r\n" + }, + "files": [ + "bin", + "libexec", + "man" + ], + "gitHead": "c706d1470dd1376687776bbe985ac22d09780327", + "homepage": "https://github.com/bats-core/bats-core#readme", + "keywords": [ + "bats", + "bash", + "shell", + "test", + "unit" + ], + "license": "MIT", + "maintainers": [ + { + "name": "jasonkarns", + "email": "jason@karns.name" + } + ], + "man": [ + "/Users/msb/src/bats-core/bats-core/man/bats.1", + "/Users/msb/src/bats-core/bats-core/man/bats.7" + ], + "name": "bats", + "optionalDependencies": {}, + "readme": "ERROR: No README data found!", + "repository": { + "type": "git", + "url": "git+https://github.com/bats-core/bats-core.git" + }, + "scripts": { + "test": "bin/bats test" + }, + "version": "1.1.0" +} diff --git a/test/package.json b/test/package.json new file mode 100644 index 000000000..2832b15dc --- /dev/null +++ b/test/package.json @@ -0,0 +1,16 @@ +{ + "name": "openstorage-test", + "version": "0.0.1", + "description": "OpenStorage Tests", + "main": "index.js", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "author": "", + "license": "ISC", + "devDependencies": { + "bats": "^1.1.0", + "bats-assert": "^2.0.0", + "bats-support": "^0.3.0" + } +} diff --git a/test/run.bash b/test/run.bash new file mode 100755 index 000000000..f85687387 --- /dev/null +++ b/test/run.bash @@ -0,0 +1,28 @@ +#!/bin/bash + +# Check dependencies +dependecies="kind kubectl jq curl" +for d in $dependecies ; do + if [[ -z "$(type -t $d)" ]] ; then + echo "Missing $d" >&2 + exit 1 + fi +done + +# Location of bats +BATS=./node_modules/bats/bin/bats + +# Setup +export KIND_CLUSTER=${USER}-kind-openstorage-test +export CLUSTER_CONTROL_PLANE_CONTAINER=${KIND_CLUSTER}-control-plane +export TMPDIR=/tmp/bats-test-$$ +mkdir -p ${TMPDIR} + +# generate 10y tokens +export ADMIN_TOKEN=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJlbWFpbCI6InN1cHBvcnQtYWRtaW5AbXljb21wYW55LmNvbSIsImV4cCI6MTkwMTczMDQwNywiZ3JvdXBzIjpbIioiXSwiaWF0IjoxNTg2MzcwNDA3LCJpc3MiOiJvcGVuc3RvcmFnZS5pbyIsIm5hbWUiOiJBZG1pbiIsInJvbGVzIjpbInN5c3RlbS5hZG1pbiJdLCJzdWIiOiJzdXBwb3J0LWFkbWluQG15Y29tcGFueS5jb20ifQ.RR0hduw2x4aQPLUFzwXRMp3g0Qg1Uq-gGkIY-vCMxRE +export K8S_TOKEN=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJlbWFpbCI6InN1cHBvcnRAbXljb21wYW55LmNvbSIsImV4cCI6MTkwMTczMDQ3MywiZ3JvdXBzIjpbIm15Y29tcGFueSIsImVuZ2luZWVyaW5nIiwiZGV2b3BzIl0sImlhdCI6MTU4NjM3MDQ3MywiaXNzIjoib3BlbnN0b3JhZ2UuaW8iLCJuYW1lIjoiS3ViZXJuZXRlcyIsInJvbGVzIjpbInN5c3RlbS51c2VyIl0sInN1YiI6InN1cHBvcnRAbXljb21wYW55LmNvbSJ9.2EnoEAR2qrTTxjxcH3k5w_E24l4p5DU7jOWF7ke0aJ4 +export 
TENANT1_TOKEN=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJlbWFpbCI6InN1cHBvcnRAdGVuYW50LW9uZS5jb20iLCJleHAiOjE5MDE3MzA1MDUsImdyb3VwcyI6WyJ0ZW5hbnQtb25lIl0sImlhdCI6MTU4NjM3MDUwNSwiaXNzIjoib3BlbnN0b3JhZ2UuaW8iLCJuYW1lIjoiVGVuYW50IE9uZSIsInJvbGVzIjpbInN5c3RlbS51c2VyIl0sInN1YiI6InN1cHBvcnRAdGVuYW50LW9uZS5jb20ifQ.56ruILoD_r-RpE_r9317nWq8gZ7PbjnMY5JMzQrPuhI +export TENANT2_TOKEN=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJlbWFpbCI6InN1cHBvcnRAdGVuYW50LXR3by5jb20iLCJleHAiOjE5MDE3MzA1NDksImdyb3VwcyI6WyJ0ZW5hbnQtdHdvIl0sImlhdCI6MTU4NjM3MDU0OSwiaXNzIjoib3BlbnN0b3JhZ2UuaW8iLCJuYW1lIjoiVGVuYW50IFR3byIsInJvbGVzIjpbInN5c3RlbS51c2VyIl0sInN1YiI6InN1cHBvcnRAdGVuYW50LXR3by5jb20ifQ.6t3DiToB5ttTKZ9IuSoM4XTKKltpBq84kz7HseehjFc + +# Set env DEBUG=1 to show output of osd::echo and osd::by +${BATS} setup testcases && rm -rf ${TMPDIR} diff --git a/test/setup/assets/auth/sc-csi.yml b/test/setup/assets/auth/sc-csi.yml new file mode 100644 index 000000000..ff7074d62 --- /dev/null +++ b/test/setup/assets/auth/sc-csi.yml @@ -0,0 +1,14 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: csi-auth +provisioner: osd.openstorage.org +parameters: + repl: "1" + csi.storage.k8s.io/provisioner-secret-name: k8s-user + csi.storage.k8s.io/provisioner-secret-namespace: openstorage + csi.storage.k8s.io/node-publish-secret-name: k8s-user + csi.storage.k8s.io/node-publish-secret-namespace: openstorage + csi.storage.k8s.io/controller-expand-secret-name: k8s-user + csi.storage.k8s.io/controller-expand-secret-namespace: openstorage +allowVolumeExpansion: true diff --git a/test/setup/assets/auth/sc-intree.yml b/test/setup/assets/auth/sc-intree.yml new file mode 100644 index 000000000..187cf015a --- /dev/null +++ b/test/setup/assets/auth/sc-intree.yml @@ -0,0 +1,9 @@ +kind: StorageClass +apiVersion: storage.k8s.io/v1beta1 +metadata: + name: intree-auth +provisioner: kubernetes.io/portworx-volume +parameters: + repl: "1" + openstorage.io/auth-secret-name: k8s-user + openstorage.io/auth-secret-namespace: openstorage diff --git a/test/setup/assets/kind.yaml b/test/setup/assets/kind.yaml new file mode 100644 index 000000000..77c459054 --- /dev/null +++ b/test/setup/assets/kind.yaml @@ -0,0 +1,5 @@ +kind: Cluster +apiVersion: kind.sigs.k8s.io/v1alpha3 +nodes: +- role: control-plane +- role: worker diff --git a/test/setup/assets/multitenant/sc-csi.yml b/test/setup/assets/multitenant/sc-csi.yml new file mode 100644 index 000000000..d426cfa48 --- /dev/null +++ b/test/setup/assets/multitenant/sc-csi.yml @@ -0,0 +1,14 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: csi-multitenant +provisioner: osd.openstorage.org +parameters: + repl: "1" + csi.storage.k8s.io/provisioner-secret-name: k8s-user + csi.storage.k8s.io/provisioner-secret-namespace: ${pvc.namespace} + csi.storage.k8s.io/node-publish-secret-name: k8s-user + csi.storage.k8s.io/node-publish-secret-namespace: ${pvc.namespace} + csi.storage.k8s.io/controller-expand-secret-name: k8s-user + csi.storage.k8s.io/controller-expand-secret-namespace: ${pvc.namespace} +allowVolumeExpansion: true diff --git a/test/setup/assets/multitenant/sc-intree.yml b/test/setup/assets/multitenant/sc-intree.yml new file mode 100644 index 000000000..b98436d77 --- /dev/null +++ b/test/setup/assets/multitenant/sc-intree.yml @@ -0,0 +1,9 @@ +kind: StorageClass +apiVersion: storage.k8s.io/v1beta1 +metadata: + name: intree-multitenant +provisioner: kubernetes.io/portworx-volume +parameters: + repl: "1" + openstorage.io/auth-secret-name: k8s-user + openstorage.io/auth-secret-namespace: 
${pvc.namespace} diff --git a/test/setup/assets/noauth/sc-csi.yml b/test/setup/assets/noauth/sc-csi.yml new file mode 100644 index 000000000..f04714d35 --- /dev/null +++ b/test/setup/assets/noauth/sc-csi.yml @@ -0,0 +1,8 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: csi-noauth +provisioner: osd.openstorage.org +parameters: + repl: "1" +allowVolumeExpansion: true diff --git a/test/setup/assets/noauth/sc-intree.yml b/test/setup/assets/noauth/sc-intree.yml new file mode 100644 index 000000000..ed04de55b --- /dev/null +++ b/test/setup/assets/noauth/sc-intree.yml @@ -0,0 +1,7 @@ +kind: StorageClass +apiVersion: storage.k8s.io/v1beta1 +metadata: + name: intree-noauth +provisioner: kubernetes.io/portworx-volume +parameters: + repl: "1" diff --git a/test/setup/assets/osd-csi.yaml b/test/setup/assets/osd-csi.yaml new file mode 100644 index 000000000..791f3a3e9 --- /dev/null +++ b/test/setup/assets/osd-csi.yaml @@ -0,0 +1,367 @@ +--- +kind: Service +apiVersion: v1 +metadata: + name: portworx-api + namespace: kube-system + labels: + name: portworx-api +spec: + selector: + name: osd-api + type: NodePort + ports: + - name: px-api + protocol: TCP + port: 9001 + targetPort: 9001 + - name: px-sdk + protocol: TCP + port: 9020 + targetPort: 9020 + - name: px-rest-gateway + protocol: TCP + port: 9021 + targetPort: 9021 +--- +kind: Service +apiVersion: v1 +metadata: + name: portworx-service + namespace: kube-system + labels: + name: portworx +spec: + selector: + name: openstorage + type: NodePort + ports: + - name: px-api + protocol: TCP + port: 9001 + targetPort: 9001 + - name: px-sdk + protocol: TCP + port: 9020 + targetPort: 9020 + - name: px-rest-gateway + protocol: TCP + port: 9021 + targetPort: 9021 +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: osd-api + namespace: kube-system + labels: + name: osd-api +spec: + selector: + matchLabels: + name: osd-api + minReadySeconds: 0 + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 100% + template: + metadata: + labels: + name: osd-api + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: px/enabled + operator: NotIn + values: + - "false" + - key: node-role.kubernetes.io/master + operator: DoesNotExist + hostNetwork: true + hostPID: false + containers: + - name: osd-api + image: k8s.gcr.io/pause:3.1 + imagePullPolicy: Always + readinessProbe: + periodSeconds: 10 + httpGet: + host: 127.0.0.1 + path: /v1/identities/version + port: 9021 + restartPolicy: Always + serviceAccountName: osd-csi-account +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: osd-csi-account + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: osd-csi-role +rules: +- apiGroups: ["extensions"] + resources: ["podsecuritypolicies"] + resourceNames: ["privileged"] + verbs: ["use"] +- apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["*"] +- apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] +- apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete", "update", "patch"] +- apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] +- apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] +- apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] +- apiGroups: 
["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] +- apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] +- apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] +- apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots", "volumesnapshotcontents", "volumesnapshotclasses", "volumesnapshots/status"] + verbs: ["create", "get", "list", "watch", "update", "delete"] +- apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch", "update"] +- apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] +- apiGroups: ["csi.storage.k8s.io"] + resources: ["csidrivers"] + verbs: ["create", "delete"] +- apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "watch", "list", "delete", "update", "create"] +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["*"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: osd-csi-role-binding +subjects: +- kind: ServiceAccount + name: osd-csi-account + namespace: kube-system +roleRef: + kind: ClusterRole + name: osd-csi-role + apiGroup: rbac.authorization.k8s.io +--- +kind: Service +apiVersion: v1 +metadata: + name: osd-csi-service + namespace: kube-system +spec: + clusterIP: None +--- +kind: Deployment +apiVersion: apps/v1 +metadata: + name: osd-csi-ext + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + app: osd-csi-driver + template: + metadata: + labels: + app: osd-csi-driver + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: px/enabled + operator: NotIn + values: + - "false" + - key: node-role.kubernetes.io/master + operator: DoesNotExist + hostNetwork: true + serviceAccount: osd-csi-account + containers: + - name: csi-external-provisioner + imagePullPolicy: Always + image: quay.io/openstorage/csi-provisioner:v1.4.0-1 + args: + - "--v=3" + - "--provisioner=osd.openstorage.org" + - "--csi-address=$(ADDRESS)" + - "--enable-leader-election" + - "--leader-election-type=leases" + env: + - name: ADDRESS + value: /csi/csi.sock + securityContext: + privileged: true + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: csi-snapshotter + imagePullPolicy: Always + image: quay.io/openstorage/csi-snapshotter:v1.2.2-1 + args: + - "--v=3" + - "--csi-address=$(ADDRESS)" + - "--snapshotter=osd.openstorage.org" + - "--leader-election=true" + - "--leader-election-type=leases" + env: + - name: ADDRESS + value: /csi/csi.sock + securityContext: + privileged: true + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: csi-resizer + imagePullPolicy: Always + image: quay.io/k8scsi/csi-resizer:v0.3.0 + args: + - "--v=3" + - "--csi-address=$(ADDRESS)" + - "--leader-election=true" + env: + - name: ADDRESS + value: /csi/csi.sock + securityContext: + privileged: true + volumeMounts: + - name: socket-dir + mountPath: /csi + volumes: + - name: socket-dir + hostPath: + path: /var/lib/kubelet/plugins/osd.openstorage.org + type: DirectoryOrCreate +--- +apiVersion: storage.k8s.io/v1beta1 +kind: CSIDriver +metadata: + name: osd.openstorage.org +spec: + attachRequired: false + podInfoOnMount: true + volumeLifecycleModes: + - Persistent + - Ephemeral +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: openstorage + namespace: kube-system + labels: + name: openstorage +spec: + selector: + matchLabels: + name: openstorage + 
minReadySeconds: 0 + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + name: openstorage + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: px/enabled + operator: NotIn + values: + - "false" + - key: node-role.kubernetes.io/master + operator: DoesNotExist + hostNetwork: true + hostPID: false + containers: + - name: osd + image: quay.io/openstorage/osd:latest + imagePullPolicy: Never # Manually loaded into KinD (see setup script) + args: + ["-d","--driver=name=fake", + "--csidrivername","osd.openstorage.org", + "--jwt-issuer", "openstorage.io", + "--jwt-shared-secret", "mysecret", + "--sdkport", "9020", + "--sdkrestport", "9021", + "--secrets-type", "k8s"] + env: + - name: "CSI_ENDPOINT" + value: "/var/lib/kubelet/plugins/osd.openstorage.org/csi.sock" + securityContext: + privileged: true + volumeMounts: + - name: csi-driver-path + mountPath: /var/lib/kubelet/plugins/osd.openstorage.org + - name: csi-kubelet-path + mountPath: /var/lib/kubelet + - name: csi-node-driver-registrar + image: quay.io/k8scsi/csi-node-driver-registrar:v1.1.0 + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + - "--kubelet-registration-path=/var/lib/kubelet/plugins/osd.openstorage.org/csi.sock" + imagePullPolicy: Always + env: + - name: ADDRESS + value: /csi/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + securityContext: + privileged: true + volumeMounts: + - name: csi-driver-path + mountPath: /csi + - name: registration-dir + mountPath: /registration + restartPolicy: Always + serviceAccountName: osd-csi-account + volumes: + - name: registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry + type: DirectoryOrCreate + - name: csi-kubelet-path + hostPath: + path: /var/lib/kubelet + type: DirectoryOrCreate + - name: csi-driver-path + hostPath: + path: /var/lib/kubelet/plugins/osd.openstorage.org + type: DirectoryOrCreate diff --git a/test/setup/setup.bats b/test/setup/setup.bats new file mode 100644 index 000000000..ab7beb981 --- /dev/null +++ b/test/setup/setup.bats @@ -0,0 +1,64 @@ +load ../vendor/k8s +load ../lib/osd +load ../node_modules/bats-assert/load +load ../node_modules/bats-support/load + +KIND_IMAGE=kindest/node:v1.17.0 +ASSETS="setup/assets" + +@test "Setup kind cluster ${KIND_CLUSTER}" { + local name=${KIND_CLUSTER} + if kind get clusters | grep ${KIND_CLUSTER} > /dev/null 2>&1 ; then + skip "Cluster already up and running" + fi + + run kind create cluster \ + --name ${name} \ + --config ${ASSETS}/kind.yaml \ + --image ${KIND_IMAGE} + assert_success + + run kubectl apply -f ${ASSETS}/noauth + assert_success + + run kubectl apply -f ${ASSETS}/auth + assert_success + + run kubectl apply -f ${ASSETS}/multitenant + assert_success + + run kubectl create namespace openstorage + assert_success + + run kubectl -n openstorage create secret \ + generic k8s-user --from-literal=auth-token=${K8S_TOKEN} + assert_success + + run kubectl -n openstorage create secret \ + generic admin-user --from-literal=auth-token=${ADMIN_TOKEN} + assert_success +} + +@test "Install openstorage in ${KIND_CLUSTER}" { + run kind load docker-image quay.io/openstorage/osd:latest --name ${KIND_CLUSTER} + assert_success + + # Start OSD + kubectl delete -f ${ASSETS}/osd-csi.yaml > /dev/null 2>&1 || true + + # Deploy + run kubectl apply -f ${ASSETS}/osd-csi.yaml + assert_success + + # Tell DETIK what command to use to verify +
DETIK_CLIENT_NAME="kubectl -n kube-system" + + # Wait for openstorage to come up + run try "at most 120 times every 1s to get pods named '^openstorage' and verify that 'status' is 'running'" + assert_success + +} + +@test "Verify SDK GW is accessible" { + timeout 60 sh -c "until curl --silent -H \"Authorization:bearer $ADMIN_TOKEN\" -X GET -d {} http://$(osd::getSdkRestGWEndpoint)/v1/clusters/inspectcurrent | grep STATUS_OK; do sleep 1; done" +} diff --git a/test/testcases/assets/nginx-ss.yml.tmpl b/test/testcases/assets/nginx-ss.yml.tmpl new file mode 100644 index 000000000..71a4d885c --- /dev/null +++ b/test/testcases/assets/nginx-ss.yml.tmpl @@ -0,0 +1,50 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: nginx + labels: + app: nginx +spec: + ports: + - port: 80 + name: web + clusterIP: None + selector: + app: nginx +--- +apiVersion: apps/v1 # for k8s versions before 1.9.0 use apps/v1beta2 and before 1.8.0 use extensions/v1beta1 +kind: StatefulSet +metadata: + name: web + labels: + app: nginx +spec: + serviceName: "nginx" + selector: + matchLabels: + app: nginx + replicas: 1 + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: k8s.gcr.io/nginx-slim:0.8 + ports: + - containerPort: 80 + name: web + volumeMounts: + - name: www + mountPath: /usr/share/nginx/html + volumeClaimTemplates: + - metadata: + name: www + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 1Gi + storageClassName: %%STORAGECLASS%% diff --git a/test/testcases/assets/pvc.yml.tmpl b/test/testcases/assets/pvc.yml.tmpl new file mode 100644 index 000000000..a56d07c7d --- /dev/null +++ b/test/testcases/assets/pvc.yml.tmpl @@ -0,0 +1,11 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: %%PVCNAME%% +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi + storageClassName: %%STORAGECLASS%% diff --git a/test/testcases/public-volume.bats b/test/testcases/public-volume.bats new file mode 100644 index 000000000..d5f7e9e44 --- /dev/null +++ b/test/testcases/public-volume.bats @@ -0,0 +1,32 @@ +load ../vendor/k8s +load ../lib/osd +load ../node_modules/bats-assert/load +load ../node_modules/bats-support/load + +ASSETS=testcases/assets + +@test "Verify user can create a pvc without authentication" { + local pvcname="pvc-noauth" + local user="user$$" + local kubeconfig="${BATS_TMPDIR}/${user}-kubeconfig.conf" + + DETIK_CLIENT_NAME="kubectl -n ${user}" + run osd::createUserKubeconfig "${user}" "$BATS_TMPDIR" + assert_success + + storageclasses="intree-noauth csi-noauth" + for sc in $storageclasses ; do + sed -e "s#%%PVCNAME%%#${pvcname}#" \ + -e "s#%%STORAGECLASS%%#${sc}#" \ + ${ASSETS}/pvc.yml.tmpl | kubectl --kubeconfig=${kubeconfig} create -f - + + # assert it is there + run try "at most 120 times every 1s to get pvc named '^${pvcname}' and verify that 'status' is 'bound'" + assert_success + + # cleanup + run osd::kubeDeleteObjectAndWait 120 "--kubeconfig=${kubeconfig}" "pvc" "${pvcname}" + assert_success + done +} + diff --git a/test/testcases/secure-volume.bats b/test/testcases/secure-volume.bats new file mode 100644 index 000000000..cc1639258 --- /dev/null +++ b/test/testcases/secure-volume.bats @@ -0,0 +1,156 @@ +load ../vendor/k8s +load ../lib/osd +load ../node_modules/bats-assert/load +load ../node_modules/bats-support/load + +ASSETS=testcases/assets + +@test "Verify user can create a pvc with authentication" { + local pvcname="pvc-auth" + local user="user$$" + local 
kubeconfig="${BATS_TMPDIR}/${user}-kubeconfig.conf" + + DETIK_CLIENT_NAME="kubectl -n ${user}" + run osd::createUserKubeconfig "${user}" "$BATS_TMPDIR" + assert_success + + storageclasses="intree-auth csi-auth" + for sc in $storageclasses ; do + sed -e "s#%%PVCNAME%%#${pvcname}#" \ + -e "s#%%STORAGECLASS%%#${sc}#" \ + ${ASSETS}/pvc.yml.tmpl | kubectl --kubeconfig=${kubeconfig} create -f - + + # assert it is there + run try "at most 120 times every 1s to get pvc named '^${pvcname}' and verify that 'status' is 'bound'" + assert_success + + # assert that the owner is the tenant. The 'sub' for the kubernetes + # token is: support@mycompany.com, so this *must* be the owner of the volume. + # Since we have created only one, there must be exactly only 1 volume owned + # by this account + local owner="support@mycompany.com" + nvols=$(curl -s -X POST \ + "http://$(osd::getSdkRestGWEndpoint)/v1/volumes/inspectwithfilters" \ + -H "accept: application/json" \ + -H "Content-Type: application/json" \ + -H "Authorization: bearer $K8S_TOKEN" \ + -d "{\"labels\":{\"namespace\":\"${user}\",\"pvc\":\"${pvcname}\"},\"ownership\":{\"owner\":\"${owner}\"}}" | jq '.volumes | length') + [[ $nvols -eq 1 ]] + + # cleanup + run osd::kubeDeleteObjectAndWait 120 "--kubeconfig=${kubeconfig}" "pvc" "${pvcname}" + assert_success + + # Check that the volume is no longer there + nvols=$(curl -s -X POST \ + "http://$(osd::getSdkRestGWEndpoint)/v1/volumes/inspectwithfilters" \ + -H "accept: application/json" \ + -H "Content-Type: application/json" \ + -H "Authorization: bearer $K8S_TOKEN" \ + -d "{\"labels\":{\"namespace\":\"${user}\",\"pvc\":\"${pvcname}\"},\"ownership\":{\"owner\":\"${owner}\"}}" | jq '.volumes | length') + [[ $nvols -eq 0 ]] + done +} + +@test "Verify multitenancy by having user create volume with their token" { + local pvcname="pvc-auth" + local user="tenant-1-$$" + local kubeconfig="${BATS_TMPDIR}/${user}-kubeconfig.conf" + + DETIK_CLIENT_NAME="kubectl -n ${user}" + run osd::createUserKubeconfig "${user}" "$BATS_TMPDIR" + assert_success + + # Insert token as admin + run kubectl -n ${user} create secret \ + generic k8s-user --from-literal=auth-token=${TENANT1_TOKEN} + assert_success + + storageclasses="intree-multitenant csi-multitenant" + for sc in $storageclasses ; do + osd::by "deploying using storageclass ${sc}" + + sed -e "s#%%PVCNAME%%#${pvcname}#" \ + -e "s#%%STORAGECLASS%%#${sc}#" \ + ${ASSETS}/pvc.yml.tmpl | kubectl --kubeconfig=${kubeconfig} create -f - + + # assert it is there + run try "at most 120 times every 1s to get pvc named '^${pvcname}' and verify that 'status' is 'bound'" + assert_success + + # assert that the owner is the tenant. The 'sub' for the tenant 1 + # token is: support@tenant-one.com, so this *must* be the owner of the volume. 
+ # Since we have created only one, there must be exactly one volume owned + # by this account + local owner="support@tenant-one.com" + nvols=$(curl -s -X POST \ + "http://$(osd::getSdkRestGWEndpoint)/v1/volumes/inspectwithfilters" \ + -H "accept: application/json" \ + -H "Content-Type: application/json" \ + -H "Authorization: bearer $TENANT1_TOKEN" \ + -d "{\"labels\":{\"namespace\":\"${user}\",\"pvc\":\"${pvcname}\"},\"ownership\":{\"owner\":\"${owner}\"}}" | jq '.volumes | length') + echo "Value $nvols" + [[ $nvols -eq 1 ]] + + # cleanup + run osd::kubeDeleteObjectAndWait 120 "--kubeconfig=${kubeconfig}" "pvc" "${pvcname}" + assert_success + + # Check that the volume is no longer there + nvols=$(curl -s -X POST \ + "http://$(osd::getSdkRestGWEndpoint)/v1/volumes/inspectwithfilters" \ + -H "accept: application/json" \ + -H "Content-Type: application/json" \ + -H "Authorization: bearer $TENANT1_TOKEN" \ + -d "{\"labels\":{\"namespace\":\"${user}\",\"pvc\":\"${pvcname}\"},\"ownership\":{\"owner\":\"${owner}\"}}" | jq '.volumes | length') + echo "Value $nvols" + [[ $nvols -eq 0 ]] + + done +} + +@test "Verify pvc can be mounted securely by deploying an application" { + local pvcname="pvc-auth" + local user="tenant-1-$$" + local kubeconfig="${BATS_TMPDIR}/${user}-kubeconfig.conf" + + run osd::createUserKubeconfig "${user}" "$BATS_TMPDIR" + assert_success + + # Insert token as admin + run kubectl -n ${user} create secret \ + generic k8s-user --from-literal=auth-token=${TENANT1_TOKEN} + assert_success + + for drivertype in "intree" "csi" ; do + for sc in "noauth" "auth" "multitenant" ; do + osd::by "testing with ${drivertype}-${sc} on namespace ${user}" + + run mountAttach ${drivertype}-${sc} ${kubeconfig} ${user} + assert_success + done + done +} + +function mountAttach() { + local sc="$1" + local kubeconfig="$2" + local namespace="$3" + osd::echo "mountAttach sc=${sc} kubeconfig=${kubeconfig} ns=${namespace}" + + DETIK_CLIENT_NAME="kubectl -n ${namespace}" + sed -e "s#%%STORAGECLASS%%#${sc}#" \ + ${ASSETS}/nginx-ss.yml.tmpl | kubectl --kubeconfig=${kubeconfig} apply -f - + + run try "at most 120 times every 1s to get pvc named 'www-web-0' and verify that 'status' is 'bound'" + assert_success + + run try "at most 120 times every 1s to get pod named 'web-0' and verify that 'status' is 'running'" + assert_success + + sed -e "s#%%STORAGECLASS%%#${sc}#" \ + ${ASSETS}/nginx-ss.yml.tmpl | kubectl --kubeconfig=${kubeconfig} delete -f - + + run kubectl --kubeconfig=${kubeconfig} delete pvc --all + assert_success +} \ No newline at end of file diff --git a/test/vendor/README.md b/test/vendor/README.md new file mode 100644 index 000000000..466fb0144 --- /dev/null +++ b/test/vendor/README.md @@ -0,0 +1,2 @@ +DO NOT EDIT THESE FILES + diff --git a/test/vendor/detik/README.md b/test/vendor/detik/README.md new file mode 100644 index 000000000..356f15863 --- /dev/null +++ b/test/vendor/detik/README.md @@ -0,0 +1 @@ +See https://github.com/bats-core/bats-detik#setup diff --git a/test/vendor/detik/detik.bash b/test/vendor/detik/detik.bash new file mode 100755 index 000000000..018a3299d --- /dev/null +++ b/test/vendor/detik/detik.bash @@ -0,0 +1,265 @@ +#!/bin/bash + +directory=$(dirname "${BASH_SOURCE[0]}") +source "$directory/utils.bash" + + +# Retrieves values and attempts to compare values to an expected result (with retries).
+# @param {string} A text query that respects the appropriate syntax +# @return +# 1 Empty query +# 2 Invalid syntax +# 3 The assertion could not be verified after all the attempts +# 0 Everything is fine +try() { + + # Concatenate all the arguments into a single string + IFS=' ' + exp="$*" + + # Trim the expression + exp=$(trim "$exp") + + # Make the regular expression case-insensitive + shopt -s nocasematch; + + # Verify the expression and use it to build a request + if [[ "$exp" == "" ]]; then + echo "An empty expression was not expected." + return 1 + fi + + # Let's verify the syntax + times="" + delay="" + resource="" + name="" + property="" + expected_value="" + expected_count="" + + if [[ "$exp" =~ $try_regex_verify ]]; then + + # Extract parameters + times="${BASH_REMATCH[1]}" + delay="${BASH_REMATCH[2]}" + resource=$(to_lower_case "${BASH_REMATCH[3]}") + name="${BASH_REMATCH[4]}" + property="${BASH_REMATCH[5]}" + expected_value=$(to_lower_case "${BASH_REMATCH[6]}") + + elif [[ "$exp" =~ $try_regex_find ]]; then + + # Extract parameters + times="${BASH_REMATCH[1]}" + delay="${BASH_REMATCH[2]}" + expected_count="${BASH_REMATCH[3]}" + resource=$(to_lower_case "${BASH_REMATCH[4]}") + name="${BASH_REMATCH[5]}" + property="${BASH_REMATCH[6]}" + expected_value=$(to_lower_case "${BASH_REMATCH[7]}") + fi + + # Do we have something? + if [[ "$times" != "" ]]; then + + # Prevent line breaks from being removed in command results + IFS="" + + # Start the loop + echo "Valid expression. Verification in progress..." + code=0 + for ((i=1; i<=$times; i++)); do + + # Verify the value + verify_value $property $expected_value $resource $name "$expected_count" + code=$? + + # Break the loop prematurely? + if [[ "$code" == "0" ]]; then + break + elif [[ "$i" != "1" ]]; then + code=3 + sleep $delay + else + code=3 + fi + done + + ## Error code + return $code + fi + + # Default behavior + echo "Invalid expression: it does not respect the expected syntax." + return 2 +} + + +# Retrieves values and attempts to compare values to an expected result (without any retry). +# @param {string} A text query that respects one of the supported syntaxes +# @return +# 1 Empty query +# 2 Invalid syntax +# 3 The elements count is incorrect +# 0 Everything is fine +verify() { + + # Concatenate all the arguments into a single string + IFS=' ' + exp="$*" + + # Trim the expression + exp=$(trim "$exp") + + # Make the regular expression case-insensitive + shopt -s nocasematch; + + # Verify the expression and use it to build a request + if [[ "$exp" == "" ]]; then + echo "An empty expression was not expected." + return 1 + + elif [[ "$exp" =~ $verify_regex_count_is ]] || [[ "$exp" =~ $verify_regex_count_are ]]; then + card="${BASH_REMATCH[1]}" + resource=$(to_lower_case "${BASH_REMATCH[2]}") + name="${BASH_REMATCH[3]}" + + echo "Valid expression. Verification in progress..." + query=$(build_k8s_request "") + result=$(eval $DETIK_CLIENT_NAME get $resource $query | grep $name | tail -n +1 | wc -l | tr -d '[:space:]') + + detik_debug "----DETIK-----" + detik_debug "$BATS_TEST_FILENAME" + detik_debug "$BATS_TEST_DESCRIPTION" + detik_debug "" + detik_debug "Client query:" + detik_debug "$DETIK_CLIENT_NAME get $resource $query" + detik_debug "" + detik_debug "Result:" + detik_debug "$result" + detik_debug "----DETIK-----" + + if [[ "$result" == "$card" ]]; then + echo "Found $result $resource named $name (as expected)." + else + echo "Found $result $resource named $name (instead of $card expected)."
+ return 3 + fi + + elif [[ "$exp" =~ $verify_regex_property_is ]]; then + property="${BASH_REMATCH[1]}" + expected_value="${BASH_REMATCH[2]}" + resource=$(to_lower_case "${BASH_REMATCH[3]}") + name="${BASH_REMATCH[4]}" + + echo "Valid expression. Verification in progress..." + verify_value $property $expected_value $resource $name + + if [[ "$?" != "0" ]]; then + return 3 + fi + + else + echo "Invalid expression: it does not respect the expected syntax." + return 2 + fi +} + + +# Verifies the value of a column for a set of elements. +# @param {string} A K8s column or one of the supported aliases. +# @param {string} The expected value. +# @param {string} The resource type (e.g. pod). +# @param {string} The resource name or regex. +# @param {integer} a.k.a. "expected_count": the expected number of elements having this property (optional) +# @return +# If "expected_count" was NOT set: the number of elements with the wrong value. +# If "expected_count" was set: 101 if the elements count is not right, 0 otherwise. +verify_value() { + + # Make the parameters readable + property="$1" + expected_value=$(to_lower_case "$2") + resource="$3" + name="$4" + expected_count="$5" + + # List the items and remove the first line (the one that contains the column names) + query=$(build_k8s_request $property) + result=$(eval $DETIK_CLIENT_NAME get $resource $query | grep $name | tail -n +1) + + # Debug? + detik_debug "----DETIK-----" + detik_debug "$BATS_TEST_FILENAME" + detik_debug "$BATS_TEST_DESCRIPTION" + detik_debug "" + detik_debug "Client query:" + detik_debug "$DETIK_CLIENT_NAME get $resource $query" + detik_debug "" + detik_debug "Result:" + detik_debug "$result" + if [[ "$expected_count" != "" ]]; then + detik_debug "" + detik_debug "Expected count: $expected_count" + fi + detik_debug "----DETIK-----" + + # Is the result empty? + empty=0 + if [[ "$result" == "" ]]; then + echo "No resource of type '$resource' was found with the name '$name'." + fi + + # Verify the result + IFS=$'\n' + invalid=0 + valid=0 + for line in $result; do + + # Keep the second column (property to verify) + # and put it in lower case + value=$(to_lower_case "$line" | awk '{ print $2 }') + element=$(echo "$line" | awk '{ print $1 }') + if [[ "$value" != "$expected_value" ]]; then + echo "Current value for $element is $value..." + invalid=$((invalid + 1)) + else + echo "$element has the right value ($value)." + valid=$((valid + 1)) + fi + done + + # Do we have the right number of elements? + if [[ "$expected_count" != "" ]]; then + if [[ "$valid" != "$expected_count" ]]; then + echo "Expected $expected_count $resource named $name to have this value ($expected_value). Found $valid." + invalid=101 + else + invalid=0 + fi + fi + + return $invalid +} + + +# Builds the request for the get operation of the K8s client. +# @param {string} A K8s column or one of the supported aliases.
+# @return 0 +build_k8s_request() { + + req="-o custom-columns=NAME:.metadata.name" + if [[ "$1" == "status" ]]; then + req="$req,PROP:.status.phase" + elif [[ "$1" == "port" ]]; then + req="$req,PROP:.spec.ports[*].port" + elif [[ "$1" == "targetPort" ]]; then + req="$req,PROP:.spec.ports[*].targetPort" + elif [[ "$1" != "" ]]; then + req="$req,PROP:$1" + fi + + echo $req +} + diff --git a/test/vendor/detik/linter.bash b/test/vendor/detik/linter.bash new file mode 100755 index 000000000..ac163eb69 --- /dev/null +++ b/test/vendor/detik/linter.bash @@ -0,0 +1,244 @@ +#!/bin/bash + +directory=$(dirname "${BASH_SOURCE[0]}") +source "$directory/utils.bash" + + +# Constants +lint_try_regex="^(run[[:space:]]+)?[[:space:]]*try[[:space:]]+(.*)$" +lint_verify_regex="^(run[[:space:]]+)?[[:space:]]*verify[[:space:]]+(.*)$" + +# Global variables +errors_count=0 +verified_entries_count=0 + + +# Verifies the syntax of DETIK queries. +# @param {string} A file path +# @return +# Any integer above 0: the number of found errors +# 0 Everything is fine +lint() { + + # Verify the file exists + if [ ! -f "$1" ]; then + handle_error "'$1' does not exist or is not a regular file." + return 1 + fi + + # Make the regular expression case-insensitive + shopt -s nocasematch; + + current_line="" + current_line_number=0 + user_line_number=0 + multi_line=1 + was_multi_line=1 + while IFS='' read -r line || [[ -n "$line" ]]; do + + # Increase the line number + current_line_number=$((current_line_number + 1)) + + # Debug + detik_debug "Read line $current_line_number: $line" + + # Skip empty lines and comments + if [[ ! -n "$line" ]] || [[ "$line" =~ ^[[:space:]]*#.* ]]; then + if [[ "$multi_line" == "0" ]]; then + handle_error "Incomplete multi-line statement at $current_line_number." + current_line="" + fi + continue + fi + + # Is this line a part of a multi-line statement? + was_multi_line="$multi_line" + [[ "$line" =~ ^.*\\$ ]] + multi_line="$?" + + # Do we need to update the user line number? + if [[ "$was_multi_line" != "0" ]]; then + user_line_number="$current_line_number" + fi + + # Is this the continuation of a previous line? + if [[ "$multi_line" == "0" ]]; then + current_line="$current_line ${line::-1}" + elif [[ "$was_multi_line" == "0" ]]; then + current_line="$current_line $line" + else + current_line="$line" + fi + + # When we have a complete line... + if [[ "$multi_line" != "0" ]]; then + check_line "$current_line" "$user_line_number" + current_line="" + fi + line="" + done < "$1" + + # Output + if [[ "$verified_entries_count" == "1" ]]; then + echo "1 DETIK query was verified." + else + echo "$verified_entries_count DETIK queries were verified." + fi + + if [[ "$errors_count" == "1" ]]; then + echo "1 DETIK query was found to be invalid or malformed." + else + echo "$errors_count DETIK queries were found to be invalid or malformed." + fi + + # Prepare the result + res="$errors_count" + + # Reset global variables + errors_count=0 + verified_entries_count=0 + + return "$res" +} + + +# Verifies the correctness of a read line.
+# @param {string} The line to verify +# @param {integer} The line number +# @return 0 +check_line() { + + # Make the regular expression case-insensitive + shopt -s nocasematch; + + # Get parameters and prepare the line + line="$1" + line_number="$2" + context="Current line: $line" + + line=$(echo "$line" | sed -e 's/"[[:space:]]*"//g') + line=$(trim "$line") + context="$context\nPurged line: $line" + + # Basic case: "run try", "run verify", "try", "verify" alone + if [[ "$line" =~ ^(run[[:space:]]+)?try$ ]] || [[ "$line" =~ ^(run[[:space:]]+)?verify$ ]]; then + verified_entries_count=$((verified_entries_count + 1)) + handle_error "Empty statement at line $line_number." "$context" + + # We have "try" or "run try" followed by something + elif [[ "$line" =~ $lint_try_regex ]]; then + verified_entries_count=$((verified_entries_count + 1)) + + part=$(clean_regex_part "${BASH_REMATCH[2]}") + context="$context\nRegex part: $part" + + verify_against_pattern "$part" "$try_regex_verify" + p_verify="$?" + + verify_against_pattern "$part" "$try_regex_find" + p_find="$?" + + # detik_debug "p_verify=$p_verify, p_find=$p_find, part=$part" + if [[ "$p_verify" != "0" ]] && [[ "$p_find" != "0" ]]; then + handle_error "Invalid TRY statement at line $line_number." "$context" + fi + + # We have "verify" or "run verify" followed by something + elif [[ "$line" =~ $lint_verify_regex ]]; then + verified_entries_count=$((verified_entries_count + 1)) + + part=$(clean_regex_part "${BASH_REMATCH[2]}") + context="$context\nRegex part: $part" + + verify_against_pattern "$part" "$verify_regex_count_is" + p_is="$?" + + verify_against_pattern "$part" "$verify_regex_count_are" + p_are="$?" + + verify_against_pattern "$part" "$verify_regex_property_is" + p_prop="$?" + + # detik_debug "p_is=$p_is, p_are=$p_are, p_prop=$p_prop, part=$part" + if [[ "$p_is" != "0" ]] && [[ "$p_are" != "0" ]] && [[ "$p_prop" != "0" ]] ; then + handle_error "Invalid VERIFY statement at line $line_number." "$context" + fi + fi +} + + +# Cleans a string before being checked by a regexp. +# @param {string} The string to clean +# @return 0 +clean_regex_part() { + + part=$(trim "$1") + part=$(remove_surrounding_quotes "$part") + part=$(trim "$part") + echo "$part" +} + + +# Removes surrounding quotes. +# @param {string} The string to clean +# @return 0 +remove_surrounding_quotes() { + + # Starting and ending with a quote? Remove them. + if [[ "$1" =~ ^\"(.*)\"$ ]]; then + echo "${BASH_REMATCH[1]}" + + # Otherwise, ignore it + else + echo "$1" + fi + + return 0 +} + + +# Verifies an assertion part against a regular expression. +# Given that assertions can omit double quotes around the whole +# assertion, we try the given regular expression and an altered one. +# +# Example: run try at most 5 times ... "'nginx'" ... +# +# Here, "'nginx'" is not part of the default regular expression. +# So, we update it to allow this kind of assertion. +# +# @param {string} The line to verify +# @param {string} The pattern +# @return +# 0 if everything went fine +# not-zero in case of error +verify_against_pattern() { + + # Make the regular expression case-insensitive + shopt -s nocasematch; + + line="$1" + pattern="$2" + code=0 + if ! [[ "$line" =~ $pattern ]]; then + line=${line//\"\'/\'} + line=${line//\'\"/\'} + [[ "$line" =~ $pattern ]] + code="$?" + fi + + return "$code" +} + + +# Handles an error by printing it and updating the error count.
+# @param {string} The error message +# @param2 {string} The error context +# @return 0 +handle_error() { + + detik_debug "$2" + detik_debug "Error: $1" + + echo "$1" + errors_count=$((errors_count + 1)) +} diff --git a/test/vendor/detik/utils.bash b/test/vendor/detik/utils.bash new file mode 100755 index 000000000..7740215ec --- /dev/null +++ b/test/vendor/detik/utils.bash @@ -0,0 +1,75 @@ +#!/bin/bash + + +# The regex for the "try" key word +try_regex_verify="^at +most +([0-9]+) +times +every +([0-9]+)s +to +get +([a-z]+) +named +'([^']+)' +and +verify +that +'([^']+)' +is +'([^']+)'$" +try_regex_find="^at +most +([0-9]+) +times +every +([0-9]+)s +to +find +([0-9]+) +([a-z]+) +named +'([^']+)' +with +'([^']+)' +being +'([^']+)'$" + +# The regex for the "verify" key word +verify_regex_count_is="^there +is +(0|1) +([a-z]+) +named +'([^']+)'$" +verify_regex_count_are="^there +are +([0-9]+) +([a-z]+) +named +'([^']+)'$" +verify_regex_property_is="^'([^']+)' +is +'([^']+)' +for +([a-z]+) +named +'([^']+)'$" + + + +# Prints a string in lower case. +# @param {string} The string. +# @return 0 +to_lower_case() { + echo "$1" | tr '[:upper:]' '[:lower:]' +} + + +# Trims a text. +# @param {string} The string. +# @return 0 +trim() { + echo $1 | sed -e 's/^[[:space:]]*\([^[:space:]].*[^[:space:]]\)[[:space:]]*$/\1/' +} + + +# Trims ANSI codes (used to format strings in consoles). +# @param {string} The string. +# @return 0 +trim_ansi_codes() { + echo $1 | sed -e 's/[[:cntrl:]]\[[0-9;]*[a-zA-Z]//g' +} + + +# Adds a debug message for a given test. +# @param {string} The debug message. +# @return 0 +debug() { + debug_filename=$(basename -- $BATS_TEST_FILENAME) + mkdir -p /tmp/detik + echo -e "$1" >> "/tmp/detik/$debug_filename.debug" +} + + +# Deletes the file that contains debug messages for a given test. +# @return 0 +reset_debug() { + debug_filename=$(basename -- $BATS_TEST_FILENAME) + rm -f "/tmp/detik/$debug_filename.debug" +} + + +# Adds a debug message for a given test about DETIK. +# @param {string} The debug message. +# @return 0 +detik_debug() { + + if [[ "$DEBUG_DETIK" == "true" ]]; then + debug "$1" + fi +} + + +# Deletes the file that contains debug messages for a given test about DETIK. +# @return 0 +reset_detik_debug() { + + if [[ "$DEBUG_DETIK" == "true" ]]; then + reset_debug + fi +} diff --git a/test/vendor/k8s.bash b/test/vendor/k8s.bash new file mode 100644 index 000000000..01f94e0d1 --- /dev/null +++ b/test/vendor/k8s.bash @@ -0,0 +1,3 @@ +source "$(dirname "${BASH_SOURCE[0]}")/detik/utils.bash" +source "$(dirname "${BASH_SOURCE[0]}")/detik/detik.bash" +source "$(dirname "${BASH_SOURCE[0]}")/detik/linter.bash" diff --git a/vendor/github.com/golang/groupcache/LICENSE b/vendor/github.com/golang/groupcache/LICENSE new file mode 100644 index 000000000..37ec93a14 --- /dev/null +++ b/vendor/github.com/golang/groupcache/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. 
+ +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
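The next file vendors golang/groupcache's lru package, a small LRU cache that is documented as not safe for concurrent access. As a quick orientation before the full file, here is a minimal usage sketch of the API it exposes (hypothetical consumer code, not part of this change):

```go
package main

import (
	"fmt"

	"github.com/golang/groupcache/lru"
)

func main() {
	// Cap the cache at two entries; the least recently used entry
	// is evicted when a third is added.
	c := lru.New(2)
	c.OnEvicted = func(key lru.Key, value interface{}) {
		fmt.Printf("evicted %v=%v\n", key, value)
	}

	c.Add("a", 1)
	c.Add("b", 2)
	c.Add("c", 3) // evicts "a"

	if _, ok := c.Get("a"); !ok {
		fmt.Println(`"a" was evicted`)
	}
	if v, ok := c.Get("b"); ok {
		fmt.Println("b =", v) // still cached
	}
}
```

Note that Add lazily initializes the internal map and list, so the zero-value Cache is usable; callers that share a Cache across goroutines must add their own locking.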
diff --git a/vendor/github.com/golang/groupcache/lru/lru.go b/vendor/github.com/golang/groupcache/lru/lru.go new file mode 100644 index 000000000..eac1c7664 --- /dev/null +++ b/vendor/github.com/golang/groupcache/lru/lru.go @@ -0,0 +1,133 @@ +/* +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package lru implements an LRU cache. +package lru + +import "container/list" + +// Cache is an LRU cache. It is not safe for concurrent access. +type Cache struct { + // MaxEntries is the maximum number of cache entries before + // an item is evicted. Zero means no limit. + MaxEntries int + + // OnEvicted optionally specifies a callback function to be + // executed when an entry is purged from the cache. + OnEvicted func(key Key, value interface{}) + + ll *list.List + cache map[interface{}]*list.Element +} + +// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators +type Key interface{} + +type entry struct { + key Key + value interface{} +} + +// New creates a new Cache. +// If maxEntries is zero, the cache has no limit and it's assumed +// that eviction is done by the caller. +func New(maxEntries int) *Cache { + return &Cache{ + MaxEntries: maxEntries, + ll: list.New(), + cache: make(map[interface{}]*list.Element), + } +} + +// Add adds a value to the cache. +func (c *Cache) Add(key Key, value interface{}) { + if c.cache == nil { + c.cache = make(map[interface{}]*list.Element) + c.ll = list.New() + } + if ee, ok := c.cache[key]; ok { + c.ll.MoveToFront(ee) + ee.Value.(*entry).value = value + return + } + ele := c.ll.PushFront(&entry{key, value}) + c.cache[key] = ele + if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries { + c.RemoveOldest() + } +} + +// Get looks up a key's value from the cache. +func (c *Cache) Get(key Key) (value interface{}, ok bool) { + if c.cache == nil { + return + } + if ele, hit := c.cache[key]; hit { + c.ll.MoveToFront(ele) + return ele.Value.(*entry).value, true + } + return +} + +// Remove removes the provided key from the cache. +func (c *Cache) Remove(key Key) { + if c.cache == nil { + return + } + if ele, hit := c.cache[key]; hit { + c.removeElement(ele) + } +} + +// RemoveOldest removes the oldest item from the cache. +func (c *Cache) RemoveOldest() { + if c.cache == nil { + return + } + ele := c.ll.Back() + if ele != nil { + c.removeElement(ele) + } +} + +func (c *Cache) removeElement(e *list.Element) { + c.ll.Remove(e) + kv := e.Value.(*entry) + delete(c.cache, kv.key) + if c.OnEvicted != nil { + c.OnEvicted(kv.key, kv.value) + } +} + +// Len returns the number of items in the cache. +func (c *Cache) Len() int { + if c.cache == nil { + return 0 + } + return c.ll.Len() +} + +// Clear purges all stored items from the cache. 
+func (c *Cache) Clear() { + if c.OnEvicted != nil { + for _, e := range c.cache { + kv := e.Value.(*entry) + c.OnEvicted(kv.key, kv.value) + } + } + c.ll = nil + c.cache = nil +} diff --git a/vendor/github.com/portworx/sched-ops/k8s/core/core.go b/vendor/github.com/portworx/sched-ops/k8s/core/core.go index 0a7e8bb2a..f9364d985 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/core/core.go +++ b/vendor/github.com/portworx/sched-ops/k8s/core/core.go @@ -17,6 +17,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/record" ) const ( @@ -35,7 +36,9 @@ var ( // Ops is an interface to perform kubernetes related operations on the core resources. type Ops interface { ConfigMapOps + EndpointsOps EventOps + RecorderOps NamespaceOps NodeOps PersistentVolumeClaimOps @@ -101,6 +104,10 @@ func NewInstanceFromConfigFile(config string) (Ops, error) { type Client struct { config *rest.Config kubernetes kubernetes.Interface + // eventRecorders is a map of component to event recorders + eventRecorders map[string]record.EventRecorder + eventRecordersLock sync.Mutex + eventBroadcaster record.EventBroadcaster } // SetConfig sets the config and resets the client. @@ -118,6 +125,7 @@ func (c *Client) GetVersion() (*version.Info, error) { return c.kubernetes.Discovery().ServerVersion() } +// ResourceExists checks if resource already exists func (c *Client) ResourceExists(gvk schema.GroupVersionKind) (bool, error) { if err := c.initClient(); err != nil { return false, err @@ -162,7 +170,6 @@ func (c *Client) setClient() error { } } - return err } @@ -228,6 +235,8 @@ func (c *Client) handleWatch( err = c.WatchConfigMap(cm, fn) } else if _, ok := object.(*corev1.Pod); ok { err = c.WatchPods(namespace, fn, listOptions) + } else if sc, ok := object.(*corev1.Secret); ok { + err = c.WatchSecret(sc, fn) } else { return "", false, fmt.Errorf("unsupported object: %v given to handle watch", object) } diff --git a/vendor/github.com/portworx/sched-ops/k8s/core/endpoints.go b/vendor/github.com/portworx/sched-ops/k8s/core/endpoints.go new file mode 100644 index 000000000..79f76a568 --- /dev/null +++ b/vendor/github.com/portworx/sched-ops/k8s/core/endpoints.go @@ -0,0 +1,51 @@ +package core + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +// EndpointsOps is an interface to deal with kubernetes endpoints. +type EndpointsOps interface { + // CreateEndpoints creates a given endpoints. + CreateEndpoints(endpoints *corev1.Endpoints) (*corev1.Endpoints, error) + // GetEndpoints retrieves endpoints for a given namespace/name. + GetEndpoints(name, namespace string) (*corev1.Endpoints, error) + // PatchEndpoints applies a patch for a given endpoints. + PatchEndpoints(name, namespace string, pt types.PatchType, jsonPatch []byte) (*corev1.Endpoints, error) + // DeleteEndpoints removes endpoints for a given namespace/name. + DeleteEndpoints(name, namespace string) error +} + +// CreateEndpoints creates a given endpoints. +func (c *Client) CreateEndpoints(endpoints *corev1.Endpoints) (*corev1.Endpoints, error) { + if err := c.initClient(); err != nil { + return nil, err + } + return c.kubernetes.CoreV1().Endpoints(endpoints.Namespace).Create(endpoints) +} + +// GetEndpoints retrieves endpoints for a given namespace/name. 
+func (c *Client) GetEndpoints(name, ns string) (*corev1.Endpoints, error) {
+	if err := c.initClient(); err != nil {
+		return nil, err
+	}
+	return c.kubernetes.CoreV1().Endpoints(ns).Get(name, metav1.GetOptions{})
+}
+
+// PatchEndpoints applies a patch for a given endpoints.
+func (c *Client) PatchEndpoints(name, ns string, pt types.PatchType, jsonPatch []byte) (*corev1.Endpoints, error) {
+	if err := c.initClient(); err != nil {
+		return nil, err
+	}
+	return c.kubernetes.CoreV1().Endpoints(ns).Patch(name, pt, jsonPatch)
+}
+
+// DeleteEndpoints removes endpoints for a given namespace/name.
+func (c *Client) DeleteEndpoints(name, ns string) error {
+	if err := c.initClient(); err != nil {
+		return err
+	}
+	return c.kubernetes.CoreV1().Endpoints(ns).Delete(name, nil)
+}
diff --git a/vendor/github.com/portworx/sched-ops/k8s/core/events.go b/vendor/github.com/portworx/sched-ops/k8s/core/events.go
index 6565cfe05..63a8a9b50 100644
--- a/vendor/github.com/portworx/sched-ops/k8s/core/events.go
+++ b/vendor/github.com/portworx/sched-ops/k8s/core/events.go
@@ -2,7 +2,12 @@ package core
 
 import (
 	corev1 "k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/kubernetes/scheme"
+	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+	"k8s.io/client-go/tools/record"
 )
 
 // EventOps is an interface to put and get k8s events
@@ -28,3 +33,37 @@ func (c *Client) ListEvents(namespace string, opts metav1.ListOptions) (*corev1.
 	}
 	return c.kubernetes.CoreV1().Events(namespace).List(opts)
 }
+
+// RecorderOps is an interface to record k8s events
+type RecorderOps interface {
+	// RecordEvent records an event into k8s using client-go's EventRecorder interface.
+	// It takes the event source and the object on which the event is being raised.
+ RecordEvent(source v1.EventSource, object runtime.Object, eventtype, reason, message string) +} + +func (c *Client) RecordEvent(source v1.EventSource, object runtime.Object, eventtype, reason, message string) { + if err := c.initClient(); err != nil { + return + } + c.eventRecordersLock.Lock() + if len(c.eventRecorders) == 0 { + c.eventRecorders = make(map[string]record.EventRecorder) + c.eventBroadcaster = record.NewBroadcaster() + c.eventBroadcaster.StartRecordingToSink( + &typedcorev1.EventSinkImpl{ + Interface: c.kubernetes.CoreV1().Events(""), // use the namespace from the object + }, + ) + } + key := source.Component + "-" + source.Host + eventRecorder, exists := c.eventRecorders[key] + if !exists { + eventRecorder = c.eventBroadcaster.NewRecorder( + scheme.Scheme, + source, + ) + c.eventRecorders[key] = eventRecorder + } + c.eventRecordersLock.Unlock() + eventRecorder.Event(object, eventtype, reason, message) +} diff --git a/vendor/github.com/portworx/sched-ops/k8s/core/persistentvolumeclaims.go b/vendor/github.com/portworx/sched-ops/k8s/core/persistentvolumeclaims.go index 1aceaf6a6..bca972c52 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/core/persistentvolumeclaims.go +++ b/vendor/github.com/portworx/sched-ops/k8s/core/persistentvolumeclaims.go @@ -46,6 +46,8 @@ type PersistentVolumeClaimOps interface { GetPVCsUsingStorageClass(scName string) ([]corev1.PersistentVolumeClaim, error) // GetStorageProvisionerForPVC returns storage provisioner for given PVC if it exists GetStorageProvisionerForPVC(pvc *corev1.PersistentVolumeClaim) (string, error) + // GetStorageClassForPVC returns the appropriate storage class object for a certain pvc + GetStorageClassForPVC(pvc *corev1.PersistentVolumeClaim) (*storagev1.StorageClass, error) } // CreatePersistentVolumeClaim creates the given persistent volume claim @@ -259,7 +261,7 @@ func (c *Client) GetPersistentVolumeClaimParams(pvc *corev1.PersistentVolumeClai requestGB := uint64(roundUpSize(capacity.Value(), 1024*1024*1024)) params["size"] = fmt.Sprintf("%dG", requestGB) - sc, err := c.getStorageClassForPVC(result) + sc, err := c.GetStorageClassForPVC(result) if err != nil { return nil, fmt.Errorf("failed to get storage class for pvc: %v", result.Name) } @@ -284,7 +286,7 @@ func (c *Client) GetPVCsUsingStorageClass(scName string) ([]corev1.PersistentVol } for _, pvc := range pvcs.Items { - sc, err := c.getStorageClassForPVC(&pvc) + sc, err := c.GetStorageClassForPVC(&pvc) if err == nil && sc.Name == scName { retList = append(retList, pvc) } @@ -301,7 +303,7 @@ func (c *Client) GetStorageProvisionerForPVC(pvc *corev1.PersistentVolumeClaim) return provisionerName, nil } - sc, err := c.getStorageClassForPVC(pvc) + sc, err := c.GetStorageClassForPVC(pvc) if err != nil { return "", err } @@ -321,6 +323,7 @@ func (c *Client) isPVCShared(pvc *corev1.PersistentVolumeClaim) bool { return false } -func (c *Client) getStorageClassForPVC(pvc *corev1.PersistentVolumeClaim) (*storagev1.StorageClass, error) { +// GetStorageClassForPVC returns the appropriate storage class object for a certain pvc +func (c *Client) GetStorageClassForPVC(pvc *corev1.PersistentVolumeClaim) (*storagev1.StorageClass, error) { return common.GetStorageClassForPVC(c.kubernetes.StorageV1(), pvc) } diff --git a/vendor/github.com/portworx/sched-ops/k8s/core/pods.go b/vendor/github.com/portworx/sched-ops/k8s/core/pods.go index c9fcd5887..22a59a1c8 100644 --- a/vendor/github.com/portworx/sched-ops/k8s/core/pods.go +++ b/vendor/github.com/portworx/sched-ops/k8s/core/pods.go @@ 
-27,11 +27,11 @@ type PodOps interface {
 	// GetPodsByNode returns all pods in given namespace and given k8s node name.
 	// If namespace is empty, it will return pods from all namespaces.
 	GetPodsByNode(nodeName, namespace string) (*corev1.PodList, error)
-	// GetPodsByNodeByLabels returns all pods in given namespace and given k8s node name
+	// GetPodsByNodeAndLabels returns all pods in given namespace and given k8s node name
 	// with a given label selector.
 	// If namespace is empty, it will return pods from all namespaces.
-	GetPodsByNodeByLabels(nodeName, namespace string, labelSelector map[string]string) (*corev1.PodList, error)
-	// GetPodsByOwner returns pods for the given owner and namespaces
+	GetPodsByNodeAndLabels(nodeName, namespace string, labelSelector map[string]string) (*corev1.PodList, error)
+	// GetPodsByOwner returns pods for the given owner and namespace
 	GetPodsByOwner(types.UID, string) ([]corev1.Pod, error)
 	// GetPodsUsingPV returns all pods in cluster using given pv
 	GetPodsUsingPV(pvName string) ([]corev1.Pod, error)
@@ -134,9 +134,9 @@ func (c *Client) GetPodsByNode(nodeName, namespace string) (*corev1.PodList, err
 	return c.getPodsWithListOptions(namespace, listOptions)
 }
 
-// GetPodsByNodeByLabels returns all pods in given namespace and given k8s node name for the given labels
+// GetPodsByNodeAndLabels returns all pods in given namespace and given k8s node name for the given labels
 // If namespace is empty, it will return pods from all namespaces
-func (c *Client) GetPodsByNodeByLabels(nodeName, namespace string, labels map[string]string) (*corev1.PodList, error) {
+func (c *Client) GetPodsByNodeAndLabels(nodeName, namespace string, labels map[string]string) (*corev1.PodList, error) {
 	if len(nodeName) == 0 {
 		return nil, fmt.Errorf("node name is required for this API")
 	}
diff --git a/vendor/github.com/portworx/sched-ops/k8s/core/secrets.go b/vendor/github.com/portworx/sched-ops/k8s/core/secrets.go
index 4f5457232..4dcd7c8cc 100644
--- a/vendor/github.com/portworx/sched-ops/k8s/core/secrets.go
+++ b/vendor/github.com/portworx/sched-ops/k8s/core/secrets.go
@@ -4,7 +4,9 @@ import (
 	"strings"
 
 	corev1 "k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
 )
 
 // SecretOps is an interface to perform k8s Secret operations
@@ -19,6 +21,8 @@ type SecretOps interface {
 	UpdateSecretData(string, string, map[string][]byte) (*corev1.Secret, error)
 	// DeleteSecret deletes the given secret
 	DeleteSecret(name, namespace string) error
+	// WatchSecret watches the given secret and invokes the callback fn on changes
+	WatchSecret(*corev1.Secret, WatchFunc) error
 }
 
 // GetSecret gets the secrets object given its name and namespace
@@ -86,3 +90,23 @@ func (c *Client) DeleteSecret(name, namespace string) error {
 		PropagationPolicy: &deleteForegroundPolicy,
 	})
 }
+
+// WatchSecret watches the given secret and invokes the callback fn on changes
+func (c *Client) WatchSecret(secret *v1.Secret, fn WatchFunc) error {
+	if err := c.initClient(); err != nil {
+		return err
+	}
+
+	listOptions := metav1.ListOptions{
+		FieldSelector: fields.OneTermEqualSelector("metadata.name", secret.Name).String(),
+		Watch:         true,
+	}
+
+	watchInterface, err := c.kubernetes.CoreV1().Secrets(secret.Namespace).Watch(listOptions)
+	if err != nil {
+		return err
+	}
+
+	// fire off watch function
+	go c.handleWatch(watchInterface, secret, "", fn, listOptions)
+	return nil
+}
diff --git a/vendor/github.com/portworx/sched-ops/k8s/core/serviceaccounts.go b/vendor/github.com/portworx/sched-ops/k8s/core/serviceaccounts.go
index 6f3b62fb2..1910759b6 100644
--- a/vendor/github.com/portworx/sched-ops/k8s/core/serviceaccounts.go
+++ b/vendor/github.com/portworx/sched-ops/k8s/core/serviceaccounts.go
@@ -11,6 +11,8 @@ type ServiceAccountOps interface {
 	CreateServiceAccount(account *corev1.ServiceAccount) (*corev1.ServiceAccount, error)
 	// GetServiceAccount gets the given service account
 	GetServiceAccount(name, namespace string) (*corev1.ServiceAccount, error)
+	// UpdateServiceAccount updates the given service account
+	UpdateServiceAccount(account *corev1.ServiceAccount) (*corev1.ServiceAccount, error)
 	// DeleteServiceAccount deletes the given service account
 	DeleteServiceAccount(accountName, namespace string) error
 }
@@ -33,6 +35,15 @@ func (c *Client) GetServiceAccount(name, namespace string) (*corev1.ServiceAccou
 	return c.kubernetes.CoreV1().ServiceAccounts(namespace).Get(name, metav1.GetOptions{})
 }
 
+// UpdateServiceAccount updates the given service account
+func (c *Client) UpdateServiceAccount(account *corev1.ServiceAccount) (*corev1.ServiceAccount, error) {
+	if err := c.initClient(); err != nil {
+		return nil, err
+	}
+
+	return c.kubernetes.CoreV1().ServiceAccounts(account.Namespace).Update(account)
+}
+
 // DeleteServiceAccount deletes the given service account
 func (c *Client) DeleteServiceAccount(accountName, namespace string) error {
 	if err := c.initClient(); err != nil {
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go
index 8987e74c6..e2c92c4c9 100644
--- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go
@@ -100,7 +100,27 @@ func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
 		}
 		iter.ReportError("DecodeNumber", err.Error())
 	default:
+		// init depth, if needed
+		if iter.Attachment == nil {
+			iter.Attachment = int(1)
+		}
+
+		// remember current depth
+		originalAttachment := iter.Attachment
+
+		// increment depth before descending
+		if i, ok := iter.Attachment.(int); ok {
+			iter.Attachment = i + 1
+			if i > 10000 {
+				iter.ReportError("parse", "exceeded max depth")
+				return
+			}
+		}
+
 		*(*interface{})(ptr) = iter.Read()
+
+		// restore current depth
+		iter.Attachment = originalAttachment
 	}
 }
diff --git a/vendor/k8s.io/apimachinery/pkg/util/json/json.go b/vendor/k8s.io/apimachinery/pkg/util/json/json.go
index 10c8cb837..0e2e30175 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/json/json.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/json/json.go
@@ -19,6 +19,7 @@ package json
 import (
 	"bytes"
 	"encoding/json"
+	"fmt"
 	"io"
 )
@@ -34,6 +35,9 @@ func Marshal(v interface{}) ([]byte, error) {
 	return json.Marshal(v)
 }
 
+// limit recursive depth to prevent stack overflow errors
+const maxDepth = 10000
+
 // Unmarshal unmarshals the given data
 // If v is a *map[string]interface{}, numbers are converted to int64 or float64
 func Unmarshal(data []byte, v interface{}) error {
@@ -48,7 +52,7 @@ func Unmarshal(data []byte, v interface{}) error {
 			return err
 		}
 		// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
-		return convertMapNumbers(*v)
+		return convertMapNumbers(*v, 0)
 
 	case *[]interface{}:
 		// Build a decoder from the given data
@@ -60,7 +64,7 @@ func Unmarshal(data []byte, v interface{}) error {
 			return err
 		}
 		// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
-		return convertSliceNumbers(*v)
+		return convertSliceNumbers(*v, 0)
 
 	default:
 		return json.Unmarshal(data, v)
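The apimachinery changes above (and the hunk that follows) bound JSON nesting at two layers: jsoniter's streaming decoder tracks depth through iter.Attachment, and the util/json post-processing helpers thread an explicit depth counter capped at maxDepth (10000). A minimal sketch of the effect (hypothetical test code, not part of this change):

```go
package main

import (
	"fmt"
	"strings"

	utiljson "k8s.io/apimachinery/pkg/util/json"
)

func main() {
	// Build a document nested deeper than the 10000-level guard.
	depth := 10001
	doc := strings.Repeat(`{"a":`, depth) + `"x"` + strings.Repeat("}", depth)

	var out map[string]interface{}
	// With the guard in place, decoding returns an error along the lines of
	// "exceeded max depth of 10000" instead of risking a stack overflow.
	if err := utiljson.Unmarshal([]byte(doc), &out); err != nil {
		fmt.Println("rejected:", err)
	}
}
```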
@@ -69,16 +73,20 @@ func Unmarshal(data []byte, v interface{}) error { // convertMapNumbers traverses the map, converting any json.Number values to int64 or float64. // values which are map[string]interface{} or []interface{} are recursively visited -func convertMapNumbers(m map[string]interface{}) error { +func convertMapNumbers(m map[string]interface{}, depth int) error { + if depth > maxDepth { + return fmt.Errorf("exceeded max depth of %d", maxDepth) + } + var err error for k, v := range m { switch v := v.(type) { case json.Number: m[k], err = convertNumber(v) case map[string]interface{}: - err = convertMapNumbers(v) + err = convertMapNumbers(v, depth+1) case []interface{}: - err = convertSliceNumbers(v) + err = convertSliceNumbers(v, depth+1) } if err != nil { return err @@ -89,16 +97,20 @@ func convertMapNumbers(m map[string]interface{}) error { // convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64. // values which are map[string]interface{} or []interface{} are recursively visited -func convertSliceNumbers(s []interface{}) error { +func convertSliceNumbers(s []interface{}, depth int) error { + if depth > maxDepth { + return fmt.Errorf("exceeded max depth of %d", maxDepth) + } + var err error for i, v := range s { switch v := v.(type) { case json.Number: s[i], err = convertNumber(v) case map[string]interface{}: - err = convertMapNumbers(v) + err = convertMapNumbers(v, depth+1) case []interface{}: - err = convertSliceNumbers(v) + err = convertSliceNumbers(v, depth+1) } if err != nil { return err diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS new file mode 100644 index 000000000..3f72c69ba --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- pwittrock +reviewers: +- mengqiy +- apelisse diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go new file mode 100644 index 000000000..16501d5af --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go @@ -0,0 +1,102 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mergepatch + +import ( + "errors" + "fmt" + "reflect" +) + +var ( + ErrBadJSONDoc = errors.New("invalid JSON document") + ErrNoListOfLists = errors.New("lists of lists are not supported") + ErrBadPatchFormatForPrimitiveList = errors.New("invalid patch format of primitive list") + ErrBadPatchFormatForRetainKeys = errors.New("invalid patch format of retainKeys") + ErrBadPatchFormatForSetElementOrderList = errors.New("invalid patch format of setElementOrder list") + ErrPatchContentNotMatchRetainKeys = errors.New("patch content doesn't match retainKeys list") + ErrUnsupportedStrategicMergePatchFormat = errors.New("strategic merge patch format is not supported") +) + +func ErrNoMergeKey(m map[string]interface{}, k string) error { + return fmt.Errorf("map: %v does not contain declared merge key: %s", m, k) +} + +func ErrBadArgType(expected, actual interface{}) error { + return fmt.Errorf("expected a %s, but received a %s", + reflect.TypeOf(expected), + reflect.TypeOf(actual)) +} + +func ErrBadArgKind(expected, actual interface{}) error { + var expectedKindString, actualKindString string + if expected == nil { + expectedKindString = "nil" + } else { + expectedKindString = reflect.TypeOf(expected).Kind().String() + } + if actual == nil { + actualKindString = "nil" + } else { + actualKindString = reflect.TypeOf(actual).Kind().String() + } + return fmt.Errorf("expected a %s, but received a %s", expectedKindString, actualKindString) +} + +func ErrBadPatchType(t interface{}, m map[string]interface{}) error { + return fmt.Errorf("unknown patch type: %s in map: %v", t, m) +} + +// IsPreconditionFailed returns true if the provided error indicates +// a precondition failed. +func IsPreconditionFailed(err error) bool { + _, ok := err.(ErrPreconditionFailed) + return ok +} + +type ErrPreconditionFailed struct { + message string +} + +func NewErrPreconditionFailed(target map[string]interface{}) ErrPreconditionFailed { + s := fmt.Sprintf("precondition failed for: %v", target) + return ErrPreconditionFailed{s} +} + +func (err ErrPreconditionFailed) Error() string { + return err.message +} + +type ErrConflict struct { + message string +} + +func NewErrConflict(patch, current string) ErrConflict { + s := fmt.Sprintf("patch:\n%s\nconflicts with changes made from original to current:\n%s\n", patch, current) + return ErrConflict{s} +} + +func (err ErrConflict) Error() string { + return err.message +} + +// IsConflict returns true if the provided error indicates +// a conflict between the patch and the current configuration. +func IsConflict(err error) bool { + _, ok := err.(ErrConflict) + return ok +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go new file mode 100644 index 000000000..990fa0d43 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go @@ -0,0 +1,133 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mergepatch + +import ( + "fmt" + "reflect" + + "github.com/davecgh/go-spew/spew" + "sigs.k8s.io/yaml" +) + +// PreconditionFunc asserts that an incompatible change is not present within a patch. +type PreconditionFunc func(interface{}) bool + +// RequireKeyUnchanged returns a precondition function that fails if the provided key +// is present in the patch (indicating that its value has changed). +func RequireKeyUnchanged(key string) PreconditionFunc { + return func(patch interface{}) bool { + patchMap, ok := patch.(map[string]interface{}) + if !ok { + return true + } + + // The presence of key means that its value has been changed, so the test fails. + _, ok = patchMap[key] + return !ok + } +} + +// RequireMetadataKeyUnchanged creates a precondition function that fails +// if the metadata.key is present in the patch (indicating its value +// has changed). +func RequireMetadataKeyUnchanged(key string) PreconditionFunc { + return func(patch interface{}) bool { + patchMap, ok := patch.(map[string]interface{}) + if !ok { + return true + } + patchMap1, ok := patchMap["metadata"] + if !ok { + return true + } + patchMap2, ok := patchMap1.(map[string]interface{}) + if !ok { + return true + } + _, ok = patchMap2[key] + return !ok + } +} + +func ToYAMLOrError(v interface{}) string { + y, err := toYAML(v) + if err != nil { + return err.Error() + } + + return y +} + +func toYAML(v interface{}) (string, error) { + y, err := yaml.Marshal(v) + if err != nil { + return "", fmt.Errorf("yaml marshal failed:%v\n%v\n", err, spew.Sdump(v)) + } + + return string(y), nil +} + +// HasConflicts returns true if the left and right JSON interface objects overlap with +// different values in any key. All keys are required to be strings. Since patches of the +// same Type have congruent keys, this is valid for multiple patch types. This method +// supports JSON merge patch semantics. +// +// NOTE: Numbers with different types (e.g. int(0) vs int64(0)) will be detected as conflicts. +// Make sure the unmarshaling of left and right are consistent (e.g. use the same library). 
+func HasConflicts(left, right interface{}) (bool, error) { + switch typedLeft := left.(type) { + case map[string]interface{}: + switch typedRight := right.(type) { + case map[string]interface{}: + for key, leftValue := range typedLeft { + rightValue, ok := typedRight[key] + if !ok { + continue + } + if conflict, err := HasConflicts(leftValue, rightValue); err != nil || conflict { + return conflict, err + } + } + + return false, nil + default: + return true, nil + } + case []interface{}: + switch typedRight := right.(type) { + case []interface{}: + if len(typedLeft) != len(typedRight) { + return true, nil + } + + for i := range typedLeft { + if conflict, err := HasConflicts(typedLeft[i], typedRight[i]); err != nil || conflict { + return conflict, err + } + } + + return false, nil + default: + return true, nil + } + case string, float64, bool, int64, nil: + return !reflect.DeepEqual(left, right), nil + default: + return true, fmt.Errorf("unknown type: %v", reflect.TypeOf(left)) + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS new file mode 100644 index 000000000..cfee199fa --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS @@ -0,0 +1,8 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- pwittrock +- mengqiy +reviewers: +- mengqiy +- apelisse diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go new file mode 100644 index 000000000..ab66d0452 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go @@ -0,0 +1,49 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package strategicpatch + +import ( + "fmt" +) + +type LookupPatchMetaError struct { + Path string + Err error +} + +func (e LookupPatchMetaError) Error() string { + return fmt.Sprintf("LookupPatchMetaError(%s): %v", e.Path, e.Err) +} + +type FieldNotFoundError struct { + Path string + Field string +} + +func (e FieldNotFoundError) Error() string { + return fmt.Sprintf("unable to find api field %q in %s", e.Field, e.Path) +} + +type InvalidTypeError struct { + Path string + Expected string + Actual string +} + +func (e InvalidTypeError) Error() string { + return fmt.Sprintf("invalid type for %s: got %q, expected %q", e.Path, e.Actual, e.Expected) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go new file mode 100644 index 000000000..c31de15e7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go @@ -0,0 +1,194 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package strategicpatch
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+
+	"k8s.io/apimachinery/pkg/util/mergepatch"
+	forkedjson "k8s.io/apimachinery/third_party/forked/golang/json"
+	openapi "k8s.io/kube-openapi/pkg/util/proto"
+)
+
+type PatchMeta struct {
+	patchStrategies []string
+	patchMergeKey   string
+}
+
+func (pm PatchMeta) GetPatchStrategies() []string {
+	if pm.patchStrategies == nil {
+		return []string{}
+	}
+	return pm.patchStrategies
+}
+
+func (pm PatchMeta) SetPatchStrategies(ps []string) {
+	pm.patchStrategies = ps
+}
+
+func (pm PatchMeta) GetPatchMergeKey() string {
+	return pm.patchMergeKey
+}
+
+func (pm PatchMeta) SetPatchMergeKey(pmk string) {
+	pm.patchMergeKey = pmk
+}
+
+type LookupPatchMeta interface {
+	// LookupPatchMetadataForStruct gets subschema and the patch metadata (e.g. patch strategy and merge key) for map.
+	LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error)
+	// LookupPatchMetadataForSlice gets subschema and the patch metadata for slice.
+	LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error)
+	// Get the type name of the field
+	Name() string
+}
+
+type PatchMetaFromStruct struct {
+	T reflect.Type
+}
+
+func NewPatchMetaFromStruct(dataStruct interface{}) (PatchMetaFromStruct, error) {
+	t, err := getTagStructType(dataStruct)
+	return PatchMetaFromStruct{T: t}, err
+}
+
+var _ LookupPatchMeta = PatchMetaFromStruct{}
+
+func (s PatchMetaFromStruct) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) {
+	fieldType, fieldPatchStrategies, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadataForStruct(s.T, key)
+	if err != nil {
+		return nil, PatchMeta{}, err
+	}
+
+	return PatchMetaFromStruct{T: fieldType},
+		PatchMeta{
+			patchStrategies: fieldPatchStrategies,
+			patchMergeKey:   fieldPatchMergeKey,
+		}, nil
+}
+
+func (s PatchMetaFromStruct) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) {
+	subschema, patchMeta, err := s.LookupPatchMetadataForStruct(key)
+	if err != nil {
+		return nil, PatchMeta{}, err
+	}
+	elemPatchMetaFromStruct := subschema.(PatchMetaFromStruct)
+	t := elemPatchMetaFromStruct.T
+
+	var elemType reflect.Type
+	switch t.Kind() {
+	// If t is an array or a slice, get the element type.
+	// If element is still an array or a slice, return an error.
+	// Otherwise, return element type.
+	case reflect.Array, reflect.Slice:
+		elemType = t.Elem()
+		if elemType.Kind() == reflect.Array || elemType.Kind() == reflect.Slice {
+			return nil, PatchMeta{}, errors.New("unexpected slice of slice")
+		}
+	// If t is a pointer, get the underlying element.
+	// If the underlying element is neither an array nor a slice, the pointer is pointing to a slice,
+	// e.g. https://github.com/kubernetes/kubernetes/blob/bc22e206c79282487ea0bf5696d5ccec7e839a76/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go#L2782-L2822
+	// If the underlying element is either an array or a slice, return its element type.
+ case reflect.Ptr: + t = t.Elem() + if t.Kind() == reflect.Array || t.Kind() == reflect.Slice { + t = t.Elem() + } + elemType = t + default: + return nil, PatchMeta{}, fmt.Errorf("expected slice or array type, but got: %s", s.T.Kind().String()) + } + + return PatchMetaFromStruct{T: elemType}, patchMeta, nil +} + +func (s PatchMetaFromStruct) Name() string { + return s.T.Kind().String() +} + +func getTagStructType(dataStruct interface{}) (reflect.Type, error) { + if dataStruct == nil { + return nil, mergepatch.ErrBadArgKind(struct{}{}, nil) + } + + t := reflect.TypeOf(dataStruct) + // Get the underlying type for pointers + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + return nil, mergepatch.ErrBadArgKind(struct{}{}, dataStruct) + } + + return t, nil +} + +func GetTagStructTypeOrDie(dataStruct interface{}) reflect.Type { + t, err := getTagStructType(dataStruct) + if err != nil { + panic(err) + } + return t +} + +type PatchMetaFromOpenAPI struct { + Schema openapi.Schema +} + +func NewPatchMetaFromOpenAPI(s openapi.Schema) PatchMetaFromOpenAPI { + return PatchMetaFromOpenAPI{Schema: s} +} + +var _ LookupPatchMeta = PatchMetaFromOpenAPI{} + +func (s PatchMetaFromOpenAPI) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) { + if s.Schema == nil { + return nil, PatchMeta{}, nil + } + kindItem := NewKindItem(key, s.Schema.GetPath()) + s.Schema.Accept(kindItem) + + err := kindItem.Error() + if err != nil { + return nil, PatchMeta{}, err + } + return PatchMetaFromOpenAPI{Schema: kindItem.subschema}, + kindItem.patchmeta, nil +} + +func (s PatchMetaFromOpenAPI) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) { + if s.Schema == nil { + return nil, PatchMeta{}, nil + } + sliceItem := NewSliceItem(key, s.Schema.GetPath()) + s.Schema.Accept(sliceItem) + + err := sliceItem.Error() + if err != nil { + return nil, PatchMeta{}, err + } + return PatchMetaFromOpenAPI{Schema: sliceItem.subschema}, + sliceItem.patchmeta, nil +} + +func (s PatchMetaFromOpenAPI) Name() string { + schema := s.Schema + return schema.GetName() +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go new file mode 100644 index 000000000..ddf998172 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go @@ -0,0 +1,2174 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package strategicpatch + +import ( + "fmt" + "reflect" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/mergepatch" +) + +// An alternate implementation of JSON Merge Patch +// (https://tools.ietf.org/html/rfc7386) which supports the ability to annotate +// certain fields with metadata that indicates whether the elements of JSON +// lists should be merged or replaced. 
+//
+// For more information, see the PATCH section of docs/devel/api-conventions.md.
+//
+// Some of the content of this package was borrowed with minor adaptations from
+// evanphx/json-patch and openshift/origin.
+
+const (
+	directiveMarker  = "$patch"
+	deleteDirective  = "delete"
+	replaceDirective = "replace"
+	mergeDirective   = "merge"
+
+	retainKeysStrategy = "retainKeys"
+
+	deleteFromPrimitiveListDirectivePrefix = "$deleteFromPrimitiveList"
+	retainKeysDirective                    = "$" + retainKeysStrategy
+	setElementOrderDirectivePrefix         = "$setElementOrder"
+)
+
+// JSONMap is a representation of a JSON object encoded as map[string]interface{},
+// where the children can be either map[string]interface{}, []interface{} or a
+// primitive type.
+// Operating on JSONMap representation is much faster as it doesn't require any
+// json marshaling and/or unmarshaling operations.
+type JSONMap map[string]interface{}
+
+type DiffOptions struct {
+	// SetElementOrder determines whether we generate the $setElementOrder parallel list.
+	SetElementOrder bool
+	// IgnoreChangesAndAdditions indicates if we keep the changes and additions in the patch.
+	IgnoreChangesAndAdditions bool
+	// IgnoreDeletions indicates if we keep the deletions in the patch.
+	IgnoreDeletions bool
+	// We introduce a new value retainKeys for patchStrategy.
+	// It indicates that all fields needing to be preserved must be
+	// present in the `retainKeys` list.
+	// And the fields that are present will be merged with live object.
+	// All the missing fields will be cleared when patching.
+	BuildRetainKeysDirective bool
+}
+
+type MergeOptions struct {
+	// MergeParallelList indicates if we are merging the parallel list.
+	// We don't merge parallel lists when calling mergeMap() in CreateThreeWayMergePatch(),
+	// which is called client-side.
+	// We only merge parallel lists when calling mergeMap() in StrategicMergeMapPatch(),
+	// which is called server-side.
+	MergeParallelList bool
+	// IgnoreUnmatchedNulls indicates if we should process the unmatched nulls.
+	IgnoreUnmatchedNulls bool
+}
+
+// The following code is adapted from github.com/openshift/origin/pkg/util/jsonmerge.
+// Instead of defining a Delta that holds an original, a patch and a set of preconditions,
+// the reconcile method accepts a set of preconditions as an argument.
+
+// CreateTwoWayMergePatch creates a patch that can be passed to StrategicMergePatch from an original
+// document and a modified document, which are passed to the method as json encoded content. It will
+// return a patch that yields the modified document when applied to the original document, or an error
+// if either of the two documents is invalid.
+func CreateTwoWayMergePatch(original, modified []byte, dataStruct interface{}, fns ...mergepatch.PreconditionFunc) ([]byte, error) {
+	schema, err := NewPatchMetaFromStruct(dataStruct)
+	if err != nil {
+		return nil, err
+	}
+
+	return CreateTwoWayMergePatchUsingLookupPatchMeta(original, modified, schema, fns...)
+}
+
+func CreateTwoWayMergePatchUsingLookupPatchMeta(
+	original, modified []byte, schema LookupPatchMeta, fns ...mergepatch.PreconditionFunc) ([]byte, error) {
+	originalMap := map[string]interface{}{}
+	if len(original) > 0 {
+		if err := json.Unmarshal(original, &originalMap); err != nil {
+			return nil, mergepatch.ErrBadJSONDoc
+		}
+	}
+
+	modifiedMap := map[string]interface{}{}
+	if len(modified) > 0 {
+		if err := json.Unmarshal(modified, &modifiedMap); err != nil {
+			return nil, mergepatch.ErrBadJSONDoc
+		}
+	}
+
+	patchMap, err := CreateTwoWayMergeMapPatchUsingLookupPatchMeta(originalMap, modifiedMap, schema, fns...)
+	if err != nil {
+		return nil, err
+	}
+
+	return json.Marshal(patchMap)
+}
+
+// CreateTwoWayMergeMapPatch creates a patch from original and modified JSON objects,
+// encoded as JSONMap.
+// The serialized version of the map can then be passed to StrategicMergeMapPatch.
+func CreateTwoWayMergeMapPatch(original, modified JSONMap, dataStruct interface{}, fns ...mergepatch.PreconditionFunc) (JSONMap, error) {
+	schema, err := NewPatchMetaFromStruct(dataStruct)
+	if err != nil {
+		return nil, err
+	}
+
+	return CreateTwoWayMergeMapPatchUsingLookupPatchMeta(original, modified, schema, fns...)
+}
+
+func CreateTwoWayMergeMapPatchUsingLookupPatchMeta(original, modified JSONMap, schema LookupPatchMeta, fns ...mergepatch.PreconditionFunc) (JSONMap, error) {
+	diffOptions := DiffOptions{
+		SetElementOrder: true,
+	}
+	patchMap, err := diffMaps(original, modified, schema, diffOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	// Apply the preconditions to the patch, and return an error if any of them fail.
+	for _, fn := range fns {
+		if !fn(patchMap) {
+			return nil, mergepatch.NewErrPreconditionFailed(patchMap)
+		}
+	}
+
+	return patchMap, nil
+}
+
+// Returns a (recursive) strategic merge patch that yields modified when applied to original.
+// Including:
+// - Adding fields to the patch present in modified, missing from original
+// - Setting fields to the patch present in modified and original with different values
+// - Delete fields present in original, missing from modified through
+//   - IFF map field - set to nil in patch
+//   - IFF list of maps && merge strategy - use deleteDirective for the elements
+//   - IFF list of primitives && merge strategy - use parallel deletion list
+//   - IFF list of maps or primitives with replace strategy (default) - set patch value to the value in modified
+// - Build $retainKeys directive for fields with retainKeys patch strategy
+func diffMaps(original, modified map[string]interface{}, schema LookupPatchMeta, diffOptions DiffOptions) (map[string]interface{}, error) {
+	patch := map[string]interface{}{}
+
+	// This will be used to build the $retainKeys directive sent in the patch
+	retainKeysList := make([]interface{}, 0, len(modified))
+
+	// Compare each value in the modified map against the value in the original map
+	for key, modifiedValue := range modified {
+		// Track non-nil fields for the $retainKeys directive
+		if diffOptions.BuildRetainKeysDirective && modifiedValue != nil {
+			retainKeysList = append(retainKeysList, key)
+		}
+
+		originalValue, ok := original[key]
+		if !ok {
+			// Key was added, so add to patch
+			if !diffOptions.IgnoreChangesAndAdditions {
+				patch[key] = modifiedValue
+			}
+			continue
+		}
+
+		// The patch may have a patch directive
+		// TODO: figure out if we need this. This shouldn't be needed by apply. When would the original map have patch directives in it?
+		foundDirectiveMarker, err := handleDirectiveMarker(key, originalValue, modifiedValue, patch)
+		if err != nil {
+			return nil, err
+		}
+		if foundDirectiveMarker {
+			continue
+		}
+
+		if reflect.TypeOf(originalValue) != reflect.TypeOf(modifiedValue) {
+			// Types have changed, so add to patch
+			if !diffOptions.IgnoreChangesAndAdditions {
+				patch[key] = modifiedValue
+			}
+			continue
+		}
+
+		// Types are the same, so compare values
+		switch originalValueTyped := originalValue.(type) {
+		case map[string]interface{}:
+			modifiedValueTyped := modifiedValue.(map[string]interface{})
+			err = handleMapDiff(key, originalValueTyped, modifiedValueTyped, patch, schema, diffOptions)
+		case []interface{}:
+			modifiedValueTyped := modifiedValue.([]interface{})
+			err = handleSliceDiff(key, originalValueTyped, modifiedValueTyped, patch, schema, diffOptions)
+		default:
+			replacePatchFieldIfNotEqual(key, originalValue, modifiedValue, patch, diffOptions)
+		}
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	updatePatchIfMissing(original, modified, patch, diffOptions)
+	// Insert the retainKeysList iff there are values present in the retainKeysList and
+	// either of the following is true:
+	// - the patch is not empty
+	// - there are additional fields in original that need to be cleared
+	if len(retainKeysList) > 0 &&
+		(len(patch) > 0 || hasAdditionalNewField(original, modified)) {
+		patch[retainKeysDirective] = sortScalars(retainKeysList)
+	}
+	return patch, nil
+}
+
+// handleDirectiveMarker handles how to diff the directive marker between 2 objects
+func handleDirectiveMarker(key string, originalValue, modifiedValue interface{}, patch map[string]interface{}) (bool, error) {
+	if key == directiveMarker {
+		originalString, ok := originalValue.(string)
+		if !ok {
+			return false, fmt.Errorf("invalid value for special key: %s", directiveMarker)
+		}
+		modifiedString, ok := modifiedValue.(string)
+		if !ok {
+			return false, fmt.Errorf("invalid value for special key: %s", directiveMarker)
+		}
+		if modifiedString != originalString {
+			patch[directiveMarker] = modifiedValue
+		}
+		return true, nil
+	}
+	return false, nil
+}
+
+// handleMapDiff diffs the 2 maps `originalValueTyped` and `modifiedValue`,
+// and puts the diff in the `patch` associated with `key`.
+// key is the key associated with originalValue and modifiedValue.
+// originalValue, modifiedValue are the old and new value respectively. They are both maps.
+// patch is the patch map that contains key and the updated value, and it is the parent of originalValue, modifiedValue.
+// diffOptions contains multiple options to control how we do the diff.
+func handleMapDiff(key string, originalValue, modifiedValue, patch map[string]interface{},
+	schema LookupPatchMeta, diffOptions DiffOptions) error {
+	subschema, patchMeta, err := schema.LookupPatchMetadataForStruct(key)
+
+	if err != nil {
+		// We couldn't look up metadata for the field
+		// If the values are identical, this doesn't matter, no patch is needed
+		if reflect.DeepEqual(originalValue, modifiedValue) {
+			return nil
+		}
+		// Otherwise, return the error
+		return err
+	}
+	retainKeys, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies())
+	if err != nil {
+		return err
+	}
+	diffOptions.BuildRetainKeysDirective = retainKeys
+	switch patchStrategy {
+	// The patch strategy from metadata tells us to replace the entire object instead of diffing it
+	case replaceDirective:
+		if !diffOptions.IgnoreChangesAndAdditions {
+			patch[key] = modifiedValue
+		}
+	default:
+		patchValue, err := diffMaps(originalValue, modifiedValue, subschema, diffOptions)
+		if err != nil {
+			return err
+		}
+		// Maps were not identical, use provided patch value
+		if len(patchValue) > 0 {
+			patch[key] = patchValue
+		}
+	}
+	return nil
+}
+
+// handleSliceDiff diffs the 2 slices `originalValueTyped` and `modifiedValue`,
+// and puts the diff in the `patch` associated with `key`.
+// key is the key associated with originalValue and modifiedValue.
+// originalValue, modifiedValue are the old and new value respectively. They are both slices.
+// patch is the patch map that contains key and the updated value, and it is the parent of originalValue, modifiedValue.
+// diffOptions contains multiple options to control how we do the diff.
+func handleSliceDiff(key string, originalValue, modifiedValue []interface{}, patch map[string]interface{},
+	schema LookupPatchMeta, diffOptions DiffOptions) error {
+	subschema, patchMeta, err := schema.LookupPatchMetadataForSlice(key)
+	if err != nil {
+		// We couldn't look up metadata for the field
+		// If the values are identical, this doesn't matter, no patch is needed
+		if reflect.DeepEqual(originalValue, modifiedValue) {
+			return nil
+		}
+		// Otherwise, return the error
+		return err
+	}
+	retainKeys, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies())
+	if err != nil {
+		return err
+	}
+	switch patchStrategy {
+	// Merge the 2 slices using mergePatchKey
+	case mergeDirective:
+		diffOptions.BuildRetainKeysDirective = retainKeys
+		addList, deletionList, setOrderList, err := diffLists(originalValue, modifiedValue, subschema, patchMeta.GetPatchMergeKey(), diffOptions)
+		if err != nil {
+			return err
+		}
+		if len(addList) > 0 {
+			patch[key] = addList
+		}
+		// generate a parallel list for deletion
+		if len(deletionList) > 0 {
+			parallelDeletionListKey := fmt.Sprintf("%s/%s", deleteFromPrimitiveListDirectivePrefix, key)
+			patch[parallelDeletionListKey] = deletionList
+		}
+		if len(setOrderList) > 0 {
+			parallelSetOrderListKey := fmt.Sprintf("%s/%s", setElementOrderDirectivePrefix, key)
+			patch[parallelSetOrderListKey] = setOrderList
+		}
+	default:
+		replacePatchFieldIfNotEqual(key, originalValue, modifiedValue, patch, diffOptions)
+	}
+	return nil
+}
+
+// replacePatchFieldIfNotEqual updates the patch if original and modified are not deep equal
+// and diffOptions.IgnoreChangesAndAdditions is false.
+// original is the old value, which may be either the live cluster object or the last applied configuration
+// modified is the new value, and is always the user's new config
+func replacePatchFieldIfNotEqual(key string, original, modified interface{},
+	patch map[string]interface{}, diffOptions DiffOptions) {
+	if diffOptions.IgnoreChangesAndAdditions {
+		// Ignoring changes - do nothing
+		return
+	}
+	if reflect.DeepEqual(original, modified) {
+		// Contents are identical - do nothing
+		return
+	}
+	// Create a patch to replace the old value with the new one
+	patch[key] = modified
+}
+
+// updatePatchIfMissing iterates over `original` when ignoreDeletions is false,
+// clearing each field whose key is not present in `modified`.
+// original is the old value, which may be either the live cluster object or the last applied configuration
+// modified is the new value, and is always the user's new config
+func updatePatchIfMissing(original, modified, patch map[string]interface{}, diffOptions DiffOptions) {
+	if diffOptions.IgnoreDeletions {
+		// Ignoring deletion - do nothing
+		return
+	}
+	// Add nils for deleted values
+	for key := range original {
+		if _, found := modified[key]; !found {
+			patch[key] = nil
+		}
+	}
+}
+
+// validateMergeKeyInLists checks if each map in the list has the merge key.
+func validateMergeKeyInLists(mergeKey string, lists ...[]interface{}) error {
+	for _, list := range lists {
+		for _, item := range list {
+			m, ok := item.(map[string]interface{})
+			if !ok {
+				return mergepatch.ErrBadArgType(m, item)
+			}
+			if _, ok = m[mergeKey]; !ok {
+				return mergepatch.ErrNoMergeKey(m, mergeKey)
+			}
+		}
+	}
+	return nil
+}
+
+// normalizeElementOrder sorts the `patch` list by `patchOrder` and sorts the `serverOnly` list by `serverOrder`.
+// Then it merges the 2 sorted lists.
+// It guarantees that the relative order in the patch list and in the serverOnly list is kept.
+// `patch` is a list of items in the patch, and `serverOnly` is a list of items in the live object.
+// `patchOrder` is the order we want the `patch` list to have and
+// `serverOrder` is the order we want the `serverOnly` list to have.
+// kind is the kind of each item in the lists `patch` and `serverOnly`.
+func normalizeElementOrder(patch, serverOnly, patchOrder, serverOrder []interface{}, mergeKey string, kind reflect.Kind) ([]interface{}, error) {
+	patch, err := normalizeSliceOrder(patch, patchOrder, mergeKey, kind)
+	if err != nil {
+		return nil, err
+	}
+	serverOnly, err = normalizeSliceOrder(serverOnly, serverOrder, mergeKey, kind)
+	if err != nil {
+		return nil, err
+	}
+	all := mergeSortedSlice(serverOnly, patch, serverOrder, mergeKey, kind)
+
+	return all, nil
+}
+
+// mergeSortedSlice merges the 2 sorted lists by serverOrder with best effort.
+// It will insert each item in the `left` list into the `right` list. In most cases, the 2 lists will be interleaved.
+// The relative order of left and right is guaranteed to be kept.
+// They have higher precedence than the order in the live list.
+// The place for an item in `left` is found by:
+// scanning from the place of the last insertion in `right` to the end of `right`,
+// the place is before the first item that is greater than the item we want to insert.
+// example usage: using server-only items as left and patch items as right. We insert server-only items
+// into the patch list. We use the order of the live object as the record for comparison.
+func mergeSortedSlice(left, right, serverOrder []interface{}, mergeKey string, kind reflect.Kind) []interface{} {
+	// Returns whether l is less than r, and whether both have been found.
+	// If l and r are both present and l is in front of r, l is less than r.
+	less := func(l, r interface{}) (bool, bool) {
+		li := index(serverOrder, l, mergeKey, kind)
+		ri := index(serverOrder, r, mergeKey, kind)
+		if li >= 0 && ri >= 0 {
+			return li < ri, true
+		} else {
+			return false, false
+		}
+	}
+
+	// left and right should be non-overlapping.
+	size := len(left) + len(right)
+	i, j := 0, 0
+	s := make([]interface{}, size, size)
+
+	for k := 0; k < size; k++ {
+		if i >= len(left) && j < len(right) {
+			// have items left in the `right` list
+			s[k] = right[j]
+			j++
+		} else if j >= len(right) && i < len(left) {
+			// have items left in the `left` list
+			s[k] = left[i]
+			i++
+		} else {
+			// compare them if i and j are both in bounds
+			less, foundBoth := less(left[i], right[j])
+			if foundBoth && less {
+				s[k] = left[i]
+				i++
+			} else {
+				s[k] = right[j]
+				j++
+			}
+		}
+	}
+	return s
+}
+
+// index returns the index of the item in the given items, or -1 if it doesn't exist
+// l must NOT be a slice of slices, this should be checked before calling.
+func index(l []interface{}, valToLookUp interface{}, mergeKey string, kind reflect.Kind) int {
+	var getValFn func(interface{}) interface{}
+	// Get the correct `getValFn` based on item `kind`.
+	// It should return the value of the merge key for maps and
+	// return the item itself for other kinds.
+	switch kind {
+	case reflect.Map:
+		getValFn = func(item interface{}) interface{} {
+			typedItem, ok := item.(map[string]interface{})
+			if !ok {
+				return nil
+			}
+			val := typedItem[mergeKey]
+			return val
+		}
+	default:
+		getValFn = func(item interface{}) interface{} {
+			return item
+		}
+	}
+
+	for i, v := range l {
+		if getValFn(valToLookUp) == getValFn(v) {
+			return i
+		}
+	}
+	return -1
+}
+
+// extractToDeleteItems takes a list and
+// returns 2 lists: one contains items that should be kept and the other contains items to be deleted.
+func extractToDeleteItems(l []interface{}) ([]interface{}, []interface{}, error) {
+	var nonDelete, toDelete []interface{}
+	for _, v := range l {
+		m, ok := v.(map[string]interface{})
+		if !ok {
+			return nil, nil, mergepatch.ErrBadArgType(m, v)
+		}
+
+		directive, foundDirective := m[directiveMarker]
+		if foundDirective && directive == deleteDirective {
+			toDelete = append(toDelete, v)
+		} else {
+			nonDelete = append(nonDelete, v)
+		}
+	}
+	return nonDelete, toDelete, nil
+}
+
+// normalizeSliceOrder sorts the `toSort` list by `order`
+func normalizeSliceOrder(toSort, order []interface{}, mergeKey string, kind reflect.Kind) ([]interface{}, error) {
+	var toDelete []interface{}
+	if kind == reflect.Map {
+		// make sure each item in toSort and order has the merge key
+		err := validateMergeKeyInLists(mergeKey, toSort, order)
+		if err != nil {
+			return nil, err
+		}
+		toSort, toDelete, err = extractToDeleteItems(toSort)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	sort.SliceStable(toSort, func(i, j int) bool {
+		if ii := index(order, toSort[i], mergeKey, kind); ii >= 0 {
+			if ij := index(order, toSort[j], mergeKey, kind); ij >= 0 {
+				return ii < ij
+			}
+		}
+		return true
+	})
+	toSort = append(toSort, toDelete...)
+	return toSort, nil
+}
+
+// Returns a (recursive) strategic merge patch, a parallel deletion list if necessary, and
+// another list to set the order of the list.
+// Only lists of primitives with the merge strategy will generate a parallel deletion list.
+// These two lists should yield modified when applied to original, for lists with merge semantics.
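+// For example (an illustration, not an exhaustive description of the output):
+// with mergeKey "name", diffing
+//	original: [{"name": "a", "image": "v1"}, {"name": "b"}]
+//	modified: [{"name": "b"}, {"name": "a", "image": "v2"}]
+// yields the patch list [{"name": "a", "image": "v2"}] and, when
+// diffOptions.SetElementOrder is set, the order list [{"name": "b"}, {"name": "a"}].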
+func diffLists(original, modified []interface{}, schema LookupPatchMeta, mergeKey string, diffOptions DiffOptions) ([]interface{}, []interface{}, []interface{}, error) {
+	if len(original) == 0 {
+		// Both slices are empty - do nothing
+		if len(modified) == 0 || diffOptions.IgnoreChangesAndAdditions {
+			return nil, nil, nil, nil
+		}
+
+		// Old slice was empty - add all elements from the new slice
+		return modified, nil, nil, nil
+	}
+
+	elementType, err := sliceElementType(original, modified)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	var patchList, deleteList, setOrderList []interface{}
+	kind := elementType.Kind()
+	switch kind {
+	case reflect.Map:
+		patchList, deleteList, err = diffListsOfMaps(original, modified, schema, mergeKey, diffOptions)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		patchList, err = normalizeSliceOrder(patchList, modified, mergeKey, kind)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		orderSame, err := isOrderSame(original, modified, mergeKey)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		// append the deletions to the end of the patch list.
+		patchList = append(patchList, deleteList...)
+		deleteList = nil
+		// generate the setElementOrder list when there are content changes or order changes
+		if diffOptions.SetElementOrder &&
+			((!diffOptions.IgnoreChangesAndAdditions && (len(patchList) > 0 || !orderSame)) ||
+				(!diffOptions.IgnoreDeletions && len(patchList) > 0)) {
+			// Generate a list of maps where each item contains only the merge key.
+			setOrderList = make([]interface{}, len(modified))
+			for i, v := range modified {
+				typedV := v.(map[string]interface{})
+				setOrderList[i] = map[string]interface{}{
+					mergeKey: typedV[mergeKey],
+				}
+			}
+		}
+	case reflect.Slice:
+		// Lists of lists are not permitted by the API
+		return nil, nil, nil, mergepatch.ErrNoListOfLists
+	default:
+		patchList, deleteList, err = diffListsOfScalars(original, modified, diffOptions)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		patchList, err = normalizeSliceOrder(patchList, modified, mergeKey, kind)
+		// generate the setElementOrder list when there are content changes or order changes
+		if diffOptions.SetElementOrder && ((!diffOptions.IgnoreDeletions && len(deleteList) > 0) ||
+			(!diffOptions.IgnoreChangesAndAdditions && !reflect.DeepEqual(original, modified))) {
+			setOrderList = modified
+		}
+	}
+	return patchList, deleteList, setOrderList, err
+}
+
+// isOrderSame checks if the order in a list has changed
+func isOrderSame(original, modified []interface{}, mergeKey string) (bool, error) {
+	if len(original) != len(modified) {
+		return false, nil
+	}
+	for i, modifiedItem := range modified {
+		equal, err := mergeKeyValueEqual(original[i], modifiedItem, mergeKey)
+		if err != nil || !equal {
+			return equal, err
+		}
+	}
+	return true, nil
+}
+
+// diffListsOfScalars returns 2 lists: the first one is the addList and the second one is the deletionList.
+// diffOptions.IgnoreChangesAndAdditions controls whether the addList is computed; true means it is not.
+// diffOptions.IgnoreDeletions controls whether the deletionList is computed; true means it is not.
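+// For example, diffing original ["a", "b"] against modified ["b", "c"] yields
+// addList ["c"] and deletionList ["a"] (illustrative values; both inputs are
+// sorted before the comparison).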
+// original may be changed, but modified is guaranteed to not be changed +func diffListsOfScalars(original, modified []interface{}, diffOptions DiffOptions) ([]interface{}, []interface{}, error) { + modifiedCopy := make([]interface{}, len(modified)) + copy(modifiedCopy, modified) + // Sort the scalars for easier calculating the diff + originalScalars := sortScalars(original) + modifiedScalars := sortScalars(modifiedCopy) + + originalIndex, modifiedIndex := 0, 0 + addList := []interface{}{} + deletionList := []interface{}{} + + for { + originalInBounds := originalIndex < len(originalScalars) + modifiedInBounds := modifiedIndex < len(modifiedScalars) + if !originalInBounds && !modifiedInBounds { + break + } + // we need to compare the string representation of the scalar, + // because the scalar is an interface which doesn't support either < or > + // And that's how func sortScalars compare scalars. + var originalString, modifiedString string + var originalValue, modifiedValue interface{} + if originalInBounds { + originalValue = originalScalars[originalIndex] + originalString = fmt.Sprintf("%v", originalValue) + } + if modifiedInBounds { + modifiedValue = modifiedScalars[modifiedIndex] + modifiedString = fmt.Sprintf("%v", modifiedValue) + } + + originalV, modifiedV := compareListValuesAtIndex(originalInBounds, modifiedInBounds, originalString, modifiedString) + switch { + case originalV == nil && modifiedV == nil: + originalIndex++ + modifiedIndex++ + case originalV != nil && modifiedV == nil: + if !diffOptions.IgnoreDeletions { + deletionList = append(deletionList, originalValue) + } + originalIndex++ + case originalV == nil && modifiedV != nil: + if !diffOptions.IgnoreChangesAndAdditions { + addList = append(addList, modifiedValue) + } + modifiedIndex++ + default: + return nil, nil, fmt.Errorf("Unexpected returned value from compareListValuesAtIndex: %v and %v", originalV, modifiedV) + } + } + + return addList, deduplicateScalars(deletionList), nil +} + +// If first return value is non-nil, list1 contains an element not present in list2 +// If second return value is non-nil, list2 contains an element not present in list1 +func compareListValuesAtIndex(list1Inbounds, list2Inbounds bool, list1Value, list2Value string) (interface{}, interface{}) { + bothInBounds := list1Inbounds && list2Inbounds + switch { + // scalars are identical + case bothInBounds && list1Value == list2Value: + return nil, nil + // only list2 is in bound + case !list1Inbounds: + fallthrough + // list2 has additional scalar + case bothInBounds && list1Value > list2Value: + return nil, list2Value + // only original is in bound + case !list2Inbounds: + fallthrough + // original has additional scalar + case bothInBounds && list1Value < list2Value: + return list1Value, nil + default: + return nil, nil + } +} + +// diffListsOfMaps takes a pair of lists and +// returns a (recursive) strategic merge patch list contains additions and changes and +// a deletion list contains deletions +func diffListsOfMaps(original, modified []interface{}, schema LookupPatchMeta, mergeKey string, diffOptions DiffOptions) ([]interface{}, []interface{}, error) { + patch := make([]interface{}, 0, len(modified)) + deletionList := make([]interface{}, 0, len(original)) + + originalSorted, err := sortMergeListsByNameArray(original, schema, mergeKey, false) + if err != nil { + return nil, nil, err + } + modifiedSorted, err := sortMergeListsByNameArray(modified, schema, mergeKey, false) + if err != nil { + return nil, nil, err + } + + originalIndex, 
modifiedIndex := 0, 0 + for { + originalInBounds := originalIndex < len(originalSorted) + modifiedInBounds := modifiedIndex < len(modifiedSorted) + bothInBounds := originalInBounds && modifiedInBounds + if !originalInBounds && !modifiedInBounds { + break + } + + var originalElementMergeKeyValueString, modifiedElementMergeKeyValueString string + var originalElementMergeKeyValue, modifiedElementMergeKeyValue interface{} + var originalElement, modifiedElement map[string]interface{} + if originalInBounds { + originalElement, originalElementMergeKeyValue, err = getMapAndMergeKeyValueByIndex(originalIndex, mergeKey, originalSorted) + if err != nil { + return nil, nil, err + } + originalElementMergeKeyValueString = fmt.Sprintf("%v", originalElementMergeKeyValue) + } + if modifiedInBounds { + modifiedElement, modifiedElementMergeKeyValue, err = getMapAndMergeKeyValueByIndex(modifiedIndex, mergeKey, modifiedSorted) + if err != nil { + return nil, nil, err + } + modifiedElementMergeKeyValueString = fmt.Sprintf("%v", modifiedElementMergeKeyValue) + } + + switch { + case bothInBounds && ItemMatchesOriginalAndModifiedSlice(originalElementMergeKeyValueString, modifiedElementMergeKeyValueString): + // Merge key values are equal, so recurse + patchValue, err := diffMaps(originalElement, modifiedElement, schema, diffOptions) + if err != nil { + return nil, nil, err + } + if len(patchValue) > 0 { + patchValue[mergeKey] = modifiedElementMergeKeyValue + patch = append(patch, patchValue) + } + originalIndex++ + modifiedIndex++ + // only modified is in bound + case !originalInBounds: + fallthrough + // modified has additional map + case bothInBounds && ItemAddedToModifiedSlice(originalElementMergeKeyValueString, modifiedElementMergeKeyValueString): + if !diffOptions.IgnoreChangesAndAdditions { + patch = append(patch, modifiedElement) + } + modifiedIndex++ + // only original is in bound + case !modifiedInBounds: + fallthrough + // original has additional map + case bothInBounds && ItemRemovedFromModifiedSlice(originalElementMergeKeyValueString, modifiedElementMergeKeyValueString): + if !diffOptions.IgnoreDeletions { + // Item was deleted, so add delete directive + deletionList = append(deletionList, CreateDeleteDirective(mergeKey, originalElementMergeKeyValue)) + } + originalIndex++ + } + } + + return patch, deletionList, nil +} + +// getMapAndMergeKeyValueByIndex return a map in the list and its merge key value given the index of the map. +func getMapAndMergeKeyValueByIndex(index int, mergeKey string, listOfMaps []interface{}) (map[string]interface{}, interface{}, error) { + m, ok := listOfMaps[index].(map[string]interface{}) + if !ok { + return nil, nil, mergepatch.ErrBadArgType(m, listOfMaps[index]) + } + + val, ok := m[mergeKey] + if !ok { + return nil, nil, mergepatch.ErrNoMergeKey(m, mergeKey) + } + return m, val, nil +} + +// StrategicMergePatch applies a strategic merge patch. The patch and the original document +// must be json encoded content. A patch can be created from an original and a modified document +// by calling CreateStrategicMergePatch. 
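+// An illustrative (hypothetical) call, assuming a core/v1 Pod type is available
+// as the data struct:
+//	patched, err := StrategicMergePatch(originalJSON, patchJSON, &v1.Pod{})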
+func StrategicMergePatch(original, patch []byte, dataStruct interface{}) ([]byte, error) {
+	schema, err := NewPatchMetaFromStruct(dataStruct)
+	if err != nil {
+		return nil, err
+	}
+
+	return StrategicMergePatchUsingLookupPatchMeta(original, patch, schema)
+}
+
+func StrategicMergePatchUsingLookupPatchMeta(original, patch []byte, schema LookupPatchMeta) ([]byte, error) {
+	originalMap, err := handleUnmarshal(original)
+	if err != nil {
+		return nil, err
+	}
+	patchMap, err := handleUnmarshal(patch)
+	if err != nil {
+		return nil, err
+	}
+
+	result, err := StrategicMergeMapPatchUsingLookupPatchMeta(originalMap, patchMap, schema)
+	if err != nil {
+		return nil, err
+	}
+
+	return json.Marshal(result)
+}
+
+func handleUnmarshal(j []byte) (map[string]interface{}, error) {
+	if j == nil {
+		j = []byte("{}")
+	}
+
+	m := map[string]interface{}{}
+	err := json.Unmarshal(j, &m)
+	if err != nil {
+		return nil, mergepatch.ErrBadJSONDoc
+	}
+	return m, nil
+}
+
+// StrategicMergeMapPatch applies a strategic merge patch. The original and patch documents
+// must be JSONMap. A patch can be created from an original and modified document by
+// calling CreateTwoWayMergeMapPatch.
+// Warning: the original and patch JSONMap objects are mutated by this function and should not be reused.
+func StrategicMergeMapPatch(original, patch JSONMap, dataStruct interface{}) (JSONMap, error) {
+	schema, err := NewPatchMetaFromStruct(dataStruct)
+	if err != nil {
+		return nil, err
+	}
+
+	// We need the go struct tags `patchMergeKey` and `patchStrategy` for fields that support a strategic merge patch.
+	// For native resources, we can easily figure out these tags since we know the fields.
+
+	// Because custom resources are decoded as Unstructured and because we're missing the metadata about how to handle
+	// each field in a strategic merge patch, we can't find the go struct tags. Hence, we can't easily do a strategic merge
+	// for custom resources. So we should fail fast and return an error.
+	if _, ok := dataStruct.(*unstructured.Unstructured); ok {
+		return nil, mergepatch.ErrUnsupportedStrategicMergePatchFormat
+	}
+
+	return StrategicMergeMapPatchUsingLookupPatchMeta(original, patch, schema)
+}
+
+func StrategicMergeMapPatchUsingLookupPatchMeta(original, patch JSONMap, schema LookupPatchMeta) (JSONMap, error) {
+	mergeOptions := MergeOptions{
+		MergeParallelList:    true,
+		IgnoreUnmatchedNulls: true,
+	}
+	return mergeMap(original, patch, schema, mergeOptions)
+}
+
+// MergeStrategicMergeMapPatchUsingLookupPatchMeta merges strategic merge
+// patches retaining `null` fields and parallel lists. If 2 patches change the
+// same field, the latter one will override the former. If you don't want that
+// to happen, you need to run MergingMapsHaveConflicts before merging these
+// patches. Applying the resulting merged merge patch to a JSONMap yields the
+// same result as merging each strategic merge patch into the JSONMap in
+// succession.
+func MergeStrategicMergeMapPatchUsingLookupPatchMeta(schema LookupPatchMeta, patches ...JSONMap) (JSONMap, error) {
+	mergeOptions := MergeOptions{
+		MergeParallelList:    false,
+		IgnoreUnmatchedNulls: false,
+	}
+	merged := JSONMap{}
+	var err error
+	for _, patch := range patches {
+		merged, err = mergeMap(merged, patch, schema, mergeOptions)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return merged, nil
+}
+
+// handleDirectiveInMergeMap handles the patch directive when merging 2 maps.
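+// For example, merging the patch {"$patch": "replace", "x": 1} into any map
+// yields {"x": 1} (the directive key is stripped), and {"$patch": "delete"}
+// yields an empty map.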
+func handleDirectiveInMergeMap(directive interface{}, patch map[string]interface{}) (map[string]interface{}, error) {
+	if directive == replaceDirective {
+		// If the patch contains "$patch: replace", don't merge it, just use the
+		// patch directly. Later on, we can add a single level replace that only
+		// affects the map that the $patch is in.
+		delete(patch, directiveMarker)
+		return patch, nil
+	}
+
+	if directive == deleteDirective {
+		// If the patch contains "$patch: delete", don't merge it, just return
+		// an empty map.
+		return map[string]interface{}{}, nil
+	}
+
+	return nil, mergepatch.ErrBadPatchType(directive, patch)
+}
+
+func containsDirectiveMarker(item interface{}) bool {
+	m, ok := item.(map[string]interface{})
+	if ok {
+		if _, foundDirectiveMarker := m[directiveMarker]; foundDirectiveMarker {
+			return true
+		}
+	}
+	return false
+}
+
+func mergeKeyValueEqual(left, right interface{}, mergeKey string) (bool, error) {
+	if len(mergeKey) == 0 {
+		return left == right, nil
+	}
+	typedLeft, ok := left.(map[string]interface{})
+	if !ok {
+		return false, mergepatch.ErrBadArgType(typedLeft, left)
+	}
+	typedRight, ok := right.(map[string]interface{})
+	if !ok {
+		return false, mergepatch.ErrBadArgType(typedRight, right)
+	}
+	mergeKeyLeft, ok := typedLeft[mergeKey]
+	if !ok {
+		return false, mergepatch.ErrNoMergeKey(typedLeft, mergeKey)
+	}
+	mergeKeyRight, ok := typedRight[mergeKey]
+	if !ok {
+		return false, mergepatch.ErrNoMergeKey(typedRight, mergeKey)
+	}
+	return mergeKeyLeft == mergeKeyRight, nil
+}
+
+// extractKey trims the prefix and returns the original key
+func extractKey(s, prefix string) (string, error) {
+	substrings := strings.SplitN(s, "/", 2)
+	if len(substrings) <= 1 || substrings[0] != prefix {
+		switch prefix {
+		case deleteFromPrimitiveListDirectivePrefix:
+			return "", mergepatch.ErrBadPatchFormatForPrimitiveList
+		case setElementOrderDirectivePrefix:
+			return "", mergepatch.ErrBadPatchFormatForSetElementOrderList
+		default:
+			return "", fmt.Errorf("failed to find expected prefix %q in %s", prefix, s)
+		}
+	}
+	return substrings[1], nil
+}
+
+// validatePatchWithSetOrderList verifies:
+// the relative order of any two items in the setOrderList matches that in the patch list.
+// the items in the patch list must be a subset of, or the same as, the $setElementOrder list (deletions are ignored).
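+// For example, with setOrderList ["a", "b", "c", "d"], a patch list ["b", "d"]
+// is valid (same relative order), while ["d", "b"] is not (an illustration for
+// a list of primitives, i.e. without a merge key).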
+func validatePatchWithSetOrderList(patchList, setOrderList interface{}, mergeKey string) error {
+	typedSetOrderList, ok := setOrderList.([]interface{})
+	if !ok {
+		return mergepatch.ErrBadPatchFormatForSetElementOrderList
+	}
+	typedPatchList, ok := patchList.([]interface{})
+	if !ok {
+		return mergepatch.ErrBadPatchFormatForSetElementOrderList
+	}
+	if len(typedSetOrderList) == 0 || len(typedPatchList) == 0 {
+		return nil
+	}
+
+	var nonDeleteList, toDeleteList []interface{}
+	var err error
+	if len(mergeKey) > 0 {
+		nonDeleteList, toDeleteList, err = extractToDeleteItems(typedPatchList)
+		if err != nil {
+			return err
+		}
+	} else {
+		nonDeleteList = typedPatchList
+	}
+
+	patchIndex, setOrderIndex := 0, 0
+	for patchIndex < len(nonDeleteList) && setOrderIndex < len(typedSetOrderList) {
+		if containsDirectiveMarker(nonDeleteList[patchIndex]) {
+			patchIndex++
+			continue
+		}
+		mergeKeyEqual, err := mergeKeyValueEqual(nonDeleteList[patchIndex], typedSetOrderList[setOrderIndex], mergeKey)
+		if err != nil {
+			return err
+		}
+		if mergeKeyEqual {
+			patchIndex++
+		}
+		setOrderIndex++
+	}
+	// If patchIndex is in bounds but setOrderIndex is out of bounds, it means there are items
+	// mismatched between the patch list and the setElementOrder list.
+	// The second check is a sanity check, and should always be true if the first is.
+	if patchIndex < len(nonDeleteList) && setOrderIndex >= len(typedSetOrderList) {
+		return fmt.Errorf("The order in patch list:\n%v\n doesn't match %s list:\n%v\n", typedPatchList, setElementOrderDirectivePrefix, setOrderList)
+	}
+	typedPatchList = append(nonDeleteList, toDeleteList...)
+	return nil
+}
+
+// preprocessDeletionListForMerging preprocesses the deletion list.
+// It returns shouldContinue, isDeletionList, noPrefixKey.
+func preprocessDeletionListForMerging(key string, original map[string]interface{},
+	patchVal interface{}, mergeDeletionList bool) (bool, bool, string, error) {
+	// If we found a parallel list for deletion and we are going to merge the list,
+	// overwrite the key with the original key and set the isDeleteList flag
+	foundParallelListPrefix := strings.HasPrefix(key, deleteFromPrimitiveListDirectivePrefix)
+	if foundParallelListPrefix {
+		if !mergeDeletionList {
+			original[key] = patchVal
+			return true, false, "", nil
+		}
+		originalKey, err := extractKey(key, deleteFromPrimitiveListDirectivePrefix)
+		return false, true, originalKey, err
+	}
+	return false, false, "", nil
+}
+
+// applyRetainKeysDirective looks for a retainKeys directive and applies it to original
+// - if no directive exists do nothing
+// - if the directive is found, clear keys in original missing from the directive list
+// - validate that all keys present in the patch are present in the retainKeys directive
+// note: original may be another patch request, e.g. applying the add+modified patch to the deletions patch. In this case it may have directives
+func applyRetainKeysDirective(original, patch map[string]interface{}, options MergeOptions) error {
+	retainKeysInPatch, foundInPatch := patch[retainKeysDirective]
+	if !foundInPatch {
+		return nil
+	}
+	// clean up the directive
+	delete(patch, retainKeysDirective)
+
+	if !options.MergeParallelList {
+		// If original is actually a patch, make sure the retainKeys directives are the same in both patches if present in both.
+		// If not present in the original patch, copy from the modified patch.
+		retainKeysInOriginal, foundInOriginal := original[retainKeysDirective]
+		if foundInOriginal {
+			if !reflect.DeepEqual(retainKeysInOriginal, retainKeysInPatch) {
+				// This error should never actually happen.
+				return fmt.Errorf("%v and %v are not deep equal: this may happen when calculating the 3-way diff patch", retainKeysInOriginal, retainKeysInPatch)
+			}
+		} else {
+			original[retainKeysDirective] = retainKeysInPatch
+		}
+		return nil
+	}
+
+	retainKeysList, ok := retainKeysInPatch.([]interface{})
+	if !ok {
+		return mergepatch.ErrBadPatchFormatForRetainKeys
+	}
+
+	// validate the patch to make sure all fields in the patch are present in the retainKeysList.
+	// The map is used only as a set; the value is never referenced
+	m := map[interface{}]struct{}{}
+	for _, v := range retainKeysList {
+		m[v] = struct{}{}
+	}
+	for k, v := range patch {
+		if v == nil || strings.HasPrefix(k, deleteFromPrimitiveListDirectivePrefix) ||
+			strings.HasPrefix(k, setElementOrderDirectivePrefix) {
+			continue
+		}
+		// If there is an item present in the patch but not in the retainKeys list,
+		// the patch is invalid.
+		if _, found := m[k]; !found {
+			return mergepatch.ErrBadPatchFormatForRetainKeys
+		}
+	}
+
+	// clear fields that are not present in the retainKeys list
+	for k := range original {
+		if _, found := m[k]; !found {
+			delete(original, k)
+		}
+	}
+	return nil
+}
+
+// mergePatchIntoOriginal processes the $setElementOrder list.
+// When not merging the directive, it will make sure the $setElementOrder list exists only in original.
+// When merging the directive, it will try to find the $setElementOrder list and
+// its corresponding patch list, validate it and merge it.
+// Then, it sorts them by the relative order in setElementOrder, the patch list and the live list.
+// The precedence is $setElementOrder > order in patch list > order in live list.
+// This function will delete the item after merging it to prevent processing it again in the future.
+// Ref: https://git.k8s.io/community/contributors/design-proposals/cli/preserve-order-in-strategic-merge-patch.md
+func mergePatchIntoOriginal(original, patch map[string]interface{}, schema LookupPatchMeta, mergeOptions MergeOptions) error {
+	for key, patchV := range patch {
+		// Do nothing if there is no ordering directive
+		if !strings.HasPrefix(key, setElementOrderDirectivePrefix) {
+			continue
+		}
+
+		setElementOrderInPatch := patchV
+		// Copies the directive from the second patch (`patch`) to the first patch (`original`),
+		// checks that they are equal, and deletes the directive from the second patch
+		if !mergeOptions.MergeParallelList {
+			setElementOrderListInOriginal, ok := original[key]
+			if ok {
+				// check if the setElementOrder list in original and the one in patch match
+				if !reflect.DeepEqual(setElementOrderListInOriginal, setElementOrderInPatch) {
+					return mergepatch.ErrBadPatchFormatForSetElementOrderList
+				}
+			} else {
+				// move the setElementOrder list from patch to original
+				original[key] = setElementOrderInPatch
+			}
+		}
+		delete(patch, key)
+
+		var (
+			ok bool
+			originalFieldValue, patchFieldValue, merged []interface{}
+			patchStrategy string
+			patchMeta PatchMeta
+			subschema LookupPatchMeta
+		)
+		typedSetElementOrderList, ok := setElementOrderInPatch.([]interface{})
+		if !ok {
+			return mergepatch.ErrBadArgType(typedSetElementOrderList, setElementOrderInPatch)
+		}
+		// Trim the setElementOrderDirectivePrefix to get the key of the list field in original.
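+		// e.g. "$setElementOrder/containers" becomes "containers" (an
+		// illustrative key; any list field name works the same way).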
+		originalKey, err := extractKey(key, setElementOrderDirectivePrefix)
+		if err != nil {
+			return err
+		}
+		// try to find the list with `originalKey` in `original` and `patch` and merge them.
+		originalList, foundOriginal := original[originalKey]
+		patchList, foundPatch := patch[originalKey]
+		if foundOriginal {
+			originalFieldValue, ok = originalList.([]interface{})
+			if !ok {
+				return mergepatch.ErrBadArgType(originalFieldValue, originalList)
+			}
+		}
+		if foundPatch {
+			patchFieldValue, ok = patchList.([]interface{})
+			if !ok {
+				return mergepatch.ErrBadArgType(patchFieldValue, patchList)
+			}
+		}
+		subschema, patchMeta, err = schema.LookupPatchMetadataForSlice(originalKey)
+		if err != nil {
+			return err
+		}
+		_, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies())
+		if err != nil {
+			return err
+		}
+		// Check for consistency between the element order list and the field it applies to
+		err = validatePatchWithSetOrderList(patchFieldValue, typedSetElementOrderList, patchMeta.GetPatchMergeKey())
+		if err != nil {
+			return err
+		}
+
+		switch {
+		case foundOriginal && !foundPatch:
+			// no change to list contents
+			merged = originalFieldValue
+		case !foundOriginal && foundPatch:
+			// list was added
+			merged = patchFieldValue
+		case foundOriginal && foundPatch:
+			merged, err = mergeSliceHandler(originalList, patchList, subschema,
+				patchStrategy, patchMeta.GetPatchMergeKey(), false, mergeOptions)
+			if err != nil {
+				return err
+			}
+		case !foundOriginal && !foundPatch:
+			continue
+		}
+
+		// Split all items into patch items and server-only items and then enforce the order.
+		var patchItems, serverOnlyItems []interface{}
+		if len(patchMeta.GetPatchMergeKey()) == 0 {
+			// Primitives don't need a merge key for partitioning.
+			patchItems, serverOnlyItems = partitionPrimitivesByPresentInList(merged, typedSetElementOrderList)
+
+		} else {
+			// Maps need a merge key for partitioning.
+			patchItems, serverOnlyItems, err = partitionMapsByPresentInList(merged, typedSetElementOrderList, patchMeta.GetPatchMergeKey())
+			if err != nil {
+				return err
+			}
+		}
+
+		elementType, err := sliceElementType(originalFieldValue, patchFieldValue)
+		if err != nil {
+			return err
+		}
+		kind := elementType.Kind()
+		// normalize the merged list
+		// typedSetElementOrderList contains all the relative order in typedPatchList,
+		// so we don't need to use typedPatchList
+		both, err := normalizeElementOrder(patchItems, serverOnlyItems, typedSetElementOrderList, originalFieldValue, patchMeta.GetPatchMergeKey(), kind)
+		if err != nil {
+			return err
+		}
+		original[originalKey] = both
+		// delete the patch list from patch to prevent processing it again in the future
+		delete(patch, originalKey)
+	}
+	return nil
+}
+
+// partitionPrimitivesByPresentInList partitions elements into 2 slices, the first containing items present in partitionBy, the other not.
+func partitionPrimitivesByPresentInList(original, partitionBy []interface{}) ([]interface{}, []interface{}) {
+	patch := make([]interface{}, 0, len(original))
+	serverOnly := make([]interface{}, 0, len(original))
+	inPatch := map[interface{}]bool{}
+	for _, v := range partitionBy {
+		inPatch[v] = true
+	}
+	for _, v := range original {
+		if !inPatch[v] {
+			serverOnly = append(serverOnly, v)
+		} else {
+			patch = append(patch, v)
+		}
+	}
+	return patch, serverOnly
+}
+
+// partitionMapsByPresentInList partitions elements into 2 slices, the first containing items present in partitionBy, the other not.
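+// For example, partitioning [{"name": "a"}, {"name": "b"}] by
+// partitionBy [{"name": "a"}] with mergeKey "name" returns
+// [{"name": "a"}] and [{"name": "b"}] (illustrative values).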
+func partitionMapsByPresentInList(original, partitionBy []interface{}, mergeKey string) ([]interface{}, []interface{}, error) {
+	patch := make([]interface{}, 0, len(original))
+	serverOnly := make([]interface{}, 0, len(original))
+	for _, v := range original {
+		typedV, ok := v.(map[string]interface{})
+		if !ok {
+			return nil, nil, mergepatch.ErrBadArgType(typedV, v)
+		}
+		mergeKeyValue, foundMergeKey := typedV[mergeKey]
+		if !foundMergeKey {
+			return nil, nil, mergepatch.ErrNoMergeKey(typedV, mergeKey)
+		}
+		_, _, found, err := findMapInSliceBasedOnKeyValue(partitionBy, mergeKey, mergeKeyValue)
+		if err != nil {
+			return nil, nil, err
+		}
+		if !found {
+			serverOnly = append(serverOnly, v)
+		} else {
+			patch = append(patch, v)
+		}
+	}
+	return patch, serverOnly, nil
+}
+
+// mergeMap merges fields from a patch map into the original map. Note: This may modify
+// both the original map and the patch because getting a deep copy of a map in
+// golang is highly non-trivial.
+// The mergeOptions.MergeParallelList flag controls whether a parallel deletion list is applied or kept as a regular key.
+// If the patch contains any null field (e.g. field_1: null) that is not
+// present in original, then, to propagate it to the end result, use
+// mergeOptions.IgnoreUnmatchedNulls == false.
+func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, mergeOptions MergeOptions) (map[string]interface{}, error) {
+	if v, ok := patch[directiveMarker]; ok {
+		return handleDirectiveInMergeMap(v, patch)
+	}
+
+	// nil is an accepted value for original to simplify logic in other places.
+	// If original is nil, replace it with an empty map and then apply the patch.
+	if original == nil {
+		original = map[string]interface{}{}
+	}
+
+	err := applyRetainKeysDirective(original, patch, mergeOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	// Process the $setElementOrder list and other lists sharing the same key.
+	// When not merging the directive, it will make sure the $setElementOrder list exists only in original.
+	// When merging the directive, it will process $setElementOrder and its patch list together.
+	// This function will delete the merged elements from patch so they will not be reprocessed
+	err = mergePatchIntoOriginal(original, patch, schema, mergeOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	// Start merging the patch into the original.
+	for k, patchV := range patch {
+		skipProcessing, isDeleteList, noPrefixKey, err := preprocessDeletionListForMerging(k, original, patchV, mergeOptions.MergeParallelList)
+		if err != nil {
+			return nil, err
+		}
+		if skipProcessing {
+			continue
+		}
+		if len(noPrefixKey) > 0 {
+			k = noPrefixKey
+		}
+
+		// If the value of this key is null, delete the key if it exists in the
+		// original. Otherwise, check if we want to preserve it or skip it.
+		// Preserving the null value is useful when we want to send an explicit
+		// delete to the API server.
+		if patchV == nil {
+			if _, ok := original[k]; ok {
+				delete(original, k)
+			}
+			if mergeOptions.IgnoreUnmatchedNulls {
+				continue
+			}
+		}
+
+		_, ok := original[k]
+		if !ok {
+			// If it's not in the original document, just take the patch value.
+			original[k] = patchV
+			continue
+		}
+
+		originalType := reflect.TypeOf(original[k])
+		patchType := reflect.TypeOf(patchV)
+		if originalType != patchType {
+			original[k] = patchV
+			continue
+		}
+		// If they're both maps or lists, recurse into the value.
+ switch originalType.Kind() { + case reflect.Map: + subschema, patchMeta, err2 := schema.LookupPatchMetadataForStruct(k) + if err2 != nil { + return nil, err2 + } + _, patchStrategy, err2 := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) + if err2 != nil { + return nil, err2 + } + original[k], err = mergeMapHandler(original[k], patchV, subschema, patchStrategy, mergeOptions) + case reflect.Slice: + subschema, patchMeta, err2 := schema.LookupPatchMetadataForSlice(k) + if err2 != nil { + return nil, err2 + } + _, patchStrategy, err2 := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) + if err2 != nil { + return nil, err2 + } + original[k], err = mergeSliceHandler(original[k], patchV, subschema, patchStrategy, patchMeta.GetPatchMergeKey(), isDeleteList, mergeOptions) + default: + original[k] = patchV + } + if err != nil { + return nil, err + } + } + return original, nil +} + +// mergeMapHandler handles how to merge `patchV` whose key is `key` with `original` respecting +// fieldPatchStrategy and mergeOptions. +func mergeMapHandler(original, patch interface{}, schema LookupPatchMeta, + fieldPatchStrategy string, mergeOptions MergeOptions) (map[string]interface{}, error) { + typedOriginal, typedPatch, err := mapTypeAssertion(original, patch) + if err != nil { + return nil, err + } + + if fieldPatchStrategy != replaceDirective { + return mergeMap(typedOriginal, typedPatch, schema, mergeOptions) + } else { + return typedPatch, nil + } +} + +// mergeSliceHandler handles how to merge `patchV` whose key is `key` with `original` respecting +// fieldPatchStrategy, fieldPatchMergeKey, isDeleteList and mergeOptions. +func mergeSliceHandler(original, patch interface{}, schema LookupPatchMeta, + fieldPatchStrategy, fieldPatchMergeKey string, isDeleteList bool, mergeOptions MergeOptions) ([]interface{}, error) { + typedOriginal, typedPatch, err := sliceTypeAssertion(original, patch) + if err != nil { + return nil, err + } + + if fieldPatchStrategy == mergeDirective { + return mergeSlice(typedOriginal, typedPatch, schema, fieldPatchMergeKey, mergeOptions, isDeleteList) + } else { + return typedPatch, nil + } +} + +// Merge two slices together. Note: This may modify both the original slice and +// the patch because getting a deep copy of a slice in golang is highly +// non-trivial. +func mergeSlice(original, patch []interface{}, schema LookupPatchMeta, mergeKey string, mergeOptions MergeOptions, isDeleteList bool) ([]interface{}, error) { + if len(original) == 0 && len(patch) == 0 { + return original, nil + } + + // All the values must be of the same type, but not a list. + t, err := sliceElementType(original, patch) + if err != nil { + return nil, err + } + + var merged []interface{} + kind := t.Kind() + // If the elements are not maps, merge the slices of scalars. + if kind != reflect.Map { + if mergeOptions.MergeParallelList && isDeleteList { + return deleteFromSlice(original, patch), nil + } + // Maybe in the future add a "concat" mode that doesn't + // deduplicate. + both := append(original, patch...) 
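+		// e.g. original [1, 2] and patch [2, 3] give both [1, 2, 2, 3], which
+		// deduplicates to [1, 2, 3] (illustrative values; the final order is
+		// enforced by normalizeElementOrder at the end of this function).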
+ merged = deduplicateScalars(both) + + } else { + if mergeKey == "" { + return nil, fmt.Errorf("cannot merge lists without merge key for %s", schema.Name()) + } + + original, patch, err = mergeSliceWithSpecialElements(original, patch, mergeKey) + if err != nil { + return nil, err + } + + merged, err = mergeSliceWithoutSpecialElements(original, patch, mergeKey, schema, mergeOptions) + if err != nil { + return nil, err + } + } + + // enforce the order + var patchItems, serverOnlyItems []interface{} + if len(mergeKey) == 0 { + patchItems, serverOnlyItems = partitionPrimitivesByPresentInList(merged, patch) + } else { + patchItems, serverOnlyItems, err = partitionMapsByPresentInList(merged, patch, mergeKey) + if err != nil { + return nil, err + } + } + return normalizeElementOrder(patchItems, serverOnlyItems, patch, original, mergeKey, kind) +} + +// mergeSliceWithSpecialElements handles special elements with directiveMarker +// before merging the slices. It returns a updated `original` and a patch without special elements. +// original and patch must be slices of maps, they should be checked before calling this function. +func mergeSliceWithSpecialElements(original, patch []interface{}, mergeKey string) ([]interface{}, []interface{}, error) { + patchWithoutSpecialElements := []interface{}{} + replace := false + for _, v := range patch { + typedV := v.(map[string]interface{}) + patchType, ok := typedV[directiveMarker] + if !ok { + patchWithoutSpecialElements = append(patchWithoutSpecialElements, v) + } else { + switch patchType { + case deleteDirective: + mergeValue, ok := typedV[mergeKey] + if ok { + var err error + original, err = deleteMatchingEntries(original, mergeKey, mergeValue) + if err != nil { + return nil, nil, err + } + } else { + return nil, nil, mergepatch.ErrNoMergeKey(typedV, mergeKey) + } + case replaceDirective: + replace = true + // Continue iterating through the array to prune any other $patch elements. + case mergeDirective: + return nil, nil, fmt.Errorf("merging lists cannot yet be specified in the patch") + default: + return nil, nil, mergepatch.ErrBadPatchType(patchType, typedV) + } + } + } + if replace { + return patchWithoutSpecialElements, nil, nil + } + return original, patchWithoutSpecialElements, nil +} + +// delete all matching entries (based on merge key) from a merging list +func deleteMatchingEntries(original []interface{}, mergeKey string, mergeValue interface{}) ([]interface{}, error) { + for { + _, originalKey, found, err := findMapInSliceBasedOnKeyValue(original, mergeKey, mergeValue) + if err != nil { + return nil, err + } + + if !found { + break + } + // Delete the element at originalKey. + original = append(original[:originalKey], original[originalKey+1:]...) + } + return original, nil +} + +// mergeSliceWithoutSpecialElements merges slices with non-special elements. +// original and patch must be slices of maps, they should be checked before calling this function. +func mergeSliceWithoutSpecialElements(original, patch []interface{}, mergeKey string, schema LookupPatchMeta, mergeOptions MergeOptions) ([]interface{}, error) { + for _, v := range patch { + typedV := v.(map[string]interface{}) + mergeValue, ok := typedV[mergeKey] + if !ok { + return nil, mergepatch.ErrNoMergeKey(typedV, mergeKey) + } + + // If we find a value with this merge key value in original, merge the + // maps. Otherwise append onto original. 
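+		// e.g. with mergeKey "name", patching original [{"name": "web", "image": "v1"}]
+		// with [{"name": "web", "image": "v2"}, {"name": "db"}] merges the "web"
+		// entries and appends "db" (illustrative values).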
+ originalMap, originalKey, found, err := findMapInSliceBasedOnKeyValue(original, mergeKey, mergeValue) + if err != nil { + return nil, err + } + + if found { + var mergedMaps interface{} + var err error + // Merge into original. + mergedMaps, err = mergeMap(originalMap, typedV, schema, mergeOptions) + if err != nil { + return nil, err + } + + original[originalKey] = mergedMaps + } else { + original = append(original, v) + } + } + return original, nil +} + +// deleteFromSlice uses the parallel list to delete the items in a list of scalars +func deleteFromSlice(current, toDelete []interface{}) []interface{} { + toDeleteMap := map[interface{}]interface{}{} + processed := make([]interface{}, 0, len(current)) + for _, v := range toDelete { + toDeleteMap[v] = true + } + for _, v := range current { + if _, found := toDeleteMap[v]; !found { + processed = append(processed, v) + } + } + return processed +} + +// This method no longer panics if any element of the slice is not a map. +func findMapInSliceBasedOnKeyValue(m []interface{}, key string, value interface{}) (map[string]interface{}, int, bool, error) { + for k, v := range m { + typedV, ok := v.(map[string]interface{}) + if !ok { + return nil, 0, false, fmt.Errorf("value for key %v is not a map", k) + } + + valueToMatch, ok := typedV[key] + if ok && valueToMatch == value { + return typedV, k, true, nil + } + } + + return nil, 0, false, nil +} + +// This function takes a JSON map and sorts all the lists that should be merged +// by key. This is needed by tests because in JSON, list order is significant, +// but in Strategic Merge Patch, merge lists do not have significant order. +// Sorting the lists allows for order-insensitive comparison of patched maps. +func sortMergeListsByName(mapJSON []byte, schema LookupPatchMeta) ([]byte, error) { + var m map[string]interface{} + err := json.Unmarshal(mapJSON, &m) + if err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + + newM, err := sortMergeListsByNameMap(m, schema) + if err != nil { + return nil, err + } + + return json.Marshal(newM) +} + +// Function sortMergeListsByNameMap recursively sorts the merge lists by its mergeKey in a map. +func sortMergeListsByNameMap(s map[string]interface{}, schema LookupPatchMeta) (map[string]interface{}, error) { + newS := map[string]interface{}{} + for k, v := range s { + if k == retainKeysDirective { + typedV, ok := v.([]interface{}) + if !ok { + return nil, mergepatch.ErrBadPatchFormatForRetainKeys + } + v = sortScalars(typedV) + } else if strings.HasPrefix(k, deleteFromPrimitiveListDirectivePrefix) { + typedV, ok := v.([]interface{}) + if !ok { + return nil, mergepatch.ErrBadPatchFormatForPrimitiveList + } + v = sortScalars(typedV) + } else if strings.HasPrefix(k, setElementOrderDirectivePrefix) { + _, ok := v.([]interface{}) + if !ok { + return nil, mergepatch.ErrBadPatchFormatForSetElementOrderList + } + } else if k != directiveMarker { + // recurse for map and slice. 
+			switch typedV := v.(type) {
+			case map[string]interface{}:
+				subschema, _, err := schema.LookupPatchMetadataForStruct(k)
+				if err != nil {
+					return nil, err
+				}
+				v, err = sortMergeListsByNameMap(typedV, subschema)
+				if err != nil {
+					return nil, err
+				}
+			case []interface{}:
+				subschema, patchMeta, err := schema.LookupPatchMetadataForSlice(k)
+				if err != nil {
+					return nil, err
+				}
+				_, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies())
+				if err != nil {
+					return nil, err
+				}
+				if patchStrategy == mergeDirective {
+					var err error
+					v, err = sortMergeListsByNameArray(typedV, subschema, patchMeta.GetPatchMergeKey(), true)
+					if err != nil {
+						return nil, err
+					}
+				}
+			}
+		}
+
+		newS[k] = v
+	}
+
+	return newS, nil
+}
+
+// Function sortMergeListsByNameArray recursively sorts the merge lists by their mergeKey in an array.
+func sortMergeListsByNameArray(s []interface{}, schema LookupPatchMeta, mergeKey string, recurse bool) ([]interface{}, error) {
+	if len(s) == 0 {
+		return s, nil
+	}
+
+	// We don't support lists of lists yet.
+	t, err := sliceElementType(s)
+	if err != nil {
+		return nil, err
+	}
+
+	// If the elements are not maps...
+	if t.Kind() != reflect.Map {
+		// Sort the elements, because they may have been merged out of order.
+		return deduplicateAndSortScalars(s), nil
+	}
+
+	// Elements are maps - if one of the keys of the map is a map or a
+	// list, we may need to recurse into it.
+	newS := []interface{}{}
+	for _, elem := range s {
+		if recurse {
+			typedElem := elem.(map[string]interface{})
+			newElem, err := sortMergeListsByNameMap(typedElem, schema)
+			if err != nil {
+				return nil, err
+			}
+
+			newS = append(newS, newElem)
+		} else {
+			newS = append(newS, elem)
+		}
+	}
+
+	// Sort the maps.
+	newS = sortMapsBasedOnField(newS, mergeKey)
+	return newS, nil
+}
+
+func sortMapsBasedOnField(m []interface{}, fieldName string) []interface{} {
+	mapM := mapSliceFromSlice(m)
+	ss := SortableSliceOfMaps{mapM, fieldName}
+	sort.Sort(ss)
+	newS := sliceFromMapSlice(ss.s)
+	return newS
+}
+
+func mapSliceFromSlice(m []interface{}) []map[string]interface{} {
+	newM := []map[string]interface{}{}
+	for _, v := range m {
+		vt := v.(map[string]interface{})
+		newM = append(newM, vt)
+	}
+
+	return newM
+}
+
+func sliceFromMapSlice(s []map[string]interface{}) []interface{} {
+	newS := []interface{}{}
+	for _, v := range s {
+		newS = append(newS, v)
+	}
+
+	return newS
+}
+
+type SortableSliceOfMaps struct {
+	s []map[string]interface{}
+	k string // key to sort on
+}
+
+func (ss SortableSliceOfMaps) Len() int {
+	return len(ss.s)
+}
+
+func (ss SortableSliceOfMaps) Less(i, j int) bool {
+	iStr := fmt.Sprintf("%v", ss.s[i][ss.k])
+	jStr := fmt.Sprintf("%v", ss.s[j][ss.k])
+	return sort.StringsAreSorted([]string{iStr, jStr})
+}
+
+func (ss SortableSliceOfMaps) Swap(i, j int) {
+	tmp := ss.s[i]
+	ss.s[i] = ss.s[j]
+	ss.s[j] = tmp
+}
+
+func deduplicateAndSortScalars(s []interface{}) []interface{} {
+	s = deduplicateScalars(s)
+	return sortScalars(s)
+}
+
+func sortScalars(s []interface{}) []interface{} {
+	ss := SortableSliceOfScalars{s}
+	sort.Sort(ss)
+	return ss.s
+}
+
+func deduplicateScalars(s []interface{}) []interface{} {
+	// Clever algorithm to deduplicate.
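+	// It compares every pair of elements; when it finds a duplicate it
+	// overwrites the duplicate with the last element and shrinks the slice,
+	// so it runs in O(n^2) time with O(1) extra space and does not preserve
+	// the original element order.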
+ length := len(s) - 1 + for i := 0; i < length; i++ { + for j := i + 1; j <= length; j++ { + if s[i] == s[j] { + s[j] = s[length] + s = s[0:length] + length-- + j-- + } + } + } + + return s +} + +type SortableSliceOfScalars struct { + s []interface{} +} + +func (ss SortableSliceOfScalars) Len() int { + return len(ss.s) +} + +func (ss SortableSliceOfScalars) Less(i, j int) bool { + iStr := fmt.Sprintf("%v", ss.s[i]) + jStr := fmt.Sprintf("%v", ss.s[j]) + return sort.StringsAreSorted([]string{iStr, jStr}) +} + +func (ss SortableSliceOfScalars) Swap(i, j int) { + tmp := ss.s[i] + ss.s[i] = ss.s[j] + ss.s[j] = tmp +} + +// Returns the type of the elements of N slice(s). If the type is different, +// another slice or undefined, returns an error. +func sliceElementType(slices ...[]interface{}) (reflect.Type, error) { + var prevType reflect.Type + for _, s := range slices { + // Go through elements of all given slices and make sure they are all the same type. + for _, v := range s { + currentType := reflect.TypeOf(v) + if prevType == nil { + prevType = currentType + // We don't support lists of lists yet. + if prevType.Kind() == reflect.Slice { + return nil, mergepatch.ErrNoListOfLists + } + } else { + if prevType != currentType { + return nil, fmt.Errorf("list element types are not identical: %v", fmt.Sprint(slices)) + } + prevType = currentType + } + } + } + + if prevType == nil { + return nil, fmt.Errorf("no elements in any of the given slices") + } + + return prevType, nil +} + +// MergingMapsHaveConflicts returns true if the left and right JSON interface +// objects overlap with different values in any key. All keys are required to be +// strings. Since patches of the same Type have congruent keys, this is valid +// for multiple patch types. This method supports strategic merge patch semantics. +func MergingMapsHaveConflicts(left, right map[string]interface{}, schema LookupPatchMeta) (bool, error) { + return mergingMapFieldsHaveConflicts(left, right, schema, "", "") +} + +func mergingMapFieldsHaveConflicts( + left, right interface{}, + schema LookupPatchMeta, + fieldPatchStrategy, fieldPatchMergeKey string, +) (bool, error) { + switch leftType := left.(type) { + case map[string]interface{}: + rightType, ok := right.(map[string]interface{}) + if !ok { + return true, nil + } + leftMarker, okLeft := leftType[directiveMarker] + rightMarker, okRight := rightType[directiveMarker] + // if one or the other has a directive marker, + // then we need to consider that before looking at the individual keys, + // since a directive operates on the whole map. + if okLeft || okRight { + // if one has a directive marker and the other doesn't, + // then we have a conflict, since one is deleting or replacing the whole map, + // and the other is doing things to individual keys. + if okLeft != okRight { + return true, nil + } + // if they both have markers, but they are not the same directive, + // then we have a conflict because they're doing different things to the map. + if leftMarker != rightMarker { + return true, nil + } + } + if fieldPatchStrategy == replaceDirective { + return false, nil + } + // Check the individual keys. 
+ return mapsHaveConflicts(leftType, rightType, schema) + + case []interface{}: + rightType, ok := right.([]interface{}) + if !ok { + return true, nil + } + return slicesHaveConflicts(leftType, rightType, schema, fieldPatchStrategy, fieldPatchMergeKey) + case string, float64, bool, int64, nil: + return !reflect.DeepEqual(left, right), nil + default: + return true, fmt.Errorf("unknown type: %v", reflect.TypeOf(left)) + } +} + +func mapsHaveConflicts(typedLeft, typedRight map[string]interface{}, schema LookupPatchMeta) (bool, error) { + for key, leftValue := range typedLeft { + if key != directiveMarker && key != retainKeysDirective { + if rightValue, ok := typedRight[key]; ok { + var subschema LookupPatchMeta + var patchMeta PatchMeta + var patchStrategy string + var err error + switch leftValue.(type) { + case []interface{}: + subschema, patchMeta, err = schema.LookupPatchMetadataForSlice(key) + if err != nil { + return true, err + } + _, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.patchStrategies) + if err != nil { + return true, err + } + case map[string]interface{}: + subschema, patchMeta, err = schema.LookupPatchMetadataForStruct(key) + if err != nil { + return true, err + } + _, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.patchStrategies) + if err != nil { + return true, err + } + } + + if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, + subschema, patchStrategy, patchMeta.GetPatchMergeKey()); hasConflicts { + return true, err + } + } + } + } + + return false, nil +} + +func slicesHaveConflicts( + typedLeft, typedRight []interface{}, + schema LookupPatchMeta, + fieldPatchStrategy, fieldPatchMergeKey string, +) (bool, error) { + elementType, err := sliceElementType(typedLeft, typedRight) + if err != nil { + return true, err + } + + if fieldPatchStrategy == mergeDirective { + // Merging lists of scalars have no conflicts by definition + // So we only need to check further if the elements are maps + if elementType.Kind() != reflect.Map { + return false, nil + } + + // Build a map for each slice and then compare the two maps + leftMap, err := sliceOfMapsToMapOfMaps(typedLeft, fieldPatchMergeKey) + if err != nil { + return true, err + } + + rightMap, err := sliceOfMapsToMapOfMaps(typedRight, fieldPatchMergeKey) + if err != nil { + return true, err + } + + return mapsOfMapsHaveConflicts(leftMap, rightMap, schema) + } + + // Either we don't have type information, or these are non-merging lists + if len(typedLeft) != len(typedRight) { + return true, nil + } + + // Sort scalar slices to prevent ordering issues + // We have no way to sort non-merging lists of maps + if elementType.Kind() != reflect.Map { + typedLeft = deduplicateAndSortScalars(typedLeft) + typedRight = deduplicateAndSortScalars(typedRight) + } + + // Compare the slices element by element in order + // This test will fail if the slices are not sorted + for i := range typedLeft { + if hasConflicts, err := mergingMapFieldsHaveConflicts(typedLeft[i], typedRight[i], schema, "", ""); hasConflicts { + return true, err + } + } + + return false, nil +} + +func sliceOfMapsToMapOfMaps(slice []interface{}, mergeKey string) (map[string]interface{}, error) { + result := make(map[string]interface{}, len(slice)) + for _, value := range slice { + typedValue, ok := value.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("invalid element type in merging list:%v", slice) + } + + mergeValue, ok := typedValue[mergeKey] + if !ok { + return nil, fmt.Errorf("cannot find merge key 
`%s` in merging list element:%v", mergeKey, typedValue)
+		}
+
+		result[fmt.Sprintf("%s", mergeValue)] = typedValue
+	}
+
+	return result, nil
+}
+
+func mapsOfMapsHaveConflicts(typedLeft, typedRight map[string]interface{}, schema LookupPatchMeta) (bool, error) {
+	for key, leftValue := range typedLeft {
+		if rightValue, ok := typedRight[key]; ok {
+			if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, schema, "", ""); hasConflicts {
+				return true, err
+			}
+		}
+	}
+
+	return false, nil
+}
+
+// CreateThreeWayMergePatch reconciles a modified configuration with an original configuration,
+// while preserving any changes or deletions made to the original configuration in the interim,
+// and not overridden by the current configuration. All three documents must be passed to the
+// method as json encoded content. It will return a strategic merge patch, or an error if any
+// of the documents is invalid, or if there are any preconditions that fail against the modified
+// configuration, or, if overwrite is false and there are conflicts between the modified and current
+// configurations. Conflicts are defined as keys changed differently from original to modified
+// than from original to current. In other words, a conflict occurs if modified changes any key
+// in a way that is different from how it is changed in current (e.g., deleting it, changing its
+// value). We also propagate values for fields that do not exist in original but are explicitly
+// defined in modified.
+func CreateThreeWayMergePatch(original, modified, current []byte, schema LookupPatchMeta, overwrite bool, fns ...mergepatch.PreconditionFunc) ([]byte, error) {
+	originalMap := map[string]interface{}{}
+	if len(original) > 0 {
+		if err := json.Unmarshal(original, &originalMap); err != nil {
+			return nil, mergepatch.ErrBadJSONDoc
+		}
+	}
+
+	modifiedMap := map[string]interface{}{}
+	if len(modified) > 0 {
+		if err := json.Unmarshal(modified, &modifiedMap); err != nil {
+			return nil, mergepatch.ErrBadJSONDoc
+		}
+	}
+
+	currentMap := map[string]interface{}{}
+	if len(current) > 0 {
+		if err := json.Unmarshal(current, &currentMap); err != nil {
+			return nil, mergepatch.ErrBadJSONDoc
+		}
+	}
+
+	// The patch is the difference from current to modified without deletions, plus deletions
+	// from original to modified. To find it, we compute deletions, which are the deletions from
+	// original to modified, and delta, which is the difference from current to modified without
+	// deletions, and then apply delta to deletions as a patch, which should be strictly additive.
+	deltaMapDiffOptions := DiffOptions{
+		IgnoreDeletions: true,
+		SetElementOrder: true,
+	}
+	deltaMap, err := diffMaps(currentMap, modifiedMap, schema, deltaMapDiffOptions)
+	if err != nil {
+		return nil, err
+	}
+	deletionsMapDiffOptions := DiffOptions{
+		SetElementOrder:           true,
+		IgnoreChangesAndAdditions: true,
+	}
+	deletionsMap, err := diffMaps(originalMap, modifiedMap, schema, deletionsMapDiffOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	mergeOptions := MergeOptions{}
+	patchMap, err := mergeMap(deletionsMap, deltaMap, schema, mergeOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	// Apply the preconditions to the patch, and return an error if any of them fail.
+	for _, fn := range fns {
+		if !fn(patchMap) {
+			return nil, mergepatch.NewErrPreconditionFailed(patchMap)
+		}
+	}
+
+	// If overwrite is false, and the patch contains any keys that were changed differently,
+	// then return a conflict error.
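+	// For example (illustrative): original {"replicas": 1}, modified
+	// {"replicas": 3}, current {"replicas": 2} produces a patch setting
+	// replicas to 3 and a changedMap setting it to 2, which conflict.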
+ if !overwrite { + changeMapDiffOptions := DiffOptions{} + changedMap, err := diffMaps(originalMap, currentMap, schema, changeMapDiffOptions) + if err != nil { + return nil, err + } + + hasConflicts, err := MergingMapsHaveConflicts(patchMap, changedMap, schema) + if err != nil { + return nil, err + } + + if hasConflicts { + return nil, mergepatch.NewErrConflict(mergepatch.ToYAMLOrError(patchMap), mergepatch.ToYAMLOrError(changedMap)) + } + } + + return json.Marshal(patchMap) +} + +func ItemAddedToModifiedSlice(original, modified string) bool { return original > modified } + +func ItemRemovedFromModifiedSlice(original, modified string) bool { return original < modified } + +func ItemMatchesOriginalAndModifiedSlice(original, modified string) bool { return original == modified } + +func CreateDeleteDirective(mergeKey string, mergeKeyValue interface{}) map[string]interface{} { + return map[string]interface{}{mergeKey: mergeKeyValue, directiveMarker: deleteDirective} +} + +func mapTypeAssertion(original, patch interface{}) (map[string]interface{}, map[string]interface{}, error) { + typedOriginal, ok := original.(map[string]interface{}) + if !ok { + return nil, nil, mergepatch.ErrBadArgType(typedOriginal, original) + } + typedPatch, ok := patch.(map[string]interface{}) + if !ok { + return nil, nil, mergepatch.ErrBadArgType(typedPatch, patch) + } + return typedOriginal, typedPatch, nil +} + +func sliceTypeAssertion(original, patch interface{}) ([]interface{}, []interface{}, error) { + typedOriginal, ok := original.([]interface{}) + if !ok { + return nil, nil, mergepatch.ErrBadArgType(typedOriginal, original) + } + typedPatch, ok := patch.([]interface{}) + if !ok { + return nil, nil, mergepatch.ErrBadArgType(typedPatch, patch) + } + return typedOriginal, typedPatch, nil +} + +// extractRetainKeysPatchStrategy process patch strategy, which is a string may contains multiple +// patch strategies separated by ",". It returns a boolean var indicating if it has +// retainKeys strategies and a string for the other strategy. +func extractRetainKeysPatchStrategy(strategies []string) (bool, string, error) { + switch len(strategies) { + case 0: + return false, "", nil + case 1: + singleStrategy := strategies[0] + switch singleStrategy { + case retainKeysStrategy: + return true, "", nil + default: + return false, singleStrategy, nil + } + case 2: + switch { + case strategies[0] == retainKeysStrategy: + return true, strategies[1], nil + case strategies[1] == retainKeysStrategy: + return true, strategies[0], nil + default: + return false, "", fmt.Errorf("unexpected patch strategy: %v", strategies) + } + default: + return false, "", fmt.Errorf("unexpected patch strategy: %v", strategies) + } +} + +// hasAdditionalNewField returns if original map has additional key with non-nil value than modified. +func hasAdditionalNewField(original, modified map[string]interface{}) bool { + for k, v := range original { + if v == nil { + continue + } + if _, found := modified[k]; !found { + return true + } + } + return false +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go new file mode 100644 index 000000000..f84d65aac --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go @@ -0,0 +1,193 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package strategicpatch + +import ( + "errors" + "strings" + + "k8s.io/apimachinery/pkg/util/mergepatch" + openapi "k8s.io/kube-openapi/pkg/util/proto" +) + +const ( + patchStrategyOpenapiextensionKey = "x-kubernetes-patch-strategy" + patchMergeKeyOpenapiextensionKey = "x-kubernetes-patch-merge-key" +) + +type LookupPatchItem interface { + openapi.SchemaVisitor + + Error() error + Path() *openapi.Path +} + +type kindItem struct { + key string + path *openapi.Path + err error + patchmeta PatchMeta + subschema openapi.Schema + hasVisitKind bool +} + +func NewKindItem(key string, path *openapi.Path) *kindItem { + return &kindItem{ + key: key, + path: path, + } +} + +var _ LookupPatchItem = &kindItem{} + +func (item *kindItem) Error() error { + return item.err +} + +func (item *kindItem) Path() *openapi.Path { + return item.path +} + +func (item *kindItem) VisitPrimitive(schema *openapi.Primitive) { + item.err = errors.New("expected kind, but got primitive") +} + +func (item *kindItem) VisitArray(schema *openapi.Array) { + item.err = errors.New("expected kind, but got slice") +} + +func (item *kindItem) VisitMap(schema *openapi.Map) { + item.err = errors.New("expected kind, but got map") +} + +func (item *kindItem) VisitReference(schema openapi.Reference) { + if !item.hasVisitKind { + schema.SubSchema().Accept(item) + } +} + +func (item *kindItem) VisitKind(schema *openapi.Kind) { + subschema, ok := schema.Fields[item.key] + if !ok { + item.err = FieldNotFoundError{Path: schema.GetPath().String(), Field: item.key} + return + } + + mergeKey, patchStrategies, err := parsePatchMetadata(subschema.GetExtensions()) + if err != nil { + item.err = err + return + } + item.patchmeta = PatchMeta{ + patchStrategies: patchStrategies, + patchMergeKey: mergeKey, + } + item.subschema = subschema +} + +type sliceItem struct { + key string + path *openapi.Path + err error + patchmeta PatchMeta + subschema openapi.Schema + hasVisitKind bool +} + +func NewSliceItem(key string, path *openapi.Path) *sliceItem { + return &sliceItem{ + key: key, + path: path, + } +} + +var _ LookupPatchItem = &sliceItem{} + +func (item *sliceItem) Error() error { + return item.err +} + +func (item *sliceItem) Path() *openapi.Path { + return item.path +} + +func (item *sliceItem) VisitPrimitive(schema *openapi.Primitive) { + item.err = errors.New("expected slice, but got primitive") +} + +func (item *sliceItem) VisitArray(schema *openapi.Array) { + if !item.hasVisitKind { + item.err = errors.New("expected visit kind first, then visit array") + } + subschema := schema.SubType + item.subschema = subschema +} + +func (item *sliceItem) VisitMap(schema *openapi.Map) { + item.err = errors.New("expected slice, but got map") +} + +func (item *sliceItem) VisitReference(schema openapi.Reference) { + if !item.hasVisitKind { + schema.SubSchema().Accept(item) + } else { + item.subschema = schema.SubSchema() + } +} + +func (item *sliceItem) VisitKind(schema *openapi.Kind) { + subschema, ok := schema.Fields[item.key] + if !ok { + item.err = FieldNotFoundError{Path: schema.GetPath().String(), Field: item.key} + return + } + + mergeKey, 
patchStrategies, err := parsePatchMetadata(subschema.GetExtensions()) + if err != nil { + item.err = err + return + } + item.patchmeta = PatchMeta{ + patchStrategies: patchStrategies, + patchMergeKey: mergeKey, + } + item.hasVisitKind = true + subschema.Accept(item) +} + +func parsePatchMetadata(extensions map[string]interface{}) (string, []string, error) { + ps, foundPS := extensions[patchStrategyOpenapiextensionKey] + var patchStrategies []string + var mergeKey, patchStrategy string + var ok bool + if foundPS { + patchStrategy, ok = ps.(string) + if ok { + patchStrategies = strings.Split(patchStrategy, ",") + } else { + return "", nil, mergepatch.ErrBadArgType(patchStrategy, ps) + } + } + mk, foundMK := extensions[patchMergeKeyOpenapiextensionKey] + if foundMK { + mergeKey, ok = mk.(string) + if !ok { + return "", nil, mergepatch.ErrBadArgType(mergeKey, mk) + } + } + return mergeKey, patchStrategies, nil +} diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS new file mode 100644 index 000000000..3f72c69ba --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- pwittrock +reviewers: +- mengqiy +- apelisse diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go new file mode 100644 index 000000000..8205a4dd1 --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go @@ -0,0 +1,513 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json is forked from the Go standard library to enable us to find the +// field of a struct that a given JSON key maps to. +package json + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +const ( + patchStrategyTagKey = "patchStrategy" + patchMergeKeyTagKey = "patchMergeKey" +) + +// Finds the patchStrategy and patchMergeKey struct tag fields on a given +// struct field given the struct type and the JSON name of the field. +// It returns field type, a slice of patch strategies, merge key and error. +// TODO: fix the returned errors to be introspectable. +func LookupPatchMetadataForStruct(t reflect.Type, jsonField string) ( + elemType reflect.Type, patchStrategies []string, patchMergeKey string, e error) { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + e = fmt.Errorf("merging an object in json but data type is not struct, instead is: %s", + t.Kind().String()) + return + } + jf := []byte(jsonField) + // Find the field that the JSON library would use. + var f *field + fields := cachedTypeFields(t) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, jf) { + f = ff + break + } + // Do case-insensitive comparison. + if f == nil && ff.equalFold(ff.nameBytes, jf) { + f = ff + } + } + if f != nil { + // Find the reflect.Value of the most preferential struct field. 
+ tjf := t.Field(f.index[0]) + // we must navigate down all the anonymously included structs in the chain + for i := 1; i < len(f.index); i++ { + tjf = tjf.Type.Field(f.index[i]) + } + patchStrategy := tjf.Tag.Get(patchStrategyTagKey) + patchMergeKey = tjf.Tag.Get(patchMergeKeyTagKey) + patchStrategies = strings.Split(patchStrategy, ",") + elemType = tjf.Type + return + } + e = fmt.Errorf("unable to find api field in struct %s for the json field %q", t.Name(), jsonField) + return +} + +// A field represents a single field found in a struct. +type field struct { + name string + nameBytes []byte // []byte(name) + equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent + + tag bool + // index is the sequence of indexes from the containing type fields to this field. + // it is a slice because anonymous structs will need multiple navigation steps to correctly + // resolve the proper fields + index []int + typ reflect.Type + omitEmpty bool + quoted bool +} + +func (f field) String() string { + return fmt.Sprintf("{name: %s, type: %v, tag: %v, index: %v, omitEmpty: %v, quoted: %v}", f.name, f.typ, f.tag, f.index, f.omitEmpty, f.quoted) +} + +func fillField(f field) field { + f.nameBytes = []byte(f.name) + f.equalFold = foldFunc(f.nameBytes) + return f +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from json tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that JSON should recognize for the given type. +// The algorithm is breadth-first search over the set of structs to include - the top struct +// and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. 
+ for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" { // unexported + continue + } + tag := sf.Tag.Get("json") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if !isValidTag(name) { + name = "" + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := name != "" + if name == "" { + name = sf.Name + } + fields = append(fields, fillField(field{ + name: name, + tag: tagged, + index: index, + typ: ft, + omitEmpty: opts.Contains("omitempty"), + quoted: opts.Contains("string"), + })) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with JSON tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// JSON tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. 
+	if len(fields) > 1 {
+		return field{}, false
+	}
+	return fields[0], true
+}
+
+var fieldCache struct {
+	sync.RWMutex
+	m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+	fieldCache.RLock()
+	f := fieldCache.m[t]
+	fieldCache.RUnlock()
+	if f != nil {
+		return f
+	}
+
+	// Compute fields without lock.
+	// Might duplicate effort but won't hold other computations back.
+	f = typeFields(t)
+	if f == nil {
+		f = []field{}
+	}
+
+	fieldCache.Lock()
+	if fieldCache.m == nil {
+		fieldCache.m = map[reflect.Type][]field{}
+	}
+	fieldCache.m[t] = f
+	fieldCache.Unlock()
+	return f
+}
+
+func isValidTag(s string) bool {
+	if s == "" {
+		return false
+	}
+	for _, c := range s {
+		switch {
+		case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+			// Backslash and quote chars are reserved, but
+			// otherwise any punctuation chars are allowed
+			// in a tag name.
+		default:
+			if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+const (
+	caseMask     = ^byte(0x20) // Mask to ignore case in ASCII.
+	kelvin       = '\u212a'
+	smallLongEss = '\u017f'
+)
+
+// foldFunc returns one of four different case folding equivalence
+// functions, from most general (and slow) to fastest:
+//
+// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
+// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
+// 3) asciiEqualFold, no special, but includes non-letters (including _)
+// 4) simpleLetterEqualFold, no specials, no non-letters.
+//
+// The letters S and K are special because they map to 3 runes, not just 2:
+//  * S maps to s and to U+017F 'ſ' Latin small letter long s
+//  * k maps to K and to U+212A 'K' Kelvin sign
+// See http://play.golang.org/p/tTxjOc0OGo
+//
+// The returned function is specialized for matching against s and
+// should only be given s. It's not curried for performance reasons.
+func foldFunc(s []byte) func(s, t []byte) bool {
+	nonLetter := false
+	special := false // special letter
+	for _, b := range s {
+		if b >= utf8.RuneSelf {
+			return bytes.EqualFold
+		}
+		upper := b & caseMask
+		if upper < 'A' || upper > 'Z' {
+			nonLetter = true
+		} else if upper == 'K' || upper == 'S' {
+			// See above for why these letters are special.
+			special = true
+		}
+	}
+	if special {
+		return equalFoldRight
+	}
+	if nonLetter {
+		return asciiEqualFold
+	}
+	return simpleLetterEqualFold
+}
+
+// equalFoldRight is a specialization of bytes.EqualFold when s is
+// known to be all ASCII (including punctuation), but contains an 's',
+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
+// See comments on foldFunc.
+func equalFoldRight(s, t []byte) bool {
+	for _, sb := range s {
+		if len(t) == 0 {
+			return false
+		}
+		tb := t[0]
+		if tb < utf8.RuneSelf {
+			if sb != tb {
+				sbUpper := sb & caseMask
+				if 'A' <= sbUpper && sbUpper <= 'Z' {
+					if sbUpper != tb&caseMask {
+						return false
+					}
+				} else {
+					return false
+				}
+			}
+			t = t[1:]
+			continue
+		}
+		// sb is ASCII and t is not. t must be either kelvin
+		// sign or long s; sb must be s, S, k, or K.
+ tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. +// See comments on foldFunc. +func asciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. +func simpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} + +// tagOptions is the string following a comma in a struct field's "json" +// tag, or the empty string. It does not include the leading comma. +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. +func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. +func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/vendor/k8s.io/client-go/tools/record/OWNERS b/vendor/k8s.io/client-go/tools/record/OWNERS new file mode 100644 index 000000000..6ce73bb5c --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/OWNERS @@ -0,0 +1,29 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- derekwaynecarr +- caesarxuchao +- vishh +- mikedanese +- liggitt +- nikhiljindal +- erictune +- pmorie +- dchen1107 +- saad-ali +- luxas +- yifan-gu +- eparis +- mwielgus +- timothysc +- jsafrane +- dims +- krousey +- a-robinson +- aveshagarwal +- resouer +- cjcullen diff --git a/vendor/k8s.io/client-go/tools/record/doc.go b/vendor/k8s.io/client-go/tools/record/doc.go new file mode 100644 index 000000000..657ddecbc --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package record has all client logic for recording and reporting events. +package record // import "k8s.io/client-go/tools/record" diff --git a/vendor/k8s.io/client-go/tools/record/event.go b/vendor/k8s.io/client-go/tools/record/event.go new file mode 100644 index 000000000..565e72802 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/event.go @@ -0,0 +1,302 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package record + +import ( + "fmt" + "math/rand" + "time" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/clock" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/watch" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record/util" + ref "k8s.io/client-go/tools/reference" + "k8s.io/klog" +) + +const maxTriesPerEvent = 12 + +var defaultSleepDuration = 10 * time.Second + +const maxQueuedEvents = 1000 + +// EventSink knows how to store events (client.Client implements it.) +// EventSink must respect the namespace that will be embedded in 'event'. +// It is assumed that EventSink will return the same sorts of errors as +// pkg/client's REST client. +type EventSink interface { + Create(event *v1.Event) (*v1.Event, error) + Update(event *v1.Event) (*v1.Event, error) + Patch(oldEvent *v1.Event, data []byte) (*v1.Event, error) +} + +// EventRecorder knows how to record events on behalf of an EventSource. +type EventRecorder interface { + // Event constructs an event from the given information and puts it in the queue for sending. + // 'object' is the object this event is about. Event will make a reference-- or you may also + // pass a reference to the object directly. + // 'type' of this event, and can be one of Normal, Warning. New types could be added in future + // 'reason' is the reason this event is generated. 'reason' should be short and unique; it + // should be in UpperCamelCase format (starting with a capital letter). "reason" will be used + // to automate handling of events, so imagine people writing switch statements to handle them. + // You want to make that easy. + // 'message' is intended to be human readable. + // + // The resulting event will be created in the same namespace as the reference object. + Event(object runtime.Object, eventtype, reason, message string) + + // Eventf is just like Event, but with Sprintf for the message field. + Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) + + // PastEventf is just like Eventf, but with an option to specify the event's 'timestamp' field. 
+ PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) + + // AnnotatedEventf is just like eventf, but with annotations attached + AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) +} + +// EventBroadcaster knows how to receive events and send them to any EventSink, watcher, or log. +type EventBroadcaster interface { + // StartEventWatcher starts sending events received from this EventBroadcaster to the given + // event handler function. The return value can be ignored or used to stop recording, if + // desired. + StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface + + // StartRecordingToSink starts sending events received from this EventBroadcaster to the given + // sink. The return value can be ignored or used to stop recording, if desired. + StartRecordingToSink(sink EventSink) watch.Interface + + // StartLogging starts sending events received from this EventBroadcaster to the given logging + // function. The return value can be ignored or used to stop recording, if desired. + StartLogging(logf func(format string, args ...interface{})) watch.Interface + + // NewRecorder returns an EventRecorder that can be used to send events to this EventBroadcaster + // with the event source set to the given event source. + NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder +} + +// Creates a new event broadcaster. +func NewBroadcaster() EventBroadcaster { + return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), defaultSleepDuration} +} + +func NewBroadcasterForTests(sleepDuration time.Duration) EventBroadcaster { + return &eventBroadcasterImpl{watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull), sleepDuration} +} + +type eventBroadcasterImpl struct { + *watch.Broadcaster + sleepDuration time.Duration +} + +// StartRecordingToSink starts sending events received from the specified eventBroadcaster to the given sink. +// The return value can be ignored or used to stop recording, if desired. +// TODO: make me an object with parameterizable queue length and retry interval +func (eventBroadcaster *eventBroadcasterImpl) StartRecordingToSink(sink EventSink) watch.Interface { + // The default math/rand package functions aren't thread safe, so create a + // new Rand object for each StartRecording call. + randGen := rand.New(rand.NewSource(time.Now().UnixNano())) + eventCorrelator := NewEventCorrelator(clock.RealClock{}) + return eventBroadcaster.StartEventWatcher( + func(event *v1.Event) { + recordToSink(sink, event, eventCorrelator, randGen, eventBroadcaster.sleepDuration) + }) +} + +func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrelator, randGen *rand.Rand, sleepDuration time.Duration) { + // Make a copy before modification, because there could be multiple listeners. + // Events are safe to copy like this. + eventCopy := *event + event = &eventCopy + result, err := eventCorrelator.EventCorrelate(event) + if err != nil { + utilruntime.HandleError(err) + } + if result.Skip { + return + } + tries := 0 + for { + if recordEvent(sink, result.Event, result.Patch, result.Event.Count > 1, eventCorrelator) { + break + } + tries++ + if tries >= maxTriesPerEvent { + klog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event) + break + } + // Randomize the first sleep so that various clients won't all be + // synced up if the master goes down. 
+ if tries == 1 { + time.Sleep(time.Duration(float64(sleepDuration) * randGen.Float64())) + } else { + time.Sleep(sleepDuration) + } + } +} + +// recordEvent attempts to write event to a sink. It returns true if the event +// was successfully recorded or discarded, false if it should be retried. +// If updateExistingEvent is false, it creates a new event, otherwise it updates +// existing event. +func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEvent bool, eventCorrelator *EventCorrelator) bool { + var newEvent *v1.Event + var err error + if updateExistingEvent { + newEvent, err = sink.Patch(event, patch) + } + // Update can fail because the event may have been removed and it no longer exists. + if !updateExistingEvent || (updateExistingEvent && util.IsKeyNotFoundError(err)) { + // Making sure that ResourceVersion is empty on creation + event.ResourceVersion = "" + newEvent, err = sink.Create(event) + } + if err == nil { + // we need to update our event correlator with the server returned state to handle name/resourceversion + eventCorrelator.UpdateState(newEvent) + return true + } + + // If we can't contact the server, then hold everything while we keep trying. + // Otherwise, something about the event is malformed and we should abandon it. + switch err.(type) { + case *restclient.RequestConstructionError: + // We will construct the request the same next time, so don't keep trying. + klog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err) + return true + case *errors.StatusError: + if errors.IsAlreadyExists(err) { + klog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err) + } else { + klog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err) + } + return true + case *errors.UnexpectedObjectError: + // We don't expect this; it implies the server's response didn't match a + // known pattern. Go ahead and retry. + default: + // This case includes actual http transport errors. Go ahead and retry. + } + klog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err) + return false +} + +// StartLogging starts sending events received from this EventBroadcaster to the given logging function. +// The return value can be ignored or used to stop recording, if desired. +func (eventBroadcaster *eventBroadcasterImpl) StartLogging(logf func(format string, args ...interface{})) watch.Interface { + return eventBroadcaster.StartEventWatcher( + func(e *v1.Event) { + logf("Event(%#v): type: '%v' reason: '%v' %v", e.InvolvedObject, e.Type, e.Reason, e.Message) + }) +} + +// StartEventWatcher starts sending events received from this EventBroadcaster to the given event handler function. +// The return value can be ignored or used to stop recording, if desired. +func (eventBroadcaster *eventBroadcasterImpl) StartEventWatcher(eventHandler func(*v1.Event)) watch.Interface { + watcher := eventBroadcaster.Watch() + go func() { + defer utilruntime.HandleCrash() + for watchEvent := range watcher.ResultChan() { + event, ok := watchEvent.Object.(*v1.Event) + if !ok { + // This is all local, so there's no reason this should + // ever happen. + continue + } + eventHandler(event) + } + }() + return watcher +} + +// NewRecorder returns an EventRecorder that records events with the given event source. 
+func (eventBroadcaster *eventBroadcasterImpl) NewRecorder(scheme *runtime.Scheme, source v1.EventSource) EventRecorder { + return &recorderImpl{scheme, source, eventBroadcaster.Broadcaster, clock.RealClock{}} +} + +type recorderImpl struct { + scheme *runtime.Scheme + source v1.EventSource + *watch.Broadcaster + clock clock.Clock +} + +func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations map[string]string, timestamp metav1.Time, eventtype, reason, message string) { + ref, err := ref.GetReference(recorder.scheme, object) + if err != nil { + klog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message) + return + } + + if !util.ValidateEventType(eventtype) { + klog.Errorf("Unsupported event type: '%v'", eventtype) + return + } + + event := recorder.makeEvent(ref, annotations, eventtype, reason, message) + event.Source = recorder.source + + go func() { + // NOTE: events should be a non-blocking operation + defer utilruntime.HandleCrash() + recorder.Action(watch.Added, event) + }() +} + +func (recorder *recorderImpl) Event(object runtime.Object, eventtype, reason, message string) { + recorder.generateEvent(object, nil, metav1.Now(), eventtype, reason, message) +} + +func (recorder *recorderImpl) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { + recorder.Event(object, eventtype, reason, fmt.Sprintf(messageFmt, args...)) +} + +func (recorder *recorderImpl) PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) { + recorder.generateEvent(object, nil, timestamp, eventtype, reason, fmt.Sprintf(messageFmt, args...)) +} + +func (recorder *recorderImpl) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { + recorder.generateEvent(object, annotations, metav1.Now(), eventtype, reason, fmt.Sprintf(messageFmt, args...)) +} + +func (recorder *recorderImpl) makeEvent(ref *v1.ObjectReference, annotations map[string]string, eventtype, reason, message string) *v1.Event { + t := metav1.Time{Time: recorder.clock.Now()} + namespace := ref.Namespace + if namespace == "" { + namespace = metav1.NamespaceDefault + } + return &v1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()), + Namespace: namespace, + Annotations: annotations, + }, + InvolvedObject: *ref, + Reason: reason, + Message: message, + FirstTimestamp: t, + LastTimestamp: t, + Count: 1, + Type: eventtype, + } +} diff --git a/vendor/k8s.io/client-go/tools/record/events_cache.go b/vendor/k8s.io/client-go/tools/record/events_cache.go new file mode 100644 index 000000000..a42084f3a --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/events_cache.go @@ -0,0 +1,462 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package record + +import ( + "encoding/json" + "fmt" + "strings" + "sync" + "time" + + "github.com/golang/groupcache/lru" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/client-go/util/flowcontrol" +) + +const ( + maxLruCacheEntries = 4096 + + // if we see the same event that varies only by message + // more than 10 times in a 10 minute period, aggregate the event + defaultAggregateMaxEvents = 10 + defaultAggregateIntervalInSeconds = 600 + + // by default, allow a source to send 25 events about an object + // but control the refill rate to 1 new event every 5 minutes + // this helps control the long-tail of events for things that are always + // unhealthy + defaultSpamBurst = 25 + defaultSpamQPS = 1. / 300. +) + +// getEventKey builds unique event key based on source, involvedObject, reason, message +func getEventKey(event *v1.Event) string { + return strings.Join([]string{ + event.Source.Component, + event.Source.Host, + event.InvolvedObject.Kind, + event.InvolvedObject.Namespace, + event.InvolvedObject.Name, + event.InvolvedObject.FieldPath, + string(event.InvolvedObject.UID), + event.InvolvedObject.APIVersion, + event.Type, + event.Reason, + event.Message, + }, + "") +} + +// getSpamKey builds unique event key based on source, involvedObject +func getSpamKey(event *v1.Event) string { + return strings.Join([]string{ + event.Source.Component, + event.Source.Host, + event.InvolvedObject.Kind, + event.InvolvedObject.Namespace, + event.InvolvedObject.Name, + string(event.InvolvedObject.UID), + event.InvolvedObject.APIVersion, + }, + "") +} + +// EventFilterFunc is a function that returns true if the event should be skipped +type EventFilterFunc func(event *v1.Event) bool + +// EventSourceObjectSpamFilter is responsible for throttling +// the amount of events a source and object can produce. +type EventSourceObjectSpamFilter struct { + sync.RWMutex + + // the cache that manages last synced state + cache *lru.Cache + + // burst is the amount of events we allow per source + object + burst int + + // qps is the refill rate of the token bucket in queries per second + qps float32 + + // clock is used to allow for testing over a time interval + clock clock.Clock +} + +// NewEventSourceObjectSpamFilter allows burst events from a source about an object with the specified qps refill. +func NewEventSourceObjectSpamFilter(lruCacheSize, burst int, qps float32, clock clock.Clock) *EventSourceObjectSpamFilter { + return &EventSourceObjectSpamFilter{ + cache: lru.New(lruCacheSize), + burst: burst, + qps: qps, + clock: clock, + } +} + +// spamRecord holds data used to perform spam filtering decisions. +type spamRecord struct { + // rateLimiter controls the rate of events about this object + rateLimiter flowcontrol.RateLimiter +} + +// Filter controls that a given source+object are not exceeding the allowed rate. +func (f *EventSourceObjectSpamFilter) Filter(event *v1.Event) bool { + var record spamRecord + + // controls our cached information about this event (source+object) + eventKey := getSpamKey(event) + + // do we have a record of similar events in our cache? 
+ f.Lock() + defer f.Unlock() + value, found := f.cache.Get(eventKey) + if found { + record = value.(spamRecord) + } + + // verify we have a rate limiter for this record + if record.rateLimiter == nil { + record.rateLimiter = flowcontrol.NewTokenBucketRateLimiterWithClock(f.qps, f.burst, f.clock) + } + + // ensure we have available rate + filter := !record.rateLimiter.TryAccept() + + // update the cache + f.cache.Add(eventKey, record) + + return filter +} + +// EventAggregatorKeyFunc is responsible for grouping events for aggregation +// It returns a tuple of the following: +// aggregateKey - key the identifies the aggregate group to bucket this event +// localKey - key that makes this event in the local group +type EventAggregatorKeyFunc func(event *v1.Event) (aggregateKey string, localKey string) + +// EventAggregatorByReasonFunc aggregates events by exact match on event.Source, event.InvolvedObject, event.Type and event.Reason +func EventAggregatorByReasonFunc(event *v1.Event) (string, string) { + return strings.Join([]string{ + event.Source.Component, + event.Source.Host, + event.InvolvedObject.Kind, + event.InvolvedObject.Namespace, + event.InvolvedObject.Name, + string(event.InvolvedObject.UID), + event.InvolvedObject.APIVersion, + event.Type, + event.Reason, + }, + ""), event.Message +} + +// EventAggregatorMessageFunc is responsible for producing an aggregation message +type EventAggregatorMessageFunc func(event *v1.Event) string + +// EventAggregratorByReasonMessageFunc returns an aggregate message by prefixing the incoming message +func EventAggregatorByReasonMessageFunc(event *v1.Event) string { + return "(combined from similar events): " + event.Message +} + +// EventAggregator identifies similar events and aggregates them into a single event +type EventAggregator struct { + sync.RWMutex + + // The cache that manages aggregation state + cache *lru.Cache + + // The function that groups events for aggregation + keyFunc EventAggregatorKeyFunc + + // The function that generates a message for an aggregate event + messageFunc EventAggregatorMessageFunc + + // The maximum number of events in the specified interval before aggregation occurs + maxEvents uint + + // The amount of time in seconds that must transpire since the last occurrence of a similar event before it's considered new + maxIntervalInSeconds uint + + // clock is used to allow for testing over a time interval + clock clock.Clock +} + +// NewEventAggregator returns a new instance of an EventAggregator +func NewEventAggregator(lruCacheSize int, keyFunc EventAggregatorKeyFunc, messageFunc EventAggregatorMessageFunc, + maxEvents int, maxIntervalInSeconds int, clock clock.Clock) *EventAggregator { + return &EventAggregator{ + cache: lru.New(lruCacheSize), + keyFunc: keyFunc, + messageFunc: messageFunc, + maxEvents: uint(maxEvents), + maxIntervalInSeconds: uint(maxIntervalInSeconds), + clock: clock, + } +} + +// aggregateRecord holds data used to perform aggregation decisions +type aggregateRecord struct { + // we track the number of unique local keys we have seen in the aggregate set to know when to actually aggregate + // if the size of this set exceeds the max, we know we need to aggregate + localKeys sets.String + // The last time at which the aggregate was recorded + lastTimestamp metav1.Time +} + +// EventAggregate checks if a similar event has been seen according to the +// aggregation configuration (max events, max interval, etc) and returns: +// +// - The (potentially modified) event that should be created +// - The 
cache key for the event, for correlation purposes. This will be set to +// the full key for normal events, and to the result of +// EventAggregatorMessageFunc for aggregate events. +func (e *EventAggregator) EventAggregate(newEvent *v1.Event) (*v1.Event, string) { + now := metav1.NewTime(e.clock.Now()) + var record aggregateRecord + // eventKey is the full cache key for this event + eventKey := getEventKey(newEvent) + // aggregateKey is for the aggregate event, if one is needed. + aggregateKey, localKey := e.keyFunc(newEvent) + + // Do we have a record of similar events in our cache? + e.Lock() + defer e.Unlock() + value, found := e.cache.Get(aggregateKey) + if found { + record = value.(aggregateRecord) + } + + // Is the previous record too old? If so, make a fresh one. Note: if we didn't + // find a similar record, its lastTimestamp will be the zero value, so we + // create a new one in that case. + maxInterval := time.Duration(e.maxIntervalInSeconds) * time.Second + interval := now.Time.Sub(record.lastTimestamp.Time) + if interval > maxInterval { + record = aggregateRecord{localKeys: sets.NewString()} + } + + // Write the new event into the aggregation record and put it on the cache + record.localKeys.Insert(localKey) + record.lastTimestamp = now + e.cache.Add(aggregateKey, record) + + // If we are not yet over the threshold for unique events, don't correlate them + if uint(record.localKeys.Len()) < e.maxEvents { + return newEvent, eventKey + } + + // do not grow our local key set any larger than max + record.localKeys.PopAny() + + // create a new aggregate event, and return the aggregateKey as the cache key + // (so that it can be overwritten.) + eventCopy := &v1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%v.%x", newEvent.InvolvedObject.Name, now.UnixNano()), + Namespace: newEvent.Namespace, + }, + Count: 1, + FirstTimestamp: now, + InvolvedObject: newEvent.InvolvedObject, + LastTimestamp: now, + Message: e.messageFunc(newEvent), + Type: newEvent.Type, + Reason: newEvent.Reason, + Source: newEvent.Source, + } + return eventCopy, aggregateKey +} + +// eventLog records data about when an event was observed +type eventLog struct { + // The number of times the event has occurred since first occurrence. + count uint + + // The time at which the event was first recorded. 
+ firstTimestamp metav1.Time + + // The unique name of the first occurrence of this event + name string + + // Resource version returned from previous interaction with server + resourceVersion string +} + +// eventLogger logs occurrences of an event +type eventLogger struct { + sync.RWMutex + cache *lru.Cache + clock clock.Clock +} + +// newEventLogger observes events and counts their frequencies +func newEventLogger(lruCacheEntries int, clock clock.Clock) *eventLogger { + return &eventLogger{cache: lru.New(lruCacheEntries), clock: clock} +} + +// eventObserve records an event, or updates an existing one if key is a cache hit +func (e *eventLogger) eventObserve(newEvent *v1.Event, key string) (*v1.Event, []byte, error) { + var ( + patch []byte + err error + ) + eventCopy := *newEvent + event := &eventCopy + + e.Lock() + defer e.Unlock() + + // Check if there is an existing event we should update + lastObservation := e.lastEventObservationFromCache(key) + + // If we found a result, prepare a patch + if lastObservation.count > 0 { + // update the event based on the last observation so patch will work as desired + event.Name = lastObservation.name + event.ResourceVersion = lastObservation.resourceVersion + event.FirstTimestamp = lastObservation.firstTimestamp + event.Count = int32(lastObservation.count) + 1 + + eventCopy2 := *event + eventCopy2.Count = 0 + eventCopy2.LastTimestamp = metav1.NewTime(time.Unix(0, 0)) + eventCopy2.Message = "" + + newData, _ := json.Marshal(event) + oldData, _ := json.Marshal(eventCopy2) + patch, err = strategicpatch.CreateTwoWayMergePatch(oldData, newData, event) + } + + // record our new observation + e.cache.Add( + key, + eventLog{ + count: uint(event.Count), + firstTimestamp: event.FirstTimestamp, + name: event.Name, + resourceVersion: event.ResourceVersion, + }, + ) + return event, patch, err +} + +// updateState updates its internal tracking information based on latest server state +func (e *eventLogger) updateState(event *v1.Event) { + key := getEventKey(event) + e.Lock() + defer e.Unlock() + // record our new observation + e.cache.Add( + key, + eventLog{ + count: uint(event.Count), + firstTimestamp: event.FirstTimestamp, + name: event.Name, + resourceVersion: event.ResourceVersion, + }, + ) +} + +// lastEventObservationFromCache returns the event from the cache, reads must be protected via external lock +func (e *eventLogger) lastEventObservationFromCache(key string) eventLog { + value, ok := e.cache.Get(key) + if ok { + observationValue, ok := value.(eventLog) + if ok { + return observationValue + } + } + return eventLog{} +} + +// EventCorrelator processes all incoming events and performs analysis to avoid overwhelming the system. It can filter all +// incoming events to see if the event should be filtered from further processing. It can aggregate similar events that occur +// frequently to protect the system from spamming events that are difficult for users to distinguish. It performs de-duplication +// to ensure events that are observed multiple times are compacted into a single event with increasing counts. 
+type EventCorrelator struct { + // the function to filter the event + filterFunc EventFilterFunc + // the object that performs event aggregation + aggregator *EventAggregator + // the object that observes events as they come through + logger *eventLogger +} + +// EventCorrelateResult is the result of a Correlate +type EventCorrelateResult struct { + // the event after correlation + Event *v1.Event + // if provided, perform a strategic patch when updating the record on the server + Patch []byte + // if true, do no further processing of the event + Skip bool +} + +// NewEventCorrelator returns an EventCorrelator configured with default values. +// +// The EventCorrelator is responsible for event filtering, aggregating, and counting +// prior to interacting with the API server to record the event. +// +// The default behavior is as follows: +// * Aggregation is performed if a similar event is recorded 10 times in a +// in a 10 minute rolling interval. A similar event is an event that varies only by +// the Event.Message field. Rather than recording the precise event, aggregation +// will create a new event whose message reports that it has combined events with +// the same reason. +// * Events are incrementally counted if the exact same event is encountered multiple +// times. +// * A source may burst 25 events about an object, but has a refill rate budget +// per object of 1 event every 5 minutes to control long-tail of spam. +func NewEventCorrelator(clock clock.Clock) *EventCorrelator { + cacheSize := maxLruCacheEntries + spamFilter := NewEventSourceObjectSpamFilter(cacheSize, defaultSpamBurst, defaultSpamQPS, clock) + return &EventCorrelator{ + filterFunc: spamFilter.Filter, + aggregator: NewEventAggregator( + cacheSize, + EventAggregatorByReasonFunc, + EventAggregatorByReasonMessageFunc, + defaultAggregateMaxEvents, + defaultAggregateIntervalInSeconds, + clock), + + logger: newEventLogger(cacheSize, clock), + } +} + +// EventCorrelate filters, aggregates, counts, and de-duplicates all incoming events +func (c *EventCorrelator) EventCorrelate(newEvent *v1.Event) (*EventCorrelateResult, error) { + if newEvent == nil { + return nil, fmt.Errorf("event is nil") + } + aggregateEvent, ckey := c.aggregator.EventAggregate(newEvent) + observedEvent, patch, err := c.logger.eventObserve(aggregateEvent, ckey) + if c.filterFunc(observedEvent) { + return &EventCorrelateResult{Skip: true}, nil + } + return &EventCorrelateResult{Event: observedEvent, Patch: patch}, err +} + +// UpdateState based on the latest observed state from server +func (c *EventCorrelator) UpdateState(event *v1.Event) { + c.logger.updateState(event) +} diff --git a/vendor/k8s.io/client-go/tools/record/fake.go b/vendor/k8s.io/client-go/tools/record/fake.go new file mode 100644 index 000000000..6e031daaf --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/fake.go @@ -0,0 +1,58 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package record + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// FakeRecorder is used as a fake during tests. It is thread safe. It is usable +// when created manually and not by NewFakeRecorder, however all events may be +// thrown away in this case. +type FakeRecorder struct { + Events chan string +} + +func (f *FakeRecorder) Event(object runtime.Object, eventtype, reason, message string) { + if f.Events != nil { + f.Events <- fmt.Sprintf("%s %s %s", eventtype, reason, message) + } +} + +func (f *FakeRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { + if f.Events != nil { + f.Events <- fmt.Sprintf(eventtype+" "+reason+" "+messageFmt, args...) + } +} + +func (f *FakeRecorder) PastEventf(object runtime.Object, timestamp metav1.Time, eventtype, reason, messageFmt string, args ...interface{}) { +} + +func (f *FakeRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { + f.Eventf(object, eventtype, reason, messageFmt, args) +} + +// NewFakeRecorder creates new fake event recorder with event channel with +// buffer of given size. +func NewFakeRecorder(bufferSize int) *FakeRecorder { + return &FakeRecorder{ + Events: make(chan string, bufferSize), + } +} diff --git a/vendor/k8s.io/client-go/tools/record/util/util.go b/vendor/k8s.io/client-go/tools/record/util/util.go new file mode 100644 index 000000000..d1818a8d9 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/util/util.go @@ -0,0 +1,44 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "net/http" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" +) + +// ValidateEventType checks that eventtype is an expected type of event +func ValidateEventType(eventtype string) bool { + switch eventtype { + case v1.EventTypeNormal, v1.EventTypeWarning: + return true + } + return false +} + +// IsKeyNotFoundError is utility function that checks if an error is not found error +func IsKeyNotFoundError(err error) bool { + statusErr, _ := err.(*errors.StatusError) + + if statusErr != nil && statusErr.Status().Code == http.StatusNotFound { + return true + } + + return false +} diff --git a/vendor/k8s.io/kube-openapi/LICENSE b/vendor/k8s.io/kube-openapi/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
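For reference, here is a minimal sketch of how the FakeRecorder vendored above can be driven from a unit test. The test name, event reasons, and messages below are illustrative assumptions, not part of the vendored code; only the record API calls are real.

package record_test

import (
	"testing"

	"k8s.io/client-go/tools/record"
)

func TestFakeRecorderCapturesEvents(t *testing.T) {
	// Size the buffer for the number of events the test emits:
	// once the buffer is full, Event and Eventf block on the channel send.
	rec := record.NewFakeRecorder(10)

	// FakeRecorder ignores the object argument, so nil is acceptable here.
	rec.Event(nil, "Normal", "Created", "volume created")
	rec.Eventf(nil, "Warning", "FailedAttach", "attach failed for %s", "vol-1")

	want := []string{
		"Normal Created volume created",
		"Warning FailedAttach attach failed for vol-1",
	}
	for _, w := range want {
		if got := <-rec.Events; got != w {
			t.Fatalf("got %q, want %q", got, w)
		}
	}
}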
diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS b/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS new file mode 100644 index 000000000..9621a6a3a --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS @@ -0,0 +1,2 @@ +approvers: +- apelisse diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go new file mode 100644 index 000000000..11ed8a6b7 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package proto is a collection of libraries for parsing and indexing the type definitions. +// The openapi spec contains the object model definitions and extensions metadata. +package proto diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go new file mode 100644 index 000000000..5eb957aff --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -0,0 +1,318 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proto + +import ( + "fmt" + "sort" + "strings" + + "github.com/googleapis/gnostic/OpenAPIv2" + "gopkg.in/yaml.v2" +) + +func newSchemaError(path *Path, format string, a ...interface{}) error { + err := fmt.Sprintf(format, a...) + if path.Len() == 0 { + return fmt.Errorf("SchemaError: %v", err) + } + return fmt.Errorf("SchemaError(%v): %v", path, err) +} + +// VendorExtensionToMap converts openapi VendorExtension to a map. +func VendorExtensionToMap(e []*openapi_v2.NamedAny) map[string]interface{} { + values := map[string]interface{}{} + + for _, na := range e { + if na.GetName() == "" || na.GetValue() == nil { + continue + } + if na.GetValue().GetYaml() == "" { + continue + } + var value interface{} + err := yaml.Unmarshal([]byte(na.GetValue().GetYaml()), &value) + if err != nil { + continue + } + + values[na.GetName()] = value + } + + return values +} + +// Definitions is an implementation of `Models`. It looks for +// models in an openapi Schema. +type Definitions struct { + models map[string]Schema +} + +var _ Models = &Definitions{} + +// NewOpenAPIData creates a new `Models` out of the openapi document. +func NewOpenAPIData(doc *openapi_v2.Document) (Models, error) { + definitions := Definitions{ + models: map[string]Schema{}, + } + + // Save the list of all models first. This will allow us to + // validate that we don't have any dangling reference. 
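+	// (parseReference depends on this pre-registration: a $ref that names a
+	// model absent from d.models is reported as an "unknown model" schema error.)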
+	for _, namedSchema := range doc.GetDefinitions().GetAdditionalProperties() {
+		definitions.models[namedSchema.GetName()] = nil
+	}
+
+	// Now, parse each model. We can validate that references exist.
+	for _, namedSchema := range doc.GetDefinitions().GetAdditionalProperties() {
+		path := NewPath(namedSchema.GetName())
+		schema, err := definitions.ParseSchema(namedSchema.GetValue(), &path)
+		if err != nil {
+			return nil, err
+		}
+		definitions.models[namedSchema.GetName()] = schema
+	}
+
+	return &definitions, nil
+}
+
+// We believe the schema is a reference, verify that and return a new
+// Schema
+func (d *Definitions) parseReference(s *openapi_v2.Schema, path *Path) (Schema, error) {
+	// TODO(wrong): a schema with a $ref can have properties. We can ignore them (would be incomplete), but we cannot return an error.
+	if len(s.GetProperties().GetAdditionalProperties()) > 0 {
+		return nil, newSchemaError(path, "unallowed embedded type definition")
+	}
+	// TODO(wrong): a schema with a $ref can have a type. We can ignore it (would be incomplete), but we cannot return an error.
+	if len(s.GetType().GetValue()) > 0 {
+		return nil, newSchemaError(path, "definition reference can't have a type")
+	}
+
+	// TODO(wrong): $refs outside of the definitions are completely valid. We can ignore them (would be incomplete), but we cannot return an error.
+	if !strings.HasPrefix(s.GetXRef(), "#/definitions/") {
+		return nil, newSchemaError(path, "unallowed reference to non-definition %q", s.GetXRef())
+	}
+	reference := strings.TrimPrefix(s.GetXRef(), "#/definitions/")
+	if _, ok := d.models[reference]; !ok {
+		return nil, newSchemaError(path, "unknown model in reference: %q", reference)
+	}
+	return &Ref{
+		BaseSchema:  d.parseBaseSchema(s, path),
+		reference:   reference,
+		definitions: d,
+	}, nil
+}
+
+func (d *Definitions) parseBaseSchema(s *openapi_v2.Schema, path *Path) BaseSchema {
+	return BaseSchema{
+		Description: s.GetDescription(),
+		Extensions:  VendorExtensionToMap(s.GetVendorExtension()),
+		Path:        *path,
+	}
+}
+
+// We believe the schema is a map, verify and return a new schema
+func (d *Definitions) parseMap(s *openapi_v2.Schema, path *Path) (Schema, error) {
+	if len(s.GetType().GetValue()) != 0 && s.GetType().GetValue()[0] != object {
+		return nil, newSchemaError(path, "invalid object type")
+	}
+	var sub Schema
+	// TODO(incomplete): this misses the boolean case as AdditionalProperties is a bool+schema sum type.
+	if s.GetAdditionalProperties().GetSchema() == nil {
+		sub = &Arbitrary{
+			BaseSchema: d.parseBaseSchema(s, path),
+		}
+	} else {
+		var err error
+		sub, err = d.ParseSchema(s.GetAdditionalProperties().GetSchema(), path)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return &Map{
+		BaseSchema: d.parseBaseSchema(s, path),
+		SubType:    sub,
+	}, nil
+}
+
+func (d *Definitions) parsePrimitive(s *openapi_v2.Schema, path *Path) (Schema, error) {
+	var t string
+	if len(s.GetType().GetValue()) > 1 {
+		return nil, newSchemaError(path, "primitive can't have more than 1 type")
+	}
+	if len(s.GetType().GetValue()) == 1 {
+		t = s.GetType().GetValue()[0]
+	}
+	switch t {
+	case String: // do nothing
+	case Number: // do nothing
+	case Integer: // do nothing
+	case Boolean: // do nothing
+	// TODO(wrong): this misses "null". Would skip the null case (would be incomplete), but we cannot return an error.
+	default:
+		return nil, newSchemaError(path, "Unknown primitive type: %q", t)
+	}
+	return &Primitive{
+		BaseSchema: d.parseBaseSchema(s, path),
+		Type:       t,
+		Format:     s.GetFormat(),
+	}, nil
+}
+
+func (d *Definitions) parseArray(s *openapi_v2.Schema, path *Path) (Schema, error) {
+	if len(s.GetType().GetValue()) != 1 {
+		return nil, newSchemaError(path, "array should have exactly one type")
+	}
+	if s.GetType().GetValue()[0] != array {
+		return nil, newSchemaError(path, `array should have type "array"`)
+	}
+	if len(s.GetItems().GetSchema()) != 1 {
+		// TODO(wrong): Items can have multiple elements. We can ignore Items then (would be incomplete), but we cannot return an error.
+		// TODO(wrong): "type: array" without any items at all is completely valid.
+		return nil, newSchemaError(path, "array should have exactly one sub-item")
+	}
+	sub, err := d.ParseSchema(s.GetItems().GetSchema()[0], path)
+	if err != nil {
+		return nil, err
+	}
+	return &Array{
+		BaseSchema: d.parseBaseSchema(s, path),
+		SubType:    sub,
+	}, nil
+}
+
+func (d *Definitions) parseKind(s *openapi_v2.Schema, path *Path) (Schema, error) {
+	if len(s.GetType().GetValue()) != 0 && s.GetType().GetValue()[0] != object {
+		return nil, newSchemaError(path, "invalid object type")
+	}
+	if s.GetProperties() == nil {
+		return nil, newSchemaError(path, "object doesn't have properties")
+	}
+
+	fields := map[string]Schema{}
+	fieldOrder := []string{}
+
+	for _, namedSchema := range s.GetProperties().GetAdditionalProperties() {
+		var err error
+		name := namedSchema.GetName()
+		path := path.FieldPath(name)
+		fields[name], err = d.ParseSchema(namedSchema.GetValue(), &path)
+		if err != nil {
+			return nil, err
+		}
+		fieldOrder = append(fieldOrder, name)
+	}
+
+	return &Kind{
+		BaseSchema:     d.parseBaseSchema(s, path),
+		RequiredFields: s.GetRequired(),
+		Fields:         fields,
+		FieldOrder:     fieldOrder,
+	}, nil
+}
+
+func (d *Definitions) parseArbitrary(s *openapi_v2.Schema, path *Path) (Schema, error) {
+	return &Arbitrary{
+		BaseSchema: d.parseBaseSchema(s, path),
+	}, nil
+}
+
+// ParseSchema creates a walkable Schema from an openapi schema. While
+// this function is public, it doesn't leak through the interface.
+func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, error) {
+	if s.GetXRef() != "" {
+		// TODO(incomplete): ignoring the rest of s is wrong. As long as there are no conflicts, everything from s must be considered
+		// Reference: https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#path-item-object
+		return d.parseReference(s, path)
+	}
+	objectTypes := s.GetType().GetValue()
+	switch len(objectTypes) {
+	case 0:
+		// in the OpenAPI schema served by older k8s versions, object definitions created from structs did not include
+		// the type:object property (they only included the "properties" property), so we need to handle this case
+		// TODO: validate that we ever published empty, non-nil properties. JSON roundtripping nils them.
+		if s.GetProperties() != nil {
+			// TODO(wrong): when verifying a non-object later against this, it will be rejected as invalid type.
+			// TODO(CRD validation schema publishing): we have to filter properties (empty or not) if type=object is not given
+			return d.parseKind(s, path)
+		} else {
+			// Definition has no type and no properties. Treat it as an arbitrary value
+			// TODO(incomplete): what if it has additionalProperties=false or patternProperties?
+			// ANSWER: parseArbitrary is less strict than it has to be with patternProperties (which is ignored). So this is correct (of course not complete).
+			return d.parseArbitrary(s, path)
+		}
+	case 1:
+		t := objectTypes[0]
+		switch t {
+		case object:
+			if s.GetProperties() != nil {
+				return d.parseKind(s, path)
+			} else {
+				return d.parseMap(s, path)
+			}
+		case array:
+			return d.parseArray(s, path)
+		}
+		return d.parsePrimitive(s, path)
+	default:
+		// the OpenAPI generator never generates (nor has it ever in the past) OpenAPI type definitions with multiple types
+		// TODO(wrong): this is rejecting a completely valid OpenAPI spec
+		// TODO(CRD validation schema publishing): filter these out
+		return nil, newSchemaError(path, "definitions with multiple types aren't supported")
+	}
+}
+
+// LookupModel is public through the interface of Models. It
+// returns a visitable schema from the given model name.
+func (d *Definitions) LookupModel(model string) Schema {
+	return d.models[model]
+}
+
+func (d *Definitions) ListModels() []string {
+	models := []string{}
+
+	for model := range d.models {
+		models = append(models, model)
+	}
+
+	sort.Strings(models)
+	return models
+}
+
+type Ref struct {
+	BaseSchema
+
+	reference   string
+	definitions *Definitions
+}
+
+var _ Reference = &Ref{}
+
+func (r *Ref) Reference() string {
+	return r.reference
+}
+
+func (r *Ref) SubSchema() Schema {
+	return r.definitions.models[r.reference]
+}
+
+func (r *Ref) Accept(v SchemaVisitor) {
+	v.VisitReference(r)
+}
+
+func (r *Ref) GetName() string {
+	return fmt.Sprintf("Reference to %q", r.reference)
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go
new file mode 100644
index 000000000..46643aa50
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go
@@ -0,0 +1,278 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package proto
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// Defines openapi types.
+const (
+	Integer = "integer"
+	Number  = "number"
+	String  = "string"
+	Boolean = "boolean"
+
+	// These types are private as they should never leak, and are
+	// represented by actual structs.
+	array  = "array"
+	object = "object"
+)
+
+// Models interface describes a model provider. It can give you the
+// schema for a specific model.
+type Models interface {
+	LookupModel(string) Schema
+	ListModels() []string
+}
+
+// SchemaVisitor is an interface that you need to implement if you want
+// to "visit" an openapi schema. A dispatch on the Schema type will call
+// the appropriate function based on its actual type:
+// - Array is a list of one and only one given subtype
+// - Map is a map of string to one and only one given subtype
+// - Primitive can be string, integer, number and boolean.
+// - Kind is an object with specific fields mapping to specific types.
+// - Reference is a link to another definition.
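+//
+// A minimal visitor sketch (illustrative, not part of this package) that
+// only counts object kinds could look like:
+//
+//	type kindCounter struct{ n int }
+//
+//	func (c *kindCounter) VisitArray(*Array)         {}
+//	func (c *kindCounter) VisitMap(*Map)             {}
+//	func (c *kindCounter) VisitPrimitive(*Primitive) {}
+//	func (c *kindCounter) VisitKind(*Kind)           { c.n++ }
+//	func (c *kindCounter) VisitReference(Reference)  {}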
+type SchemaVisitor interface {
+	VisitArray(*Array)
+	VisitMap(*Map)
+	VisitPrimitive(*Primitive)
+	VisitKind(*Kind)
+	VisitReference(Reference)
+}
+
+// SchemaVisitorArbitrary is an additional visitor interface which handles
+// arbitrary types. For backwards compatibility, it's a separate interface
+// which is checked for at runtime.
+type SchemaVisitorArbitrary interface {
+	SchemaVisitor
+	VisitArbitrary(*Arbitrary)
+}
+
+// Schema is the base definition of an openapi type.
+type Schema interface {
+	// Giving a visitor here will let you visit the actual type.
+	Accept(SchemaVisitor)
+
+	// Pretty print the name of the type.
+	GetName() string
+	// Describes how to access this field.
+	GetPath() *Path
+	// Describes the field.
+	GetDescription() string
+	// Returns type extensions.
+	GetExtensions() map[string]interface{}
+}
+
+// Path helps us keep track of type paths
+type Path struct {
+	parent *Path
+	key    string
+}
+
+func NewPath(key string) Path {
+	return Path{key: key}
+}
+
+func (p *Path) Get() []string {
+	if p == nil {
+		return []string{}
+	}
+	if p.key == "" {
+		return p.parent.Get()
+	}
+	return append(p.parent.Get(), p.key)
+}
+
+func (p *Path) Len() int {
+	return len(p.Get())
+}
+
+func (p *Path) String() string {
+	return strings.Join(p.Get(), "")
+}
+
+// ArrayPath appends an array index and creates a new path
+func (p *Path) ArrayPath(i int) Path {
+	return Path{
+		parent: p,
+		key:    fmt.Sprintf("[%d]", i),
+	}
+}
+
+// FieldPath appends a field name and creates a new path
+func (p *Path) FieldPath(field string) Path {
+	return Path{
+		parent: p,
+		key:    fmt.Sprintf(".%s", field),
+	}
+}
+
+// BaseSchema holds data used by each type of schema.
+type BaseSchema struct {
+	Description string
+	Extensions  map[string]interface{}
+
+	Path Path
+}
+
+func (b *BaseSchema) GetDescription() string {
+	return b.Description
+}
+
+func (b *BaseSchema) GetExtensions() map[string]interface{} {
+	return b.Extensions
+}
+
+func (b *BaseSchema) GetPath() *Path {
+	return &b.Path
+}
+
+// Array must have all its elements of the same `SubType`.
+type Array struct {
+	BaseSchema
+
+	SubType Schema
+}
+
+var _ Schema = &Array{}
+
+func (a *Array) Accept(v SchemaVisitor) {
+	v.VisitArray(a)
+}
+
+func (a *Array) GetName() string {
+	return fmt.Sprintf("Array of %s", a.SubType.GetName())
+}
+
+// Kind is a complex object. It can have multiple different
+// subtypes for each field, as defined in the `Fields` field. Mandatory
+// fields are listed in `RequiredFields`. The key of the object is
+// always of type `string`.
+type Kind struct {
+	BaseSchema
+
+	// Lists names of required fields.
+	RequiredFields []string
+	// Maps field names to types.
+	Fields map[string]Schema
+	// FieldOrder reports the canonical order for the fields.
+	FieldOrder []string
+}
+
+var _ Schema = &Kind{}
+
+func (k *Kind) Accept(v SchemaVisitor) {
+	v.VisitKind(k)
+}
+
+func (k *Kind) GetName() string {
+	properties := []string{}
+	for key := range k.Fields {
+		properties = append(properties, key)
+	}
+	return fmt.Sprintf("Kind(%v)", properties)
+}
+
+// IsRequired returns true if `field` is a required field for this type.
+func (k *Kind) IsRequired(field string) bool {
+	for _, f := range k.RequiredFields {
+		if f == field {
+			return true
+		}
+	}
+	return false
+}
+
+// Keys returns an alphabetically sorted list of keys.
+func (k *Kind) Keys() []string {
+	keys := make([]string, 0)
+	for key := range k.Fields {
+		keys = append(keys, key)
+	}
+	sort.Strings(keys)
+	return keys
+}
+
+// Map is an object whose values must all be of the same `SubType`.
+// The key of the object is always of type `string`.
+type Map struct {
+	BaseSchema
+
+	SubType Schema
+}
+
+var _ Schema = &Map{}
+
+func (m *Map) Accept(v SchemaVisitor) {
+	v.VisitMap(m)
+}
+
+func (m *Map) GetName() string {
+	return fmt.Sprintf("Map of %s", m.SubType.GetName())
+}
+
+// Primitive is a literal. There can be multiple types of primitives,
+// and the concrete type can be inspected through the `Type` field.
+type Primitive struct {
+	BaseSchema
+
+	// Type of a primitive must be one of: integer, number, string, boolean.
+	Type   string
+	Format string
+}
+
+var _ Schema = &Primitive{}
+
+func (p *Primitive) Accept(v SchemaVisitor) {
+	v.VisitPrimitive(p)
+}
+
+func (p *Primitive) GetName() string {
+	if p.Format == "" {
+		return p.Type
+	}
+	return fmt.Sprintf("%s (%s)", p.Type, p.Format)
+}
+
+// Arbitrary is a value of any type (primitive, object or array)
+type Arbitrary struct {
+	BaseSchema
+}
+
+var _ Schema = &Arbitrary{}
+
+func (a *Arbitrary) Accept(v SchemaVisitor) {
+	if visitor, ok := v.(SchemaVisitorArbitrary); ok {
+		visitor.VisitArbitrary(a)
+	}
+}
+
+func (a *Arbitrary) GetName() string {
+	return "Arbitrary value (primitive, object or array)"
+}
+
+// Reference implementation depends on the type of document.
+type Reference interface {
+	Schema
+
+	Reference() string
+	SubSchema() Schema
+}
diff --git a/vendor/vendor.json b/vendor/vendor.json
index 882eb4bfd..5005ff1ed 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -890,6 +890,12 @@
 			"revision": "23def4e6c14b4da8ac2ed8007337bc5eb5007998",
 			"revisionTime": "2016-01-25T20:49:56Z"
 		},
+		{
+			"checksumSHA1": "LHNzQwau1zPeFPPG5zbNf8AgUOQ=",
+			"path": "github.com/golang/groupcache/lru",
+			"revision": "5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b",
+			"revisionTime": "2019-01-29T15:46:38Z"
+		},
 		{
 			"checksumSHA1": "IHIm2BA6DrJDHvALz65pY2G38sM=",
 			"path": "github.com/golang/mock/gomock",
@@ -1769,32 +1775,32 @@
 		{
 			"checksumSHA1": "CbjGrF/nr3TExev5K0mT4ISqtPs=",
 			"path": "github.com/portworx/sched-ops/k8s",
-			"revision": "736d495dae86ea670876759029b2e605195d381f",
-			"revisionTime": "2020-03-03T19:25:26Z"
+			"revision": "6f93ebad686b9831ac4e9e8e70dd1a46a3fe3e5a",
+			"revisionTime": "2020-04-13T19:14:45Z"
 		},
 		{
 			"checksumSHA1": "4PlYObbaQFXyMQLOTbiWwGKAX7s=",
 			"path": "github.com/portworx/sched-ops/k8s/common",
-			"revision": "736d495dae86ea670876759029b2e605195d381f",
-			"revisionTime": "2020-03-03T19:25:26Z"
+			"revision": "6f93ebad686b9831ac4e9e8e70dd1a46a3fe3e5a",
+			"revisionTime": "2020-04-13T19:14:45Z"
 		},
 		{
-			"checksumSHA1": "oH4KF+IcYCtHTkyyOPBz75mp9LY=",
+			"checksumSHA1": "0NZIbE/8hay7k0l3tF0Vllj1haE=",
 			"path": "github.com/portworx/sched-ops/k8s/core",
-			"revision": "736d495dae86ea670876759029b2e605195d381f",
-			"revisionTime": "2020-03-03T19:25:26Z"
+			"revision": "6f93ebad686b9831ac4e9e8e70dd1a46a3fe3e5a",
+			"revisionTime": "2020-04-13T19:14:45Z"
 		},
 		{
 			"checksumSHA1": "lLPF1/eoOkO94IOsMfQHWmBlgk4=",
 			"path": "github.com/portworx/sched-ops/k8s/errors",
-			"revision": "736d495dae86ea670876759029b2e605195d381f",
-			"revisionTime": "2020-03-03T19:25:26Z"
+			"revision": "6f93ebad686b9831ac4e9e8e70dd1a46a3fe3e5a",
+			"revisionTime": "2020-04-13T19:14:45Z"
 		},
 		{
 			"checksumSHA1": "Uinzjq8KoiM0M8/wcU2wEytL3hI=",
 			"path": "github.com/portworx/sched-ops/task",
-			"revision":
"736d495dae86ea670876759029b2e605195d381f", - "revisionTime": "2020-03-03T19:25:26Z" + "revision": "6f93ebad686b9831ac4e9e8e70dd1a46a3fe3e5a", + "revisionTime": "2020-04-13T19:14:45Z" }, { "checksumSHA1": "ddMxWhUIhxqvxOt8P8SMG3EdnIw=", @@ -2705,218 +2711,218 @@ { "checksumSHA1": "dfisn5RPLe8B2K9I/ZoLO3FobPE=", "path": "k8s.io/api/admissionregistration/v1beta1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "pSRGAUgXccOjjhSJ6nM3QN3XB+w=", "path": "k8s.io/api/apps/v1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "LlJm9My5sH4xPYIlDnc1cupoN5E=", "path": "k8s.io/api/apps/v1beta1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "F21Xzle1YhTfocLa5lNaOcFCFhI=", "path": "k8s.io/api/apps/v1beta2", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "k4iYPaDs/UI21wIuNQIXlGMgzuk=", "path": "k8s.io/api/auditregistration/v1alpha1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "ouyhDg6MKMv5AcoDp36/Xe2283E=", "path": "k8s.io/api/authentication/v1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "tyTaW2BrsNwFVlo/3+WElFUlvNA=", "path": "k8s.io/api/authentication/v1beta1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "4ih/tGMVabAWWAArIqh6CEYcCH0=", "path": "k8s.io/api/authorization/v1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "n2Ixm3PmJt8HcvrPadaqwNU12Sg=", "path": "k8s.io/api/authorization/v1beta1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "HIxtRjzcI4kq6TOSFq2hHM7fxHo=", "path": "k8s.io/api/autoscaling/v1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "ZKOS7PAooo6H4tTktcZu5RxXsEw=", "path": "k8s.io/api/autoscaling/v2beta1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "xUQ+Gc8E23VYYB92Lb9wZCzQe4E=", "path": 
"k8s.io/api/autoscaling/v2beta2", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "ued1O8G/lwPW7IpdKTgnyRwSNnM=", "path": "k8s.io/api/batch/v1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "EqMcEkbJEZJQMZuTMts5SqY0ITo=", "path": "k8s.io/api/batch/v1beta1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "P8S34HM8viOwqStIzQS8DFe5l7g=", "path": "k8s.io/api/batch/v2alpha1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "o0zqnZlVFk0bBjo7jygVgcWsaIs=", "path": "k8s.io/api/certificates/v1beta1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "k2HxKn/fcN559dg6ENRLtfqjxrw=", "path": "k8s.io/api/coordination/v1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "feRBQ1ZsR9TV93Mum3hBiORJbn8=", "path": "k8s.io/api/coordination/v1beta1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "cTIs1IQXjEyxxDLHHZClGm50kVs=", "path": "k8s.io/api/core/v1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "xmLXO/VOK6ZaCbK0PffsEJhpYxc=", "path": "k8s.io/api/events/v1beta1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "8NkCmlHjCuAXR1PLTaVBxz6zjb4=", "path": "k8s.io/api/extensions/v1beta1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "V6CwZKXlkTg5W9IdCJ0Tp5qDAKA=", "path": "k8s.io/api/networking/v1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "f5GdezSSAEjgf21BgXY/LZWlya4=", "path": "k8s.io/api/networking/v1beta1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "QuuGWn/hTfv4hD+QKf8PCZFliJM=", "path": "k8s.io/api/node/v1alpha1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - 
"revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "4SQ6qxTXYbOyevgxQEGeaCnuVII=", "path": "k8s.io/api/node/v1beta1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "KIfHrxABa0bR6+2CIV4YwCitChc=", "path": "k8s.io/api/policy/v1beta1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "AAw5r10kUxF7kSfyBePqrVvpnMc=", "path": "k8s.io/api/rbac/v1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "tR8dRwM6lLNMH4CFCgTtxYn2PZs=", "path": "k8s.io/api/rbac/v1alpha1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "YHuIZ96JXb0tsi/xb6uu4/QPXes=", "path": "k8s.io/api/rbac/v1beta1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "6OjtUgce7EenldPD5jBH1s3DsnU=", "path": "k8s.io/api/scheduling/v1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "IoVuRIUfnR4plXIBv2xj6eVGHQU=", "path": "k8s.io/api/scheduling/v1alpha1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "ElxpP1Kfibom7R9Qsd+CN3/Juko=", "path": "k8s.io/api/scheduling/v1beta1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "lwutZCRDAyZw9bIM8wrTO3bepks=", "path": "k8s.io/api/settings/v1alpha1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "sIJm1/N9KyuuDZ+6Y9A5AVEI8T4=", "path": "k8s.io/api/storage/v1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "bboISMm3jS7WRemApXYt7g0jG+Q=", "path": "k8s.io/api/storage/v1alpha1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "QzvWnbr+MA5OpFpEnTUyDKAK96o=", "path": "k8s.io/api/storage/v1beta1", - "revision": "e3a6b8045b0b303430f6d0c261fd9e35be50800e", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "159aefb8556bb8ed4be7631461d1558546d304db", + 
"revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "842zFSBF27hjQGTxXEP61lj1OOA=", @@ -2951,614 +2957,644 @@ { "checksumSHA1": "zihscif0VKHE39gyw3eh0lj3Xqk=", "path": "k8s.io/apimachinery/pkg/api/errors", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "kJRqR5k9Ptb/USP1CVuXbDx4lN8=", "path": "k8s.io/apimachinery/pkg/api/meta", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "qUU97FVbzCunx23D45SQPkZB8ss=", "path": "k8s.io/apimachinery/pkg/api/resource", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "LTv0lhS2bsp6a243zPT88+n5d18=", "path": "k8s.io/apimachinery/pkg/apis/meta/v1", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "NylhW4M+UksZhAtpPDmgNyYnAbQ=", "path": "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "WY+KsnkJL1hF3gOLyLYijFU5MJw=", "path": "k8s.io/apimachinery/pkg/apis/meta/v1beta1", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "I6+J//YuSZdxncAuc/kar8G1jkY=", "path": "k8s.io/apimachinery/pkg/conversion", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "lZgVHIUDKFlQKJBYQVWqED5sDnE=", "path": "k8s.io/apimachinery/pkg/conversion/queryparams", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "zgYf1VhDcfCpouwRgF5Zodz+g6M=", "path": "k8s.io/apimachinery/pkg/fields", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "kw0p45bmrpOXcurZuhj7n7RJPwI=", "path": "k8s.io/apimachinery/pkg/labels", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "RUSyEBiYHRTCFM2HYG8W+DGUPR0=", "path": "k8s.io/apimachinery/pkg/runtime", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "ZbSF8zWw5/K0F9TrftkOt3uD3tY=", "path": "k8s.io/apimachinery/pkg/runtime/schema", - "revision": 
"a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "zrzqafjVBMG7MbUDRkDjvb/tKjM=", "path": "k8s.io/apimachinery/pkg/runtime/serializer", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { - "checksumSHA1": "bxY1I5srHiPJx3VhswgGi3CwyXo=", + "checksumSHA1": "MXsz3LCS6x2eaXNHEphpzGjsUbI=", "path": "k8s.io/apimachinery/pkg/runtime/serializer/json", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "W74c9THnzEY5+PdqB0Fx6VQJAq0=", "path": "k8s.io/apimachinery/pkg/runtime/serializer/protobuf", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "CxRm8Ny1sWZVlEw1WBfbOMtJP40=", "path": "k8s.io/apimachinery/pkg/runtime/serializer/recognizer", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "7R78WUhv6d/VIW8EykOe6sIhDgU=", "path": "k8s.io/apimachinery/pkg/runtime/serializer/streaming", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "pg/e2n60NZuGAfE/w0GvdZX9QZg=", "path": "k8s.io/apimachinery/pkg/runtime/serializer/versioning", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "p9Wv7xurZXAW0jYL/SLNPbiUjaA=", "path": "k8s.io/apimachinery/pkg/selection", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "Buo3TchdY2l9bkweZyRUGJ5w3jY=", "path": "k8s.io/apimachinery/pkg/types", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "Gtwo9p42g21v46mUVvj/Qh8yUSE=", "path": "k8s.io/apimachinery/pkg/util/clock", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "swbgG4Znnjtkk2HNC8fvkLmV4+8=", "path": "k8s.io/apimachinery/pkg/util/errors", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "bQOeFTINeXcfMOAOPArjRma/1mk=", "path": "k8s.io/apimachinery/pkg/util/framer", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + 
"revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "WIQsM2luYFd1BVg3Yg3JVw82y5o=", "path": "k8s.io/apimachinery/pkg/util/httpstream", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "KjMBISUBvxOjMGtPPR9kshQEfLI=", "path": "k8s.io/apimachinery/pkg/util/httpstream/spdy", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "QptuSXomhT8gURHhjIqMtQXiDxE=", "path": "k8s.io/apimachinery/pkg/util/intstr", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { - "checksumSHA1": "NaIzEbjlhEMikewzRXivi2r0xyM=", + "checksumSHA1": "Z6Ug9TOFgZ/lIuCCPEeJD5wo+L8=", "path": "k8s.io/apimachinery/pkg/util/json", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" + }, + { + "checksumSHA1": "7AatSkMyuEulOhCxKEeOWp3mQCw=", + "path": "k8s.io/apimachinery/pkg/util/mergepatch", + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "y1dxj1ipJgt2PRXXSXDmXrSka6A=", "path": "k8s.io/apimachinery/pkg/util/naming", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "hWC3sKhF9O0/Sfzf8sZwoy4UAxY=", "path": "k8s.io/apimachinery/pkg/util/net", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "PvefNiZTc/uwwPAOkBKZ3ST25Tc=", "path": "k8s.io/apimachinery/pkg/util/remotecommand", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "THEgyKf4FQGNKuHYAMTda9mChJ8=", "path": "k8s.io/apimachinery/pkg/util/runtime", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "ozEwMzA48zwMyQ50SwNKSM852U4=", "path": "k8s.io/apimachinery/pkg/util/sets", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" + }, + { + "checksumSHA1": "X/JxuH5NDWLGOPw+886Nn8XcVBA=", + "path": "k8s.io/apimachinery/pkg/util/strategicpatch", + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "zo5JN/KNwbIovPfiC2LyGpWD+nc=", "path": "k8s.io/apimachinery/pkg/util/validation", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": 
"T+DDCd+Y07tEOHiPNt2zzXFq6Tw=", "path": "k8s.io/apimachinery/pkg/util/validation/field", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "oTaKiqOfjZRhfx8ddmddZj/OgaA=", "path": "k8s.io/apimachinery/pkg/util/wait", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "EhVstowFTSrsoSiMuG/PqPwrYsw=", "path": "k8s.io/apimachinery/pkg/util/yaml", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "KLENC52WPY7h2cBq3P1NMM0FNSs=", "path": "k8s.io/apimachinery/pkg/version", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "NjlHslsa2RXAn/UWondsFMwW9ic=", "path": "k8s.io/apimachinery/pkg/watch", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" + }, + { + "checksumSHA1": "T71NmWmOCXfkHn42es1Zf/sxZJw=", + "path": "k8s.io/apimachinery/third_party/forked/golang/json", + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "OGbsdDRP+y4F++eGe/dh3FIWThg=", "path": "k8s.io/apimachinery/third_party/forked/golang/netutil", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "9sFA+EjKrjpmK4OofQH0p0Rowfg=", "path": "k8s.io/apimachinery/third_party/forked/golang/reflect", - "revision": "a9f1d8a9c10182d101acf19b5145c3d4e9299adb", - "revisionTime": "2019-08-16T21:34:12Z" + "revision": "c5d2f014d689246b84637774317fa0288dd3129b", + "revisionTime": "2019-10-04T07:49:56Z" }, { "checksumSHA1": "Ovxe5OrkHu7tTYVE/XbTD5+DvF4=", "path": "k8s.io/client-go/discovery", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "mgVdewcg4jTq0piouybeQoXd/yQ=", "path": "k8s.io/client-go/kubernetes", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "Jmg4wTN/9ztjnzQgADNhFALddv8=", "path": "k8s.io/client-go/kubernetes/scheme", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "pmmsHaBIeDlD1LSw/KM2wVdYh2I=", "path": "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": 
"t7UdbYz8ir1FewYR53Izec0Sivw=", "path": "k8s.io/client-go/kubernetes/typed/apps/v1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "JU//A2Vhtt2lmEJvjPSzotPbax4=", "path": "k8s.io/client-go/kubernetes/typed/apps/v1beta1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "09lwkraBQBm444xRhFCgefu8kck=", "path": "k8s.io/client-go/kubernetes/typed/apps/v1beta2", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "sSK15WvG4aK3PcUrDU1F5O94z4Q=", "path": "k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "z8RBQhqz0ef52GAYcGnHbdkRLQw=", "path": "k8s.io/client-go/kubernetes/typed/authentication/v1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "xNCsaw6UsrEtSjg+2rMYZueCBIM=", "path": "k8s.io/client-go/kubernetes/typed/authentication/v1beta1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "m8sWDYsVaRB+meitgU1dx+1P0ow=", "path": "k8s.io/client-go/kubernetes/typed/authorization/v1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "XucNO6BHSXXNkSQAM1ZdiwDmksc=", "path": "k8s.io/client-go/kubernetes/typed/authorization/v1beta1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "g6sY+YnoyJ+YKuJ23A7kBES6T+I=", "path": "k8s.io/client-go/kubernetes/typed/autoscaling/v1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "KH7kRVstgAg+9L6HoO/pggR9+eI=", "path": "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "1gmsRcUU069YjYPVh42BxUugxlU=", "path": "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "u+SQV91fDXsbT35x1qDurWjWhOM=", "path": "k8s.io/client-go/kubernetes/typed/batch/v1", - "revision": 
"01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "ydEd57JAnwWWXbOL6Y9XgOds7jE=", "path": "k8s.io/client-go/kubernetes/typed/batch/v1beta1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "XbaL6H8hcf613bHRpLl8O8Ax1sQ=", "path": "k8s.io/client-go/kubernetes/typed/batch/v2alpha1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "4nR+ErKIrCr77OvOFJ3iqArk7Cc=", "path": "k8s.io/client-go/kubernetes/typed/certificates/v1beta1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "qo7VmVzJfzywNgJ1od1Eijws47Y=", "path": "k8s.io/client-go/kubernetes/typed/coordination/v1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "UnCB9l/X/xvB22kpT+Fyg0P80Rc=", "path": "k8s.io/client-go/kubernetes/typed/coordination/v1beta1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "8o+F1NeIfq4NY+AyeDcM7NyIKqw=", "path": "k8s.io/client-go/kubernetes/typed/core/v1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "ykLmGOjvLCr/URumft1aG8pkujo=", "path": "k8s.io/client-go/kubernetes/typed/events/v1beta1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "HvngFdOekeng/h5wmYnmKQDLTEw=", "path": "k8s.io/client-go/kubernetes/typed/extensions/v1beta1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "6gtdIT3Qp6z/kwgF0T3l+a1Ns4g=", "path": "k8s.io/client-go/kubernetes/typed/networking/v1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "YDQIT9x9Z7wtSFOfxXePZijd8yM=", "path": "k8s.io/client-go/kubernetes/typed/networking/v1beta1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "y6Cs3car/1URUMRg0iUtoV9r4fk=", "path": "k8s.io/client-go/kubernetes/typed/node/v1alpha1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": 
"8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "lES5qxgoORaVuyKLDIv41r+bgmY=", "path": "k8s.io/client-go/kubernetes/typed/node/v1beta1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "dVba4gvzMK97tqCVwwF69bue8tg=", "path": "k8s.io/client-go/kubernetes/typed/policy/v1beta1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "8mzr6jmw61rtVvJ0aerLF91qQd0=", "path": "k8s.io/client-go/kubernetes/typed/rbac/v1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "9Vdi03QcPkw8fOxMy8MwO1uHbl0=", "path": "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "+wa2e0UKWqLWJNajZ1/hHxvCe94=", "path": "k8s.io/client-go/kubernetes/typed/rbac/v1beta1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "8gZ5hLCC7Os9FbNh+blOqjFoMQg=", "path": "k8s.io/client-go/kubernetes/typed/scheduling/v1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "44fHtB2fH5krsp71kJy6u10dj5s=", "path": "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "6y5FUtAVxgACmJIR98cB0tVzLbI=", "path": "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "tIaveHngAO+OYXfaBtOLolwJiRs=", "path": "k8s.io/client-go/kubernetes/typed/settings/v1alpha1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "svzkNqhp1FHxdNd/iYojR/ZNdEU=", "path": "k8s.io/client-go/kubernetes/typed/storage/v1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "+DQkr+rhP6CAV5/PBa9mOkzBBbU=", "path": "k8s.io/client-go/kubernetes/typed/storage/v1alpha1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": 
"5+CSGQ14e+jdEZ2eLl1bKSZ6SnY=", "path": "k8s.io/client-go/kubernetes/typed/storage/v1beta1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "xzgS2YeBsvo8tnz4nbRiv3gcedw=", "path": "k8s.io/client-go/pkg/apis/clientauthentication", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "hQgaMoLTsSOv2xQ3CA8fA5MoEr4=", "path": "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "6DMbz5JnwquAwMbT/w5aYt4D3Hc=", "path": "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "ktNfV92Lly2Tp/6VBE/3iFH0ns8=", "path": "k8s.io/client-go/pkg/version", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "Ml1uDQHTKU1lEdm29DlVIqT2u4A=", "path": "k8s.io/client-go/plugin/pkg/client/auth/exec", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "PUL/6GeshL/Z3x7tOpO7VA77dPU=", "path": "k8s.io/client-go/rest", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "+wYCwQaVc1GvMWwOWc3wXd2GT5s=", "path": "k8s.io/client-go/rest/watch", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "SQugt5VcVLRueY5ZXj0WNQ9CG+I=", "path": "k8s.io/client-go/tools/auth", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "eXHdUD+QGu5xdtOP27XQAumIA9I=", "path": "k8s.io/client-go/tools/clientcmd", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "VE3e55mIzsNE7Buny/wNmDSciyQ=", "path": "k8s.io/client-go/tools/clientcmd/api", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "REsiLFxKYUjcTcPnDBwgr4GkArg=", "path": "k8s.io/client-go/tools/clientcmd/api/latest", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": 
"8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "gOq6PL80JftrypouhV+xxbf/WUo=", "path": "k8s.io/client-go/tools/clientcmd/api/v1", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "DrrW127wAVyI/ixPg1+g4qwCt80=", "path": "k8s.io/client-go/tools/metrics", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" + }, + { + "checksumSHA1": "wjIhPUFz0mMzt+wDZH+nHYE0D2c=", + "path": "k8s.io/client-go/tools/record", + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" + }, + { + "checksumSHA1": "W+yDEnCoCF5vhUc2bOWH+TkoW/U=", + "path": "k8s.io/client-go/tools/record/util", + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "5/CmC6dOtCw/g/1C0XjvvN7z34o=", "path": "k8s.io/client-go/tools/reference", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "GnWOc6osd2nxPjjGkGYxuN+2qXY=", "path": "k8s.io/client-go/tools/remotecommand", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "pxIo3sZzPC8AyIlBJDr3/KYL0CI=", "path": "k8s.io/client-go/transport", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "XdRD1nHERagwdSH0y1j2r9elARg=", "path": "k8s.io/client-go/transport/spdy", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "zS5phoJsfi1XeKiyQaV0vIwkuZM=", "path": "k8s.io/client-go/util/cert", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "KSB22yQOaax0P/RkHCDp80u+Jcs=", "path": "k8s.io/client-go/util/connrotation", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "vOdL2QO08zL5IS+UBOVJMGwgbFI=", "path": "k8s.io/client-go/util/exec", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "+fuKd8FWWfKiJm8JL6c4XjVJU9g=", "path": "k8s.io/client-go/util/flowcontrol", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "WRb0rXGx56fwcCisVW7GoI6gO/A=", "path": "k8s.io/client-go/util/homedir", - "revision": 
"01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "PCmCOo7V1KqZAd4iFALqcNjBt5A=", "path": "k8s.io/client-go/util/keyutil", - "revision": "01520b8320fc57d1081df88f32ae8a45547b2190", - "revisionTime": "2019-10-04T07:49:56Z" + "revision": "8e412805300829bc5a39e63eaccfc8a3355e4797", + "revisionTime": "2019-10-29T00:54:44Z" }, { "checksumSHA1": "oRCnXOz9PDToxW4OCQtT2pZMr1M=", @@ -3566,6 +3602,12 @@ "revision": "3aab9e5c735f254e186a576b4c32702f6cf4c705", "revisionTime": "2018-12-27T13:44:20Z" }, + { + "checksumSHA1": "2vICmfAeZAAtm+DnoAuIbRkliLU=", + "path": "k8s.io/kube-openapi/pkg/util/proto", + "revision": "0270cf2f1c1d995d34b36019a6f65d58e6e33ad4", + "revisionTime": "2019-09-18T14:33:30Z" + }, { "checksumSHA1": "bvvbPZitrlqA/Elm/XwdZdHDQSA=", "path": "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib", diff --git a/volume/drivers/fake/fake.go b/volume/drivers/fake/fake.go index 6ad0304f8..c3e7737b2 100644 --- a/volume/drivers/fake/fake.go +++ b/volume/drivers/fake/fake.go @@ -140,14 +140,23 @@ func (d *driver) Status() [][2]string { } func (d *driver) Inspect(volumeIDs []string) ([]*api.Volume, error) { + empty := make([]*api.Volume, 0, len(volumeIDs)) + + // The Kubernetes intree driver for portworx incorrectly requests version + // from Inspect, which can be interpreted as a volumd id. Instead we catch + // it here and return success with an empty list. + if len(volumeIDs) > 0 && volumeIDs[0] == "versions" { + return empty, nil + } + volumes, err := d.StoreEnumerator.Inspect(volumeIDs) if err != nil { - return nil, err + return empty, err } else if err == nil && len(volumes) == 0 { - return nil, kvdb.ErrNotFound + return empty, kvdb.ErrNotFound } - return volumes, err + return volumes, nil } // diff --git a/volume/drivers/fake/fake_test.go b/volume/drivers/fake/fake_test.go index d91453935..d45dd0f53 100644 --- a/volume/drivers/fake/fake_test.go +++ b/volume/drivers/fake/fake_test.go @@ -118,7 +118,7 @@ func TestFakeInspect(t *testing.T) { assert.NotNil(t, err) assert.Error(t, err) assert.Equal(t, err, kvdb.ErrNotFound) - assert.Nil(t, v) + assert.NotNil(t, v) } func TestFakeCapacityUsage(t *testing.T) {